From 55779679ad5e7dafb2b2d565bd7bca41337b74d6 Mon Sep 17 00:00:00 2001
From: L2501
Date: Thu, 14 Sep 2023 08:32:59 +0000
Subject: [PATCH] [script.module.mutagen] 1.47.0

---
 .../{LICENSE => LICENCE.txt}                  |   0
 script.module.mutagen/README.rst              |  28 -
 script.module.mutagen/addon.xml               |   6 +-
 script.module.mutagen/lib/mutagen/__init__.py |   3 +-
 script.module.mutagen/lib/mutagen/_compat.py  |  94 ---
 .../lib/mutagen/_constants.py                 |   1 -
 script.module.mutagen/lib/mutagen/_file.py    |  28 +-
 script.module.mutagen/lib/mutagen/_iff.py     | 399 +++++++++++
 script.module.mutagen/lib/mutagen/_riff.py    |  69 ++
 .../lib/mutagen/_senf/__init__.py             |  91 ---
 .../lib/mutagen/_senf/_argv.py                | 120 ----
 .../lib/mutagen/_senf/_compat.py              |  58 --
 .../lib/mutagen/_senf/_environ.py             | 270 -------
 .../lib/mutagen/_senf/_fsnative.py            | 666 ------------------
 .../lib/mutagen/_senf/_print.py               | 424 -----------
 .../lib/mutagen/_senf/_stdlib.py              | 154 ----
 .../lib/mutagen/_senf/_temp.py                |  96 ---
 .../lib/mutagen/_senf/_winansi.py             | 319 ---------
 .../lib/mutagen/_senf/_winapi.py              | 222 ------
 script.module.mutagen/lib/mutagen/_tags.py    |   5 +-
 .../lib/mutagen/_tools/__init__.py            |   1 -
 .../lib/mutagen/_tools/_util.py               |  12 +-
 .../lib/mutagen/_tools/mid3cp.py              |  30 +-
 .../lib/mutagen/_tools/mid3iconv.py           |  19 +-
 .../lib/mutagen/_tools/mid3v2.py              | 107 ++-
 .../lib/mutagen/_tools/moggsplit.py           |   5 +-
 .../lib/mutagen/_tools/mutagen_inspect.py     |  18 +-
 .../lib/mutagen/_tools/mutagen_pony.py        |  13 +-
 script.module.mutagen/lib/mutagen/_util.py    | 213 ++----
 script.module.mutagen/lib/mutagen/_vorbis.py  |  54 +-
 script.module.mutagen/lib/mutagen/aac.py      |  17 +-
 script.module.mutagen/lib/mutagen/ac3.py      |   7 +-
 script.module.mutagen/lib/mutagen/aiff.py     | 335 ++-------
 script.module.mutagen/lib/mutagen/apev2.py    | 101 +--
 .../lib/mutagen/asf/__init__.py               |  19 +-
 .../lib/mutagen/asf/_attrs.py                 |  43 +-
 .../lib/mutagen/asf/_objects.py               |  33 +-
 .../lib/mutagen/asf/_util.py                  |  10 +-
 script.module.mutagen/lib/mutagen/dsdiff.py   | 266 +++++++
 script.module.mutagen/lib/mutagen/dsf.py      |   9 +-
 script.module.mutagen/lib/mutagen/easyid3.py  |  49 +-
 script.module.mutagen/lib/mutagen/easymp4.py  |  40 +-
 script.module.mutagen/lib/mutagen/flac.py     |  53 +-
 .../lib/mutagen/id3/__init__.py               |   3 +-
 .../lib/mutagen/id3/_file.py                  |   5 +-
 .../lib/mutagen/id3/_frames.py                |  66 +-
 .../lib/mutagen/id3/_id3v1.py                 |  11 +-
 .../lib/mutagen/id3/_specs.py                 |  64 +-
 .../lib/mutagen/id3/_tags.py                  |  77 +-
 .../lib/mutagen/id3/_util.py                  |  28 +-
 script.module.mutagen/lib/mutagen/m4a.py      |   1 -
 .../lib/mutagen/monkeysaudio.py               |   4 +-
 .../lib/mutagen/mp3/__init__.py               |  38 +-
 .../lib/mutagen/mp3/_util.py                  |  14 +-
 .../lib/mutagen/mp4/__init__.py               | 207 +++++-
 .../lib/mutagen/mp4/_as_entry.py              |  29 +-
 .../lib/mutagen/mp4/_atom.py                  |  10 +-
 .../lib/mutagen/mp4/_util.py                  |   1 -
 script.module.mutagen/lib/mutagen/musepack.py |  18 +-
 script.module.mutagen/lib/mutagen/ogg.py      |  46 +-
 script.module.mutagen/lib/mutagen/oggflac.py  |   8 +-
 script.module.mutagen/lib/mutagen/oggopus.py  |   3 +-
 script.module.mutagen/lib/mutagen/oggspeex.py |   1 -
 .../lib/mutagen/oggtheora.py                  |  19 +-
 .../lib/mutagen/oggvorbis.py                  |  10 +-
 .../lib/mutagen/optimfrog.py                  |   4 +-
 script.module.mutagen/lib/mutagen/smf.py      |  13 +-
 script.module.mutagen/lib/mutagen/tak.py      |   8 +-
 .../lib/mutagen/trueaudio.py                  |  14 +-
 script.module.mutagen/lib/mutagen/wave.py     | 209 ++++++
 script.module.mutagen/lib/mutagen/wavpack.py  |   8 +-
 .../{ => resources}/icon.png                  | Bin
 72 files changed, 1773 insertions(+), 3653 deletions(-)
 rename script.module.mutagen/{LICENSE => LICENCE.txt} (100%)
 delete mode 100644 script.module.mutagen/README.rst
 delete mode 100644 script.module.mutagen/lib/mutagen/_compat.py
 create mode 100644 script.module.mutagen/lib/mutagen/_iff.py
 create mode 100644 script.module.mutagen/lib/mutagen/_riff.py
 delete mode 100644 script.module.mutagen/lib/mutagen/_senf/__init__.py
 delete mode 100644 script.module.mutagen/lib/mutagen/_senf/_argv.py
 delete mode 100644 script.module.mutagen/lib/mutagen/_senf/_compat.py
 delete mode 100644 script.module.mutagen/lib/mutagen/_senf/_environ.py
 delete mode 100644 script.module.mutagen/lib/mutagen/_senf/_fsnative.py
 delete mode 100644 script.module.mutagen/lib/mutagen/_senf/_print.py
 delete mode 100644 script.module.mutagen/lib/mutagen/_senf/_stdlib.py
 delete mode 100644 script.module.mutagen/lib/mutagen/_senf/_temp.py
 delete mode 100644 script.module.mutagen/lib/mutagen/_senf/_winansi.py
 delete mode 100644 script.module.mutagen/lib/mutagen/_senf/_winapi.py
 create mode 100644 script.module.mutagen/lib/mutagen/dsdiff.py
 create mode 100644 script.module.mutagen/lib/mutagen/wave.py
 rename script.module.mutagen/{ => resources}/icon.png (100%)

diff --git a/script.module.mutagen/LICENSE b/script.module.mutagen/LICENCE.txt
similarity index 100%
rename from script.module.mutagen/LICENSE
rename to script.module.mutagen/LICENCE.txt
diff --git a/script.module.mutagen/README.rst b/script.module.mutagen/README.rst
deleted file mode 100644
index 8faa47210..000000000
--- a/script.module.mutagen/README.rst
+++ /dev/null
@@ -1,28 +0,0 @@
-.. image:: https://cdn.rawgit.com/quodlibet/mutagen/master/docs/images/logo.svg
-   :align: center
-   :width: 400px
-
-|
-
-Mutagen is a Python module to handle audio metadata. It supports ASF, FLAC,
-MP4, Monkey's Audio, MP3, Musepack, Ogg Opus, Ogg FLAC, Ogg Speex, Ogg Theora,
-Ogg Vorbis, True Audio, WavPack, OptimFROG, and AIFF audio files. All
-versions of ID3v2 are supported, and all standard ID3v2.4 frames are parsed.
-It can read Xing headers to accurately calculate the bitrate and length of
-MP3s. ID3 and APEv2 tags can be edited regardless of audio format. It can also
-manipulate Ogg streams on an individual packet/page level.
-
-Mutagen works with Python 3.5+ (CPython and PyPy) on Linux, Windows and
-macOS, and has no dependencies outside the Python standard library. Mutagen
-is licensed under the GPL version 2 or later.
-
-For more information visit https://mutagen.readthedocs.org
-
-.. image:: https://travis-ci.org/quodlibet/mutagen.svg?branch=master
-   :target: https://travis-ci.org/quodlibet/mutagen
-
-.. image:: https://dev.azure.com/quodlibet/mutagen/_apis/build/status/quodlibet.mutagen
-   :target: https://dev.azure.com/quodlibet/mutagen/_build/latest?definitionId=3
-
-..
image:: https://codecov.io/gh/quodlibet/mutagen/branch/master/graph/badge.svg - :target: https://codecov.io/gh/quodlibet/mutagen diff --git a/script.module.mutagen/addon.xml b/script.module.mutagen/addon.xml index f7356d7bf..eaae9baae 100644 --- a/script.module.mutagen/addon.xml +++ b/script.module.mutagen/addon.xml @@ -1,7 +1,7 @@ - + - + @@ -12,7 +12,7 @@ https://mutagen.readthedocs.io/en/latest/ https://github.com/quodlibet/mutagen - icon.png + resources/icon.png diff --git a/script.module.mutagen/lib/mutagen/__init__.py b/script.module.mutagen/lib/mutagen/__init__.py index 8e477551b..4236d84d9 100644 --- a/script.module.mutagen/lib/mutagen/__init__.py +++ b/script.module.mutagen/lib/mutagen/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2005 Michael Urman # # This program is free software; you can redistribute it and/or modify @@ -23,7 +22,7 @@ from mutagen._file import FileType, StreamInfo, File from mutagen._tags import Tags, Metadata, PaddingInfo -version = (1, 44, 0) +version = (1, 47, 0) """Version tuple.""" version_string = ".".join(map(str, version)) diff --git a/script.module.mutagen/lib/mutagen/_compat.py b/script.module.mutagen/lib/mutagen/_compat.py deleted file mode 100644 index ca6ae1ac8..000000000 --- a/script.module.mutagen/lib/mutagen/_compat.py +++ /dev/null @@ -1,94 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (C) 2013 Christoph Reiter -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. - -import sys - - -PY2 = sys.version_info[0] == 2 -PY3 = not PY2 - -if PY2: - from StringIO import StringIO - BytesIO = StringIO - from cStringIO import StringIO as cBytesIO - from itertools import izip, izip_longest - - long_ = long - integer_types = (int, long) - string_types = (str, unicode) - text_type = unicode - - xrange = xrange - cmp = cmp - chr_ = chr - - def endswith(text, end): - return text.endswith(end) - - iteritems = lambda d: d.iteritems() - itervalues = lambda d: d.itervalues() - iterkeys = lambda d: d.iterkeys() - - iterbytes = lambda b: iter(b) - - exec("def reraise(tp, value, tb):\n raise tp, value, tb") - - def swap_to_string(cls): - if "__str__" in cls.__dict__: - cls.__unicode__ = cls.__str__ - - if "__bytes__" in cls.__dict__: - cls.__str__ = cls.__bytes__ - - return cls - - import __builtin__ as builtins - builtins - -elif PY3: - from io import StringIO - StringIO = StringIO - from io import BytesIO - cBytesIO = BytesIO - from itertools import zip_longest - - long_ = int - integer_types = (int,) - string_types = (str,) - text_type = str - - izip_longest = zip_longest - izip = zip - xrange = range - cmp = lambda a, b: (a > b) - (a < b) - chr_ = lambda x: bytes([x]) - - def endswith(text, end): - # usefull for paths which can be both, str and bytes - if isinstance(text, str): - if not isinstance(end, str): - end = end.decode("ascii") - else: - if not isinstance(end, bytes): - end = end.encode("ascii") - return text.endswith(end) - - iteritems = lambda d: iter(d.items()) - itervalues = lambda d: iter(d.values()) - iterkeys = lambda d: iter(d.keys()) - - iterbytes = lambda b: (bytes([v]) for v in b) - - def reraise(tp, value, tb): - raise tp(value).with_traceback(tb) - - def swap_to_string(cls): - return cls - - import builtins - builtins diff --git a/script.module.mutagen/lib/mutagen/_constants.py 
b/script.module.mutagen/lib/mutagen/_constants.py index 5c1c1a10d..772823155 100644 --- a/script.module.mutagen/lib/mutagen/_constants.py +++ b/script.module.mutagen/lib/mutagen/_constants.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by diff --git a/script.module.mutagen/lib/mutagen/_file.py b/script.module.mutagen/lib/mutagen/_file.py index 850556e49..edf9e0cce 100644 --- a/script.module.mutagen/lib/mutagen/_file.py +++ b/script.module.mutagen/lib/mutagen/_file.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2005 Michael Urman # # This program is free software; you can redistribute it and/or modify @@ -7,9 +6,9 @@ # (at your option) any later version. import warnings +from typing import List from mutagen._util import DictMixin, loadfile -from mutagen._compat import izip class FileType(DictMixin): @@ -83,9 +82,9 @@ def __delitem__(self, key): if self.tags is None: raise KeyError(key) else: - del(self.tags[key]) + del self.tags[key] - def keys(self): + def keys(self) -> list: """Return a list of keys in the metadata tag. If the file has no tags at all, an empty list is returned. @@ -132,12 +131,13 @@ def save(self, filething=None, **kwargs): if self.tags is not None: return self.tags.save(filething, **kwargs) - def pprint(self): + def pprint(self) -> str: """ Returns: text: stream information and comment key=value pairs. """ + assert self.info is not None stream = "%s (%s)" % (self.info.pprint(), self.mime[0]) try: tags = self.tags.pprint() @@ -146,7 +146,7 @@ def pprint(self): else: return stream + ((tags and "\n" + tags) or "") - def add_tags(self): + def add_tags(self) -> None: """Adds new tags to the file. Raises: @@ -157,7 +157,7 @@ def add_tags(self): raise NotImplementedError @property - def mime(self): + def mime(self) -> List[str]: """A list of mime types (:class:`mutagen.text`)""" mimes = [] @@ -168,7 +168,7 @@ def mime(self): return mimes @staticmethod - def score(filename, fileobj, header): + def score(filename, fileobj, header) -> int: """Returns a score for how likely the file can be parsed by this type. Args: @@ -196,7 +196,7 @@ class StreamInfo(object): __module__ = "mutagen" - def pprint(self): + def pprint(self) -> str: """ Returns: text: Print stream information @@ -221,13 +221,13 @@ def File(filething, options=None, easy=False): filething (filething) options: Sequence of :class:`FileType` implementations, defaults to all included ones. - easy (bool): If the easy wrappers should be returnd if available. + easy (bool): If the easy wrappers should be returned if available. For example :class:`EasyMP3 ` instead of :class:`MP3 `. Returns: FileType: A FileType instance for the detected type or `None` in case - the type couln't be determined. + the type couldn't be determined. Raises: MutagenError: in case the detected type fails to load the file. 
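[editor note: a minimal usage sketch of the File() entry point documented in the hunk above, assuming the bundled mutagen 1.47.0 API; "example.wav" is a placeholder path, and detection of the new WAVE and DSDIFF types depends on the options list extended in the next hunk.]

    import mutagen

    # File() scores every registered FileType against the header and returns
    # the best match, or None if the type could not be determined.
    audio = mutagen.File("example.wav")
    if audio is None:
        raise SystemExit("file type could not be determined")

    print(mutagen.version_string)  # "1.47.0" for this release
    print(audio.mime)              # mime types reported by the detected FileType
    print(audio.pprint())          # stream info followed by tag key=value pairs
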
@@ -268,10 +268,12 @@ def File(filething, options=None, easy=False): from mutagen.smf import SMF from mutagen.tak import TAK from mutagen.dsf import DSF + from mutagen.dsdiff import DSDIFF + from mutagen.wave import WAVE options = [MP3, TrueAudio, OggTheora, OggSpeex, OggVorbis, OggFLAC, FLAC, AIFF, APEv2File, MP4, ID3FileType, WavPack, Musepack, MonkeysAudio, OptimFROG, ASF, OggOpus, AAC, AC3, - SMF, TAK, DSF] + SMF, TAK, DSF, DSDIFF, WAVE] if not options: return None @@ -289,7 +291,7 @@ def File(filething, options=None, easy=False): results = [(Kind.score(filething.name, fileobj, header), Kind.__name__) for Kind in options] - results = list(izip(results, options)) + results = list(zip(results, options)) results.sort() (score, name), Kind = results[-1] if score > 0: diff --git a/script.module.mutagen/lib/mutagen/_iff.py b/script.module.mutagen/lib/mutagen/_iff.py new file mode 100644 index 000000000..cdc6e3d8f --- /dev/null +++ b/script.module.mutagen/lib/mutagen/_iff.py @@ -0,0 +1,399 @@ +# Copyright (C) 2014 Evan Purkhiser +# 2014 Ben Ockmore +# 2017 Borewit +# 2019-2021 Philipp Wolfer +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +"""Base classes for various IFF based formats (e.g. AIFF or RIFF).""" + +import sys + +from mutagen.id3 import ID3 +from mutagen.id3._util import ID3NoHeaderError, error as ID3Error +from mutagen._util import ( + MutagenError, + convert_error, + delete_bytes, + insert_bytes, + loadfile, + reraise, + resize_bytes, +) + + +class error(MutagenError): + pass + + +class InvalidChunk(error): + pass + + +class EmptyChunk(InvalidChunk): + pass + + +def is_valid_chunk_id(id: str) -> bool: + """ is_valid_chunk_id(FOURCC) + + Arguments: + id (FOURCC) + Returns: + true if valid; otherwise false + + Check if argument id is valid FOURCC type. + """ + + assert isinstance(id, str), \ + 'id is of type %s, must be str: %r' % (type(id), id) + + return ((0 < len(id) <= 4) and (min(id) >= ' ') and + (max(id) <= '~')) + + +# Assert FOURCC formatted valid +def assert_valid_chunk_id(id: str) -> None: + if not is_valid_chunk_id(id): + raise ValueError("IFF chunk ID must be four ASCII characters.") + + +class IffChunk(object): + """Generic representation of a single IFF chunk. + + IFF chunks always consist of an ID followed by the chunk size. The exact + format varies between different IFF based formats, e.g. AIFF uses + big-endian while RIFF uses little-endian. + """ + + # Chunk headers are usually 8 bytes long (4 for ID and 4 for the size) + HEADER_SIZE = 8 + + @classmethod + def parse_header(cls, header): + """Read ID and data_size from the given header. + Must be implemented in subclasses.""" + raise error("Not implemented") + + def write_new_header(self, id_, size): + """Write the chunk header with id_ and size to the file. + Must be implemented in subclasses. The data must be written + to the current position in self._fileobj.""" + raise error("Not implemented") + + def write_size(self): + """Write self.data_size to the file. + Must be implemented in subclasses. The data must be written + to the current position in self._fileobj.""" + raise error("Not implemented") + + @classmethod + def get_class(cls, id): + """Returns the class for a new chunk for a given ID. 
+ Can be overridden in subclasses to implement specific chunk types.""" + return cls + + @classmethod + def parse(cls, fileobj, parent_chunk=None): + header = fileobj.read(cls.HEADER_SIZE) + if len(header) < cls.HEADER_SIZE: + raise EmptyChunk('Header size < %i' % cls.HEADER_SIZE) + id, data_size = cls.parse_header(header) + try: + id = id.decode('ascii').rstrip() + except UnicodeDecodeError as e: + raise InvalidChunk(e) + + if not is_valid_chunk_id(id): + raise InvalidChunk('Invalid chunk ID %r' % id) + + return cls.get_class(id)(fileobj, id, data_size, parent_chunk) + + def __init__(self, fileobj, id, data_size, parent_chunk): + self._fileobj = fileobj + self.id = id + self.data_size = data_size + self.parent_chunk = parent_chunk + self.data_offset = fileobj.tell() + self.offset = self.data_offset - self.HEADER_SIZE + self._calculate_size() + + def __repr__(self): + return ("<%s id=%s, offset=%i, size=%i, data_offset=%i, data_size=%i>" + % (type(self).__name__, self.id, self.offset, self.size, + self.data_offset, self.data_size)) + + def read(self) -> bytes: + """Read the chunks data""" + + self._fileobj.seek(self.data_offset) + return self._fileobj.read(self.data_size) + + def write(self, data: bytes) -> None: + """Write the chunk data""" + + if len(data) > self.data_size: + raise ValueError + + self._fileobj.seek(self.data_offset) + self._fileobj.write(data) + # Write the padding bytes + padding = self.padding() + if padding: + self._fileobj.seek(self.data_offset + self.data_size) + self._fileobj.write(b'\x00' * padding) + + def delete(self) -> None: + """Removes the chunk from the file""" + + delete_bytes(self._fileobj, self.size, self.offset) + if self.parent_chunk is not None: + self.parent_chunk._remove_subchunk(self) + self._fileobj.flush() + + def _update_size(self, size_diff, changed_subchunk=None): + """Update the size of the chunk""" + + old_size = self.size + self.data_size += size_diff + self._fileobj.seek(self.offset + 4) + self.write_size() + self._calculate_size() + if self.parent_chunk is not None: + self.parent_chunk._update_size(self.size - old_size, self) + if changed_subchunk: + self._update_sibling_offsets( + changed_subchunk, old_size - self.size) + + def _calculate_size(self): + self.size = self.HEADER_SIZE + self.data_size + self.padding() + assert self.size % 2 == 0 + + def resize(self, new_data_size: int) -> None: + """Resize the file and update the chunk sizes""" + + old_size = self._get_actual_data_size() + padding = new_data_size % 2 + resize_bytes(self._fileobj, old_size, + new_data_size + padding, self.data_offset) + size_diff = new_data_size - self.data_size + self._update_size(size_diff) + self._fileobj.flush() + + def padding(self) -> int: + """Returns the number of padding bytes (0 or 1). + IFF chunks are required to be a even number in total length. If + data_size is odd a padding byte will be added at the end. + """ + return self.data_size % 2 + + def _get_actual_data_size(self) -> int: + """Returns the data size that is actually possible. + Some files have chunks that are truncated and their reported size + would be outside of the file's actual size.""" + fileobj = self._fileobj + fileobj.seek(0, 2) + file_size = fileobj.tell() + + expected_size = self.data_size + self.padding() + max_size_possible = file_size - self.data_offset + return min(expected_size, max_size_possible) + + +class IffContainerChunkMixin(): + """A IFF chunk containing other chunks. 
+ + A container chunk can have an additional name as the first 4 bytes of the + chunk data followed by an arbitrary number of subchunks. The root chunk of + the file is always a container chunk (e.g. the AIFF chunk or the FORM chunk + for RIFF) but there can be other types of container chunks (e.g. the LIST + chunks used in RIFF). + """ + + def parse_next_subchunk(self): + """""" + raise error("Not implemented") + + def init_container(self, name_size=4): + # Lists can store an additional name identifier before the subchunks + self.__name_size = name_size + if self.data_size < name_size: + raise InvalidChunk( + 'Container chunk data size < %i' % name_size) + + # Read the container name + if name_size > 0: + try: + self.name = self._fileobj.read(name_size).decode('ascii') + except UnicodeDecodeError as e: + raise error(e) + else: + self.name = None + + # Load all IFF subchunks + self.__subchunks = [] + + def subchunks(self): + """Returns a list of all subchunks. + The list is lazily loaded on first access. + """ + if not self.__subchunks: + next_offset = self.data_offset + self.__name_size + while next_offset < self.offset + self.size: + self._fileobj.seek(next_offset) + try: + chunk = self.parse_next_subchunk() + except EmptyChunk: + break + except InvalidChunk: + break + self.__subchunks.append(chunk) + + # Calculate the location of the next chunk + next_offset = chunk.offset + chunk.size + return self.__subchunks + + def insert_chunk(self, id_, data=None): + """Insert a new chunk at the end of the container chunk""" + + if not is_valid_chunk_id(id_): + raise KeyError("Invalid IFF key.") + + next_offset = self.data_offset + self._get_actual_data_size() + size = self.HEADER_SIZE + data_size = 0 + if data: + data_size = len(data) + padding = data_size % 2 + size += data_size + padding + insert_bytes(self._fileobj, size, next_offset) + self._fileobj.seek(next_offset) + self.write_new_header(id_.ljust(4).encode('ascii'), data_size) + self._fileobj.seek(next_offset) + chunk = self.parse_next_subchunk() + self._update_size(chunk.size) + if data: + chunk.write(data) + self.subchunks().append(chunk) + self._fileobj.flush() + return chunk + + def __contains__(self, id_): + """Check if this chunk contains a specific subchunk.""" + assert_valid_chunk_id(id_) + try: + self[id_] + return True + except KeyError: + return False + + def __getitem__(self, id_): + """Get a subchunk by ID.""" + assert_valid_chunk_id(id_) + found_chunk = None + for chunk in self.subchunks(): + if chunk.id == id_: + found_chunk = chunk + break + else: + raise KeyError("No %r chunk found" % id_) + return found_chunk + + def __delitem__(self, id_): + """Remove a chunk from the IFF file""" + assert_valid_chunk_id(id_) + self[id_].delete() + + def _remove_subchunk(self, chunk): + assert chunk in self.__subchunks + self._update_size(-chunk.size, chunk) + self.__subchunks.remove(chunk) + + def _update_sibling_offsets(self, changed_subchunk, size_diff): + """Update the offsets of subchunks after `changed_subchunk`. 
+ """ + index = self.__subchunks.index(changed_subchunk) + sibling_chunks = self.__subchunks[index + 1:len(self.__subchunks)] + for sibling in sibling_chunks: + sibling.offset -= size_diff + sibling.data_offset -= size_diff + + +class IffFile: + """Representation of a IFF file""" + + def __init__(self, chunk_cls, fileobj): + fileobj.seek(0) + self.root = chunk_cls.parse(fileobj) + + def __contains__(self, id_): + """Check if the IFF file contains a specific chunk""" + return id_ in self.root + + def __getitem__(self, id_): + """Get a chunk from the IFF file""" + return self.root[id_] + + def __delitem__(self, id_): + """Remove a chunk from the IFF file""" + self.delete_chunk(id_) + + def delete_chunk(self, id_): + """Remove a chunk from the IFF file""" + del self.root[id_] + + def insert_chunk(self, id_, data=None): + """Insert a new chunk at the end of the IFF file""" + return self.root.insert_chunk(id_, data) + + +class IffID3(ID3): + """A generic IFF file with ID3v2 tags""" + + def _load_file(self, fileobj): + raise error("Not implemented") + + def _pre_load_header(self, fileobj): + try: + fileobj.seek(self._load_file(fileobj)['ID3'].data_offset) + except (InvalidChunk, KeyError): + raise ID3NoHeaderError("No ID3 chunk") + + @convert_error(IOError, error) + @loadfile(writable=True) + def save(self, filething=None, v2_version=4, v23_sep='/', padding=None): + """Save ID3v2 data to the IFF file""" + + fileobj = filething.fileobj + + iff_file = self._load_file(fileobj) + + if 'ID3' not in iff_file: + iff_file.insert_chunk('ID3') + + chunk = iff_file['ID3'] + + try: + data = self._prepare_data( + fileobj, chunk.data_offset, chunk.data_size, v2_version, + v23_sep, padding) + except ID3Error as e: + reraise(error, e, sys.exc_info()[2]) + + chunk.resize(len(data)) + chunk.write(data) + + @convert_error(IOError, error) + @loadfile(writable=True) + def delete(self, filething=None): + """Completely removes the ID3 chunk from the IFF file""" + + try: + iff_file = self._load_file(filething.fileobj) + del iff_file['ID3'] + except KeyError: + pass + self.clear() diff --git a/script.module.mutagen/lib/mutagen/_riff.py b/script.module.mutagen/lib/mutagen/_riff.py new file mode 100644 index 000000000..f3f23f6a5 --- /dev/null +++ b/script.module.mutagen/lib/mutagen/_riff.py @@ -0,0 +1,69 @@ +# Copyright (C) 2017 Borewit +# Copyright (C) 2019-2020 Philipp Wolfer +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. 
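[editor note: a standalone sketch of two invariants the new _iff.py above relies on, restated here rather than importing the private module: chunk IDs are one to four printable ASCII characters, and chunk data is padded to an even total length. The helper names below are illustrative only.]

    def is_valid_fourcc(chunk_id: str) -> bool:
        # 1-4 characters, all within the printable ASCII range ' '..'~'
        return 0 < len(chunk_id) <= 4 and min(chunk_id) >= " " and max(chunk_id) <= "~"

    def total_chunk_size(header_size: int, data_size: int) -> int:
        # IFF chunks get one NUL padding byte when data_size is odd
        return header_size + data_size + (data_size % 2)

    assert is_valid_fourcc("ID3")
    assert not is_valid_fourcc("id3\x00")
    assert total_chunk_size(8, 5) == 14  # 8-byte header + 5 data bytes + 1 pad
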
+ +"""Resource Interchange File Format (RIFF).""" + +import struct +from struct import pack + +from mutagen._iff import ( + IffChunk, + IffContainerChunkMixin, + IffFile, + InvalidChunk, +) + + +class RiffChunk(IffChunk): + """Generic RIFF chunk""" + + @classmethod + def parse_header(cls, header): + return struct.unpack('<4sI', header) + + @classmethod + def get_class(cls, id): + if id in (u'LIST', u'RIFF'): + return RiffListChunk + else: + return cls + + def write_new_header(self, id_, size): + self._fileobj.write(pack('<4sI', id_, size)) + + def write_size(self): + self._fileobj.write(pack(' use our ctypes one as well - try: - del_windows_env_var(key) - except WindowsError: - pass - else: - os.unsetenv(key) - - -def putenv(key, value): - """Like `os.putenv` but takes unicode under Windows + Python 2 - - Args: - key (pathlike): The env var to get - value (pathlike): The value to set - Raises: - ValueError - """ - - key = path2fsn(key) - value = path2fsn(value) - - if is_win and PY2: - try: - set_windows_env_var(key, value) - except WindowsError: - # py3 + win fails here - raise ValueError - else: - try: - os.putenv(key, value) - except OSError: - # win + py3 raise here for invalid keys which is probably a bug. - # ValueError seems better - raise ValueError diff --git a/script.module.mutagen/lib/mutagen/_senf/_fsnative.py b/script.module.mutagen/lib/mutagen/_senf/_fsnative.py deleted file mode 100644 index a1e5967cc..000000000 --- a/script.module.mutagen/lib/mutagen/_senf/_fsnative.py +++ /dev/null @@ -1,666 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2016 Christoph Reiter -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -import os -import sys -import ctypes -import codecs - -from . 
import _winapi as winapi -from ._compat import text_type, PY3, PY2, urlparse, quote, unquote, urlunparse - - -is_win = os.name == "nt" -is_unix = not is_win -is_darwin = sys.platform == "darwin" - -_surrogatepass = "strict" if PY2 else "surrogatepass" - - -def _normalize_codec(codec, _cache={}): - """Raises LookupError""" - - try: - return _cache[codec] - except KeyError: - _cache[codec] = codecs.lookup(codec).name - return _cache[codec] - - -def _swap_bytes(data): - """swaps bytes for 16 bit, leaves remaining trailing bytes alone""" - - a, b = data[1::2], data[::2] - data = bytearray().join(bytearray(x) for x in zip(a, b)) - if len(b) > len(a): - data += b[-1:] - return bytes(data) - - -def _codec_fails_on_encode_surrogates(codec, _cache={}): - """Returns if a codec fails correctly when passing in surrogates with - a surrogatepass/surrogateescape error handler. Some codecs were broken - in Python <3.4 - """ - - try: - return _cache[codec] - except KeyError: - try: - u"\uD800\uDC01".encode(codec) - except UnicodeEncodeError: - _cache[codec] = True - else: - _cache[codec] = False - return _cache[codec] - - -def _codec_can_decode_with_surrogatepass(codec, _cache={}): - """Returns if a codec supports the surrogatepass error handler when - decoding. Some codecs were broken in Python <3.4 - """ - - try: - return _cache[codec] - except KeyError: - try: - u"\ud83d".encode( - codec, _surrogatepass).decode(codec, _surrogatepass) - except UnicodeDecodeError: - _cache[codec] = False - else: - _cache[codec] = True - return _cache[codec] - - -def _decode_surrogatepass(data, codec): - """Like data.decode(codec, 'surrogatepass') but makes utf-16-le/be work - on Python < 3.4 + Windows - - https://bugs.python.org/issue27971 - - Raises UnicodeDecodeError, LookupError - """ - - try: - return data.decode(codec, _surrogatepass) - except UnicodeDecodeError: - if not _codec_can_decode_with_surrogatepass(codec): - if _normalize_codec(codec) == "utf-16-be": - data = _swap_bytes(data) - codec = "utf-16-le" - if _normalize_codec(codec) == "utf-16-le": - buffer_ = ctypes.create_string_buffer(data + b"\x00\x00") - value = ctypes.wstring_at(buffer_, len(data) // 2) - if value.encode("utf-16-le", _surrogatepass) != data: - raise - return value - else: - raise - else: - raise - - -def _winpath2bytes_py3(text, codec): - """Fallback implementation for text including surrogates""" - - # merge surrogate codepoints - if _normalize_codec(codec).startswith("utf-16"): - # fast path, utf-16 merges anyway - return text.encode(codec, _surrogatepass) - return _decode_surrogatepass( - text.encode("utf-16-le", _surrogatepass), - "utf-16-le").encode(codec, _surrogatepass) - - -if PY2: - def _winpath2bytes(text, codec): - return text.encode(codec) -else: - def _winpath2bytes(text, codec): - if _codec_fails_on_encode_surrogates(codec): - try: - return text.encode(codec) - except UnicodeEncodeError: - return _winpath2bytes_py3(text, codec) - else: - return _winpath2bytes_py3(text, codec) - - -def fsn2norm(path): - """ - Args: - path (fsnative): The path to normalize - Returns: - `fsnative` - - Normalizes an fsnative path. - - The same underlying path can have multiple representations as fsnative - (due to surrogate pairs and variable length encodings). When concatenating - fsnative the result might be different than concatenating the serialized - form and then deserializing it. - - This returns the normalized form i.e. the form which os.listdir() would - return. 
This is useful when you alter fsnative but require that the same - underlying path always maps to the same fsnative value. - - All functions like :func:`bytes2fsn`, :func:`fsnative`, :func:`text2fsn` - and :func:`path2fsn` always return a normalized path, independent of their - input. - """ - - native = _fsn2native(path) - - if is_win: - return _decode_surrogatepass( - native.encode("utf-16-le", _surrogatepass), - "utf-16-le") - elif PY3: - return bytes2fsn(native, None) - else: - return path - - -def _fsn2legacy(path): - """Takes a fsnative path and returns a path that can be put into os.environ - or sys.argv. Might result in a mangled path on Python2 + Windows. - Can't fail. - - Args: - path (fsnative) - Returns: - str - """ - - if PY2 and is_win: - return path.encode(_encoding, "replace") - return path - - -def _fsnative(text): - if not isinstance(text, text_type): - raise TypeError("%r needs to be a text type (%r)" % (text, text_type)) - - if is_unix: - # First we go to bytes so we can be sure we have a valid source. - # Theoretically we should fail here in case we have a non-unicode - # encoding. But this would make everything complicated and there is - # no good way to handle a failure from the user side. Instead - # fall back to utf-8 which is the most likely the right choice in - # a mis-configured environment - encoding = _encoding - try: - path = text.encode(encoding, _surrogatepass) - except UnicodeEncodeError: - path = text.encode("utf-8", _surrogatepass) - - if b"\x00" in path: - path = path.replace(b"\x00", fsn2bytes(_fsnative(u"\uFFFD"), None)) - - if PY3: - return path.decode(_encoding, "surrogateescape") - return path - else: - if u"\x00" in text: - text = text.replace(u"\x00", u"\uFFFD") - text = fsn2norm(text) - return text - - -def _create_fsnative(type_): - # a bit of magic to make fsnative(u"foo") and isinstance(path, fsnative) - # work - - class meta(type): - - def __instancecheck__(self, instance): - return _typecheck_fsnative(instance) - - def __subclasscheck__(self, subclass): - return issubclass(subclass, type_) - - class impl(object): - """fsnative(text=u"") - - Args: - text (text): The text to convert to a path - Returns: - fsnative: The new path. - Raises: - TypeError: In case something other then `text` has been passed - - This type is a virtual base class for the real path type. - Instantiating it returns an instance of the real path type and it - overrides instance and subclass checks so that `isinstance` and - `issubclass` checks work: - - :: - - isinstance(fsnative(u"foo"), fsnative) == True - issubclass(type(fsnative(u"foo")), fsnative) == True - - The real returned type is: - - - **Python 2 + Windows:** :obj:`python:unicode`, with ``surrogates``, - without ``null`` - - **Python 2 + Unix:** :obj:`python:str`, without ``null`` - - **Python 3 + Windows:** :obj:`python3:str`, with ``surrogates``, - without ``null`` - - **Python 3 + Unix:** :obj:`python3:str`, with ``surrogates``, without - ``null``, without code points not encodable with the locale encoding - - Constructing a `fsnative` can't fail. - - Passing a `fsnative` to :func:`open` will never lead to `ValueError` - or `TypeError`. - - Any operation on `fsnative` can also use the `str` type, as long as - the `str` only contains ASCII and no NULL. 
- """ - - def __new__(cls, text=u""): - return _fsnative(text) - - new_type = meta("fsnative", (object,), dict(impl.__dict__)) - new_type.__module__ = "senf" - return new_type - - -fsnative_type = text_type if is_win or PY3 else bytes -fsnative = _create_fsnative(fsnative_type) - - -def _typecheck_fsnative(path): - """ - Args: - path (object) - Returns: - bool: if path is a fsnative - """ - - if not isinstance(path, fsnative_type): - return False - - if PY3 or is_win: - if u"\x00" in path: - return False - - if is_unix: - try: - path.encode(_encoding, "surrogateescape") - except UnicodeEncodeError: - return False - elif b"\x00" in path: - return False - - return True - - -def _fsn2native(path): - """ - Args: - path (fsnative) - Returns: - `text` on Windows, `bytes` on Unix - Raises: - TypeError: in case the type is wrong or the ´str` on Py3 + Unix - can't be converted to `bytes` - - This helper allows to validate the type and content of a path. - To reduce overhead the encoded value for Py3 + Unix is returned so - it can be reused. - """ - - if not isinstance(path, fsnative_type): - raise TypeError("path needs to be %s, not %s" % ( - fsnative_type.__name__, type(path).__name__)) - - if is_unix: - if PY3: - try: - path = path.encode(_encoding, "surrogateescape") - except UnicodeEncodeError: - # This look more like ValueError, but raising only one error - # makes things simpler... also one could say str + surrogates - # is its own type - raise TypeError( - "path contained Unicode code points not valid in" - "the current path encoding. To create a valid " - "path from Unicode use text2fsn()") - - if b"\x00" in path: - raise TypeError("fsnative can't contain nulls") - else: - if u"\x00" in path: - raise TypeError("fsnative can't contain nulls") - - return path - - -def _get_encoding(): - """The encoding used for paths, argv, environ, stdout and stdin""" - - encoding = sys.getfilesystemencoding() - if encoding is None: - if is_darwin: - encoding = "utf-8" - elif is_win: - encoding = "mbcs" - else: - encoding = "ascii" - encoding = _normalize_codec(encoding) - return encoding - - -_encoding = _get_encoding() - - -def path2fsn(path): - """ - Args: - path (pathlike): The path to convert - Returns: - `fsnative` - Raises: - TypeError: In case the type can't be converted to a `fsnative` - ValueError: In case conversion fails - - Returns a `fsnative` path for a `pathlike`. 
- """ - - # allow mbcs str on py2+win and bytes on py3 - if PY2: - if is_win: - if isinstance(path, bytes): - path = path.decode(_encoding) - else: - if isinstance(path, text_type): - path = path.encode(_encoding) - if "\x00" in path: - raise ValueError("embedded null") - else: - path = getattr(os, "fspath", lambda x: x)(path) - if isinstance(path, bytes): - if b"\x00" in path: - raise ValueError("embedded null") - path = path.decode(_encoding, "surrogateescape") - elif is_unix and isinstance(path, str): - # make sure we can encode it and this is not just some random - # unicode string - data = path.encode(_encoding, "surrogateescape") - if b"\x00" in data: - raise ValueError("embedded null") - path = fsn2norm(path) - else: - if u"\x00" in path: - raise ValueError("embedded null") - path = fsn2norm(path) - - if not isinstance(path, fsnative_type): - raise TypeError("path needs to be %s", fsnative_type.__name__) - - return path - - -def fsn2text(path, strict=False): - """ - Args: - path (fsnative): The path to convert - strict (bool): Fail in case the conversion is not reversible - Returns: - `text` - Raises: - TypeError: In case no `fsnative` has been passed - ValueError: In case ``strict`` was True and the conversion failed - - Converts a `fsnative` path to `text`. - - Can be used to pass a path to some unicode API, like for example a GUI - toolkit. - - If ``strict`` is True the conversion will fail in case it is not - reversible. This can be useful for converting program arguments that are - supposed to be text and erroring out in case they are not. - - Encoding with a Unicode encoding will always succeed with the result. - """ - - path = _fsn2native(path) - - errors = "strict" if strict else "replace" - - if is_win: - return path.encode("utf-16-le", _surrogatepass).decode("utf-16-le", - errors) - else: - return path.decode(_encoding, errors) - - -def text2fsn(text): - """ - Args: - text (text): The text to convert - Returns: - `fsnative` - Raises: - TypeError: In case no `text` has been passed - - Takes `text` and converts it to a `fsnative`. - - This operation is not reversible and can't fail. - """ - - return fsnative(text) - - -def fsn2bytes(path, encoding="utf-8"): - """ - Args: - path (fsnative): The path to convert - encoding (`str`): encoding used for Windows - Returns: - `bytes` - Raises: - TypeError: If no `fsnative` path is passed - ValueError: If encoding fails or the encoding is invalid - - Converts a `fsnative` path to `bytes`. - - The passed *encoding* is only used on platforms where paths are not - associated with an encoding (Windows for example). - - For Windows paths, lone surrogates will be encoded like normal code points - and surrogate pairs will be merged before encoding. In case of ``utf-8`` - or ``utf-16-le`` this is equal to the `WTF-8 and WTF-16 encoding - `__. - """ - - path = _fsn2native(path) - - if is_win: - if encoding is None: - raise ValueError("invalid encoding %r" % encoding) - - try: - return _winpath2bytes(path, encoding) - except LookupError: - raise ValueError("invalid encoding %r" % encoding) - else: - return path - - -def bytes2fsn(data, encoding="utf-8"): - """ - Args: - data (bytes): The data to convert - encoding (`str`): encoding used for Windows - Returns: - `fsnative` - Raises: - TypeError: If no `bytes` path is passed - ValueError: If decoding fails or the encoding is invalid - - Turns `bytes` to a `fsnative` path. - - The passed *encoding* is only used on platforms where paths are not - associated with an encoding (Windows for example). 
- - For Windows paths ``WTF-8`` is accepted if ``utf-8`` is used and - ``WTF-16`` accepted if ``utf-16-le`` is used. - """ - - if not isinstance(data, bytes): - raise TypeError("data needs to be bytes") - - if is_win: - if encoding is None: - raise ValueError("invalid encoding %r" % encoding) - try: - path = _decode_surrogatepass(data, encoding) - except LookupError: - raise ValueError("invalid encoding %r" % encoding) - if u"\x00" in path: - raise ValueError("contains nulls") - return path - else: - if b"\x00" in data: - raise ValueError("contains nulls") - if PY2: - return data - else: - return data.decode(_encoding, "surrogateescape") - - -def uri2fsn(uri): - """ - Args: - uri (`text` or :obj:`python:str`): A file URI - Returns: - `fsnative` - Raises: - TypeError: In case an invalid type is passed - ValueError: In case the URI isn't a valid file URI - - Takes a file URI and returns a `fsnative` path - """ - - if PY2: - if isinstance(uri, text_type): - uri = uri.encode("utf-8") - if not isinstance(uri, bytes): - raise TypeError("uri needs to be ascii str or unicode") - else: - if not isinstance(uri, str): - raise TypeError("uri needs to be str") - - parsed = urlparse(uri) - scheme = parsed.scheme - netloc = parsed.netloc - path = parsed.path - - if scheme != "file": - raise ValueError("Not a file URI: %r" % uri) - - if not path: - raise ValueError("Invalid file URI: %r" % uri) - - uri = urlunparse(parsed)[7:] - - if is_win: - try: - drive, rest = uri.split(":", 1) - except ValueError: - path = "" - rest = uri.replace("/", "\\") - else: - path = drive[-1] + ":" - rest = rest.replace("/", "\\") - if PY2: - path += unquote(rest) - else: - path += unquote(rest, encoding="utf-8", errors="surrogatepass") - if netloc: - path = "\\\\" + path - if PY2: - path = path.decode("utf-8") - if u"\x00" in path: - raise ValueError("embedded null") - return path - else: - if PY2: - path = unquote(uri) - else: - path = unquote(uri, encoding=_encoding, errors="surrogateescape") - if "\x00" in path: - raise ValueError("embedded null") - return path - - -def fsn2uri(path): - """ - Args: - path (fsnative): The path to convert to an URI - Returns: - `text`: An ASCII only URI - Raises: - TypeError: If no `fsnative` was passed - ValueError: If the path can't be converted - - Takes a `fsnative` path and returns a file URI. - - On Windows non-ASCII characters will be encoded using utf-8 and then - percent encoded. - """ - - path = _fsn2native(path) - - def _quote_path(path): - # RFC 2396 - path = quote(path, "/:@&=+$,") - if PY2: - path = path.decode("ascii") - return path - - if is_win: - buf = ctypes.create_unicode_buffer(winapi.INTERNET_MAX_URL_LENGTH) - length = winapi.DWORD(winapi.INTERNET_MAX_URL_LENGTH) - flags = 0 - try: - winapi.UrlCreateFromPathW(path, buf, ctypes.byref(length), flags) - except WindowsError as e: - raise ValueError(e) - uri = buf[:length.value] - - # For some reason UrlCreateFromPathW escapes some chars outside of - # ASCII and some not. Unquote and re-quote with utf-8. 
- if PY3: - # latin-1 maps code points directly to bytes, which is what we want - uri = unquote(uri, "latin-1") - else: - # Python 2 does what we want by default - uri = unquote(uri) - - return _quote_path(uri.encode("utf-8", _surrogatepass)) - - else: - return u"file://" + _quote_path(path) diff --git a/script.module.mutagen/lib/mutagen/_senf/_print.py b/script.module.mutagen/lib/mutagen/_senf/_print.py deleted file mode 100644 index 63c50fa52..000000000 --- a/script.module.mutagen/lib/mutagen/_senf/_print.py +++ /dev/null @@ -1,424 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2016 Christoph Reiter -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -import sys -import os -import ctypes -import re - -from ._fsnative import _encoding, is_win, is_unix, _surrogatepass, bytes2fsn -from ._compat import text_type, PY2, PY3 -from ._winansi import AnsiState, ansi_split -from . import _winapi as winapi - - -def print_(*objects, **kwargs): - """print_(*objects, sep=None, end=None, file=None, flush=False) - - Args: - objects (object): zero or more objects to print - sep (str): Object separator to use, defaults to ``" "`` - end (str): Trailing string to use, defaults to ``"\\n"``. - If end is ``"\\n"`` then `os.linesep` is used. - file (object): A file-like object, defaults to `sys.stdout` - flush (bool): If the file stream should be flushed - Raises: - EnvironmentError - - Like print(), but: - - * Supports printing filenames under Unix + Python 3 and Windows + Python 2 - * Emulates ANSI escape sequence support under Windows - * Never fails due to encoding/decoding errors. Tries hard to get everything - on screen as is, but will fall back to "?" if all fails. - - This does not conflict with ``colorama``, but will not use it on Windows. 
- """ - - sep = kwargs.get("sep") - sep = sep if sep is not None else " " - end = kwargs.get("end") - end = end if end is not None else "\n" - file = kwargs.get("file") - file = file if file is not None else sys.stdout - flush = bool(kwargs.get("flush", False)) - - if is_win: - _print_windows(objects, sep, end, file, flush) - else: - _print_unix(objects, sep, end, file, flush) - - -def _print_unix(objects, sep, end, file, flush): - """A print_() implementation which writes bytes""" - - encoding = _encoding - - if isinstance(sep, text_type): - sep = sep.encode(encoding, "replace") - if not isinstance(sep, bytes): - raise TypeError - - if isinstance(end, text_type): - end = end.encode(encoding, "replace") - if not isinstance(end, bytes): - raise TypeError - - if end == b"\n": - end = os.linesep - if PY3: - end = end.encode("ascii") - - parts = [] - for obj in objects: - if not isinstance(obj, text_type) and not isinstance(obj, bytes): - obj = text_type(obj) - if isinstance(obj, text_type): - if PY2: - obj = obj.encode(encoding, "replace") - else: - try: - obj = obj.encode(encoding, "surrogateescape") - except UnicodeEncodeError: - obj = obj.encode(encoding, "replace") - assert isinstance(obj, bytes) - parts.append(obj) - - data = sep.join(parts) + end - assert isinstance(data, bytes) - - file = getattr(file, "buffer", file) - - try: - file.write(data) - except TypeError: - if PY3: - # For StringIO, first try with surrogates - surr_data = data.decode(encoding, "surrogateescape") - try: - file.write(surr_data) - except (TypeError, ValueError): - file.write(data.decode(encoding, "replace")) - else: - # for file like objects with don't support bytes - file.write(data.decode(encoding, "replace")) - - if flush: - file.flush() - - -ansi_state = AnsiState() - - -def _print_windows(objects, sep, end, file, flush): - """The windows implementation of print_()""" - - h = winapi.INVALID_HANDLE_VALUE - - try: - fileno = file.fileno() - except (EnvironmentError, AttributeError): - pass - else: - if fileno == 1: - h = winapi.GetStdHandle(winapi.STD_OUTPUT_HANDLE) - elif fileno == 2: - h = winapi.GetStdHandle(winapi.STD_ERROR_HANDLE) - - encoding = _encoding - - parts = [] - for obj in objects: - if isinstance(obj, bytes): - obj = obj.decode(encoding, "replace") - if not isinstance(obj, text_type): - obj = text_type(obj) - parts.append(obj) - - if isinstance(sep, bytes): - sep = sep.decode(encoding, "replace") - if not isinstance(sep, text_type): - raise TypeError - - if isinstance(end, bytes): - end = end.decode(encoding, "replace") - if not isinstance(end, text_type): - raise TypeError - - if end == u"\n": - end = os.linesep - - text = sep.join(parts) + end - assert isinstance(text, text_type) - - is_console = True - if h == winapi.INVALID_HANDLE_VALUE: - is_console = False - else: - # get the default value - info = winapi.CONSOLE_SCREEN_BUFFER_INFO() - if not winapi.GetConsoleScreenBufferInfo(h, ctypes.byref(info)): - is_console = False - - if is_console: - # make sure we flush before we apply any console attributes - file.flush() - - # try to force a utf-8 code page, use the output CP if that fails - cp = winapi.GetConsoleOutputCP() - try: - encoding = "utf-8" - if winapi.SetConsoleOutputCP(65001) == 0: - encoding = None - - for is_ansi, part in ansi_split(text): - if is_ansi: - ansi_state.apply(h, part) - else: - if encoding is not None: - data = part.encode(encoding, _surrogatepass) - else: - data = _encode_codepage(cp, part) - os.write(fileno, data) - finally: - # reset the code page to what we had 
before - winapi.SetConsoleOutputCP(cp) - else: - # try writing bytes first, so in case of Python 2 StringIO we get - # the same type on all platforms - try: - file.write(text.encode("utf-8", _surrogatepass)) - except (TypeError, ValueError): - file.write(text) - - if flush: - file.flush() - - -def _readline_windows(): - """Raises OSError""" - - try: - fileno = sys.stdin.fileno() - except (EnvironmentError, AttributeError): - fileno = -1 - - # In case stdin is replaced, read from that - if fileno != 0: - return _readline_windows_fallback() - - h = winapi.GetStdHandle(winapi.STD_INPUT_HANDLE) - if h == winapi.INVALID_HANDLE_VALUE: - return _readline_windows_fallback() - - buf_size = 1024 - buf = ctypes.create_string_buffer(buf_size * ctypes.sizeof(winapi.WCHAR)) - read = winapi.DWORD() - - text = u"" - while True: - if winapi.ReadConsoleW( - h, buf, buf_size, ctypes.byref(read), None) == 0: - if not text: - return _readline_windows_fallback() - raise ctypes.WinError() - data = buf[:read.value * ctypes.sizeof(winapi.WCHAR)] - text += data.decode("utf-16-le", _surrogatepass) - if text.endswith(u"\r\n"): - return text[:-2] - - -def _decode_codepage(codepage, data): - """ - Args: - codepage (int) - data (bytes) - Returns: - `text` - - Decodes data using the given codepage. If some data can't be decoded - using the codepage it will not fail. - """ - - assert isinstance(data, bytes) - - if not data: - return u"" - - # get the required buffer length first - length = winapi.MultiByteToWideChar(codepage, 0, data, len(data), None, 0) - if length == 0: - raise ctypes.WinError() - - # now decode - buf = ctypes.create_unicode_buffer(length) - length = winapi.MultiByteToWideChar( - codepage, 0, data, len(data), buf, length) - if length == 0: - raise ctypes.WinError() - - return buf[:] - - -def _encode_codepage(codepage, text): - """ - Args: - codepage (int) - text (text) - Returns: - `bytes` - - Encode text using the given code page. Will not fail if a char - can't be encoded using that codepage. - """ - - assert isinstance(text, text_type) - - if not text: - return b"" - - size = (len(text.encode("utf-16-le", _surrogatepass)) // - ctypes.sizeof(winapi.WCHAR)) - - # get the required buffer size - length = winapi.WideCharToMultiByte( - codepage, 0, text, size, None, 0, None, None) - if length == 0: - raise ctypes.WinError() - - # decode to the buffer - buf = ctypes.create_string_buffer(length) - length = winapi.WideCharToMultiByte( - codepage, 0, text, size, buf, length, None, None) - if length == 0: - raise ctypes.WinError() - return buf[:length] - - -def _readline_windows_fallback(): - # In case reading from the console failed (maybe we get piped data) - # we assume the input was generated according to the output encoding. - # Got any better ideas? - assert is_win - cp = winapi.GetConsoleOutputCP() - data = getattr(sys.stdin, "buffer", sys.stdin).readline().rstrip(b"\r\n") - return _decode_codepage(cp, data) - - -def _readline_default(): - assert is_unix - data = getattr(sys.stdin, "buffer", sys.stdin).readline().rstrip(b"\r\n") - if PY3: - return data.decode(_encoding, "surrogateescape") - else: - return data - - -def _readline(): - if is_win: - return _readline_windows() - else: - return _readline_default() - - -def input_(prompt=None): - """ - Args: - prompt (object): Prints the passed object to stdout without - adding a trailing newline - Returns: - `fsnative` - Raises: - EnvironmentError - - Like :func:`python3:input` but returns a `fsnative` and allows printing - filenames as prompt to stdout. 
- - Use :func:`fsn2text` on the result if you just want to deal with text. - """ - - if prompt is not None: - print_(prompt, end="") - - return _readline() - - -def _get_file_name_for_handle(handle): - """(Windows only) Returns a file name for a file handle. - - Args: - handle (winapi.HANDLE) - Returns: - `text` or `None` if no file name could be retrieved. - """ - - assert is_win - assert handle != winapi.INVALID_HANDLE_VALUE - - size = winapi.FILE_NAME_INFO.FileName.offset + \ - winapi.MAX_PATH * ctypes.sizeof(winapi.WCHAR) - buf = ctypes.create_string_buffer(size) - - if winapi.GetFileInformationByHandleEx is None: - # Windows XP - return None - - status = winapi.GetFileInformationByHandleEx( - handle, winapi.FileNameInfo, buf, size) - if status == 0: - return None - - name_info = ctypes.cast( - buf, ctypes.POINTER(winapi.FILE_NAME_INFO)).contents - offset = winapi.FILE_NAME_INFO.FileName.offset - data = buf[offset:offset + name_info.FileNameLength] - return bytes2fsn(data, "utf-16-le") - - -def supports_ansi_escape_codes(fd): - """Returns whether the output device is capable of interpreting ANSI escape - codes when :func:`print_` is used. - - Args: - fd (int): file descriptor (e.g. ``sys.stdout.fileno()``) - Returns: - `bool` - """ - - if os.isatty(fd): - return True - - if not is_win: - return False - - # Check for cygwin/msys terminal - handle = winapi._get_osfhandle(fd) - if handle == winapi.INVALID_HANDLE_VALUE: - return False - - if winapi.GetFileType(handle) != winapi.FILE_TYPE_PIPE: - return False - - file_name = _get_file_name_for_handle(handle) - match = re.match( - "^\\\\(cygwin|msys)-[a-z0-9]+-pty[0-9]+-(from|to)-master$", file_name) - return match is not None diff --git a/script.module.mutagen/lib/mutagen/_senf/_stdlib.py b/script.module.mutagen/lib/mutagen/_senf/_stdlib.py deleted file mode 100644 index f3193d337..000000000 --- a/script.module.mutagen/lib/mutagen/_senf/_stdlib.py +++ /dev/null @@ -1,154 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2016 Christoph Reiter -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- -import re -import os - -from ._fsnative import path2fsn, fsnative, is_win -from ._compat import PY2 -from ._environ import environ - - -sep = path2fsn(os.sep) -pathsep = path2fsn(os.pathsep) -curdir = path2fsn(os.curdir) -pardir = path2fsn(os.pardir) -altsep = path2fsn(os.altsep) if os.altsep is not None else None -extsep = path2fsn(os.extsep) -devnull = path2fsn(os.devnull) -defpath = path2fsn(os.defpath) - - -def getcwd(): - """Like `os.getcwd` but returns a `fsnative` path - - Returns: - `fsnative` - """ - - if is_win and PY2: - return os.getcwdu() - return os.getcwd() - - -def _get_userdir(user=None): - """Returns the user dir or None""" - - if user is not None and not isinstance(user, fsnative): - raise TypeError - - if is_win: - if "HOME" in environ: - path = environ["HOME"] - elif "USERPROFILE" in environ: - path = environ["USERPROFILE"] - elif "HOMEPATH" in environ and "HOMEDRIVE" in environ: - path = os.path.join(environ["HOMEDRIVE"], environ["HOMEPATH"]) - else: - return - - if user is None: - return path - else: - return os.path.join(os.path.dirname(path), user) - else: - import pwd - - if user is None: - if "HOME" in environ: - return environ["HOME"] - else: - try: - return path2fsn(pwd.getpwuid(os.getuid()).pw_dir) - except KeyError: - return - else: - try: - return path2fsn(pwd.getpwnam(user).pw_dir) - except KeyError: - return - - -def expanduser(path): - """ - Args: - path (pathlike): A path to expand - Returns: - `fsnative` - - Like :func:`python:os.path.expanduser` but supports unicode home - directories under Windows + Python 2 and always returns a `fsnative`. - """ - - path = path2fsn(path) - - if path == "~": - return _get_userdir() - elif path.startswith("~" + sep) or ( - altsep is not None and path.startswith("~" + altsep)): - userdir = _get_userdir() - if userdir is None: - return path - return userdir + path[1:] - elif path.startswith("~"): - sep_index = path.find(sep) - if altsep is not None: - alt_index = path.find(altsep) - if alt_index != -1 and alt_index < sep_index: - sep_index = alt_index - - if sep_index == -1: - user = path[1:] - rest = "" - else: - user = path[1:sep_index] - rest = path[sep_index:] - - userdir = _get_userdir(user) - if userdir is not None: - return userdir + rest - else: - return path - else: - return path - - -def expandvars(path): - """ - Args: - path (pathlike): A path to expand - Returns: - `fsnative` - - Like :func:`python:os.path.expandvars` but supports unicode under Windows - + Python 2 and always returns a `fsnative`. 
- """ - - path = path2fsn(path) - - def repl_func(match): - return environ.get(match.group(1), match.group(0)) - - path = re.compile(r"\$(\w+)", flags=re.UNICODE).sub(repl_func, path) - if os.name == "nt": - path = re.sub(r"%([^%]+)%", repl_func, path) - return re.sub(r"\$\{([^\}]+)\}", repl_func, path) diff --git a/script.module.mutagen/lib/mutagen/_senf/_temp.py b/script.module.mutagen/lib/mutagen/_senf/_temp.py deleted file mode 100644 index d29b72176..000000000 --- a/script.module.mutagen/lib/mutagen/_senf/_temp.py +++ /dev/null @@ -1,96 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2016 Christoph Reiter -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -import tempfile - -from ._fsnative import path2fsn, fsnative - - -def gettempdir(): - """ - Returns: - `fsnative` - - Like :func:`python3:tempfile.gettempdir`, but always returns a `fsnative` - path - """ - - # FIXME: I don't want to reimplement all that logic, reading env vars etc. - # At least for the default it works. - return path2fsn(tempfile.gettempdir()) - - -def gettempprefix(): - """ - Returns: - `fsnative` - - Like :func:`python3:tempfile.gettempprefix`, but always returns a - `fsnative` path - """ - - return path2fsn(tempfile.gettempprefix()) - - -def mkstemp(suffix=None, prefix=None, dir=None, text=False): - """ - Args: - suffix (`pathlike` or `None`): suffix or `None` to use the default - prefix (`pathlike` or `None`): prefix or `None` to use the default - dir (`pathlike` or `None`): temp dir or `None` to use the default - text (bool): if the file should be opened in text mode - Returns: - Tuple[`int`, `fsnative`]: - A tuple containing the file descriptor and the file path - Raises: - EnvironmentError - - Like :func:`python3:tempfile.mkstemp` but always returns a `fsnative` - path. - """ - - suffix = fsnative() if suffix is None else path2fsn(suffix) - prefix = gettempprefix() if prefix is None else path2fsn(prefix) - dir = gettempdir() if dir is None else path2fsn(dir) - - return tempfile.mkstemp(suffix, prefix, dir, text) - - -def mkdtemp(suffix=None, prefix=None, dir=None): - """ - Args: - suffix (`pathlike` or `None`): suffix or `None` to use the default - prefix (`pathlike` or `None`): prefix or `None` to use the default - dir (`pathlike` or `None`): temp dir or `None` to use the default - Returns: - `fsnative`: A path to a directory - Raises: - EnvironmentError - - Like :func:`python3:tempfile.mkstemp` but always returns a `fsnative` path. 
- """ - - suffix = fsnative() if suffix is None else path2fsn(suffix) - prefix = gettempprefix() if prefix is None else path2fsn(prefix) - dir = gettempdir() if dir is None else path2fsn(dir) - - return tempfile.mkdtemp(suffix, prefix, dir) diff --git a/script.module.mutagen/lib/mutagen/_senf/_winansi.py b/script.module.mutagen/lib/mutagen/_senf/_winansi.py deleted file mode 100644 index fbbc1c222..000000000 --- a/script.module.mutagen/lib/mutagen/_senf/_winansi.py +++ /dev/null @@ -1,319 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2016 Christoph Reiter -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -import ctypes -import re -import atexit - -from . import _winapi as winapi - - -def ansi_parse(code): - """Returns command, (args)""" - - return code[-1:], tuple([int(v or "0") for v in code[2:-1].split(";")]) - - -def ansi_split(text, _re=re.compile(u"(\x1b\\[(\\d*;?)*\\S)")): - """Yields (is_ansi, text)""" - - for part in _re.split(text): - if part: - yield (bool(_re.match(part)), part) - - -class AnsiCommand(object): - TEXT = "m" - - MOVE_UP = "A" - MOVE_DOWN = "B" - MOVE_FORWARD = "C" - MOVE_BACKWARD = "D" - - SET_POS = "H" - SET_POS_ALT = "f" - - SAVE_POS = "s" - RESTORE_POS = "u" - - -class TextAction(object): - RESET_ALL = 0 - - SET_BOLD = 1 - SET_DIM = 2 - SET_ITALIC = 3 - SET_UNDERLINE = 4 - SET_BLINK = 5 - SET_BLINK_FAST = 6 - SET_REVERSE = 7 - SET_HIDDEN = 8 - - RESET_BOLD = 21 - RESET_DIM = 22 - RESET_ITALIC = 23 - RESET_UNDERLINE = 24 - RESET_BLINK = 25 - RESET_BLINK_FAST = 26 - RESET_REVERSE = 27 - RESET_HIDDEN = 28 - - FG_BLACK = 30 - FG_RED = 31 - FG_GREEN = 32 - FG_YELLOW = 33 - FG_BLUE = 34 - FG_MAGENTA = 35 - FG_CYAN = 36 - FG_WHITE = 37 - - FG_DEFAULT = 39 - - FG_LIGHT_BLACK = 90 - FG_LIGHT_RED = 91 - FG_LIGHT_GREEN = 92 - FG_LIGHT_YELLOW = 93 - FG_LIGHT_BLUE = 94 - FG_LIGHT_MAGENTA = 95 - FG_LIGHT_CYAN = 96 - FG_LIGHT_WHITE = 97 - - BG_BLACK = 40 - BG_RED = 41 - BG_GREEN = 42 - BG_YELLOW = 43 - BG_BLUE = 44 - BG_MAGENTA = 45 - BG_CYAN = 46 - BG_WHITE = 47 - - BG_DEFAULT = 49 - - BG_LIGHT_BLACK = 100 - BG_LIGHT_RED = 101 - BG_LIGHT_GREEN = 102 - BG_LIGHT_YELLOW = 103 - BG_LIGHT_BLUE = 104 - BG_LIGHT_MAGENTA = 105 - BG_LIGHT_CYAN = 106 - BG_LIGHT_WHITE = 107 - - -class AnsiState(object): - - def __init__(self): - self.default_attrs = None - - self.bold = False - self.bg_light = False - self.fg_light = False - - self.saved_pos = (0, 0) - - def do_text_action(self, attrs, action): - # In case the external state has changed, apply it it to ours. 
- # Mostly the first time this is called. - if attrs & winapi.FOREGROUND_INTENSITY and not self.fg_light \ - and not self.bold: - self.fg_light = True - if attrs & winapi.BACKGROUND_INTENSITY and not self.bg_light: - self.bg_light = True - - dark_fg = { - TextAction.FG_BLACK: 0, - TextAction.FG_RED: winapi.FOREGROUND_RED, - TextAction.FG_GREEN: winapi.FOREGROUND_GREEN, - TextAction.FG_YELLOW: - winapi.FOREGROUND_GREEN | winapi.FOREGROUND_RED, - TextAction.FG_BLUE: winapi.FOREGROUND_BLUE, - TextAction.FG_MAGENTA: winapi.FOREGROUND_BLUE | - winapi.FOREGROUND_RED, - TextAction.FG_CYAN: - winapi.FOREGROUND_BLUE | winapi.FOREGROUND_GREEN, - TextAction.FG_WHITE: - winapi.FOREGROUND_BLUE | winapi.FOREGROUND_GREEN | - winapi.FOREGROUND_RED, - } - - dark_bg = { - TextAction.BG_BLACK: 0, - TextAction.BG_RED: winapi.BACKGROUND_RED, - TextAction.BG_GREEN: winapi.BACKGROUND_GREEN, - TextAction.BG_YELLOW: - winapi.BACKGROUND_GREEN | winapi.BACKGROUND_RED, - TextAction.BG_BLUE: winapi.BACKGROUND_BLUE, - TextAction.BG_MAGENTA: - winapi.BACKGROUND_BLUE | winapi.BACKGROUND_RED, - TextAction.BG_CYAN: - winapi.BACKGROUND_BLUE | winapi.BACKGROUND_GREEN, - TextAction.BG_WHITE: - winapi.BACKGROUND_BLUE | winapi.BACKGROUND_GREEN | - winapi.BACKGROUND_RED, - } - - light_fg = { - TextAction.FG_LIGHT_BLACK: 0, - TextAction.FG_LIGHT_RED: winapi.FOREGROUND_RED, - TextAction.FG_LIGHT_GREEN: winapi.FOREGROUND_GREEN, - TextAction.FG_LIGHT_YELLOW: - winapi.FOREGROUND_GREEN | winapi.FOREGROUND_RED, - TextAction.FG_LIGHT_BLUE: winapi.FOREGROUND_BLUE, - TextAction.FG_LIGHT_MAGENTA: - winapi.FOREGROUND_BLUE | winapi.FOREGROUND_RED, - TextAction.FG_LIGHT_CYAN: - winapi.FOREGROUND_BLUE | winapi.FOREGROUND_GREEN, - TextAction.FG_LIGHT_WHITE: - winapi.FOREGROUND_BLUE | winapi.FOREGROUND_GREEN | - winapi.FOREGROUND_RED, - } - - light_bg = { - TextAction.BG_LIGHT_BLACK: 0, - TextAction.BG_LIGHT_RED: winapi.BACKGROUND_RED, - TextAction.BG_LIGHT_GREEN: winapi.BACKGROUND_GREEN, - TextAction.BG_LIGHT_YELLOW: - winapi.BACKGROUND_GREEN | winapi.BACKGROUND_RED, - TextAction.BG_LIGHT_BLUE: winapi.BACKGROUND_BLUE, - TextAction.BG_LIGHT_MAGENTA: - winapi.BACKGROUND_BLUE | winapi.BACKGROUND_RED, - TextAction.BG_LIGHT_CYAN: - winapi.BACKGROUND_BLUE | winapi.BACKGROUND_GREEN, - TextAction.BG_LIGHT_WHITE: - winapi.BACKGROUND_BLUE | winapi.BACKGROUND_GREEN | - winapi.BACKGROUND_RED, - } - - if action == TextAction.RESET_ALL: - attrs = self.default_attrs - self.bold = self.fg_light = self.bg_light = False - elif action == TextAction.SET_BOLD: - self.bold = True - elif action == TextAction.RESET_BOLD: - self.bold = False - elif action == TextAction.SET_DIM: - self.bold = False - elif action == TextAction.SET_REVERSE: - attrs |= winapi.COMMON_LVB_REVERSE_VIDEO - elif action == TextAction.RESET_REVERSE: - attrs &= ~winapi.COMMON_LVB_REVERSE_VIDEO - elif action == TextAction.SET_UNDERLINE: - attrs |= winapi.COMMON_LVB_UNDERSCORE - elif action == TextAction.RESET_UNDERLINE: - attrs &= ~winapi.COMMON_LVB_UNDERSCORE - elif action == TextAction.FG_DEFAULT: - attrs = (attrs & ~0xF) | (self.default_attrs & 0xF) - self.fg_light = False - elif action == TextAction.BG_DEFAULT: - attrs = (attrs & ~0xF0) | (self.default_attrs & 0xF0) - self.bg_light = False - elif action in dark_fg: - attrs = (attrs & ~0xF) | dark_fg[action] - self.fg_light = False - elif action in dark_bg: - attrs = (attrs & ~0xF0) | dark_bg[action] - self.bg_light = False - elif action in light_fg: - attrs = (attrs & ~0xF) | light_fg[action] - self.fg_light = True - elif action in light_bg: - 
attrs = (attrs & ~0xF0) | light_bg[action] - self.bg_light = True - - if self.fg_light or self.bold: - attrs |= winapi.FOREGROUND_INTENSITY - else: - attrs &= ~winapi.FOREGROUND_INTENSITY - - if self.bg_light: - attrs |= winapi.BACKGROUND_INTENSITY - else: - attrs &= ~winapi.BACKGROUND_INTENSITY - - return attrs - - def apply(self, handle, code): - buffer_info = winapi.CONSOLE_SCREEN_BUFFER_INFO() - if not winapi.GetConsoleScreenBufferInfo(handle, - ctypes.byref(buffer_info)): - return - - attrs = buffer_info.wAttributes - - # We take the first attrs we see as default - if self.default_attrs is None: - self.default_attrs = attrs - # Make sure that like with linux terminals the program doesn't - # affect the prompt after it exits - atexit.register( - winapi.SetConsoleTextAttribute, handle, self.default_attrs) - - cmd, args = ansi_parse(code) - if cmd == AnsiCommand.TEXT: - for action in args: - attrs = self.do_text_action(attrs, action) - winapi.SetConsoleTextAttribute(handle, attrs) - elif cmd in (AnsiCommand.MOVE_UP, AnsiCommand.MOVE_DOWN, - AnsiCommand.MOVE_FORWARD, AnsiCommand.MOVE_BACKWARD): - - coord = buffer_info.dwCursorPosition - x, y = coord.X, coord.Y - - amount = max(args[0], 1) - - if cmd == AnsiCommand.MOVE_UP: - y -= amount - elif cmd == AnsiCommand.MOVE_DOWN: - y += amount - elif cmd == AnsiCommand.MOVE_FORWARD: - x += amount - elif cmd == AnsiCommand.MOVE_BACKWARD: - x -= amount - - x = max(x, 0) - y = max(y, 0) - winapi.SetConsoleCursorPosition(handle, winapi.COORD(x, y)) - elif cmd in (AnsiCommand.SET_POS, AnsiCommand.SET_POS_ALT): - args = list(args) - while len(args) < 2: - args.append(0) - x, y = args[:2] - - win_rect = buffer_info.srWindow - x += win_rect.Left - 1 - y += win_rect.Top - 1 - - x = max(x, 0) - y = max(y, 0) - winapi.SetConsoleCursorPosition(handle, winapi.COORD(x, y)) - elif cmd == AnsiCommand.SAVE_POS: - win_rect = buffer_info.srWindow - coord = buffer_info.dwCursorPosition - x, y = coord.X, coord.Y - x -= win_rect.Left - y -= win_rect.Top - self.saved_pos = (x, y) - elif cmd == AnsiCommand.RESTORE_POS: - win_rect = buffer_info.srWindow - x, y = self.saved_pos - x += win_rect.Left - y += win_rect.Top - winapi.SetConsoleCursorPosition(handle, winapi.COORD(x, y)) diff --git a/script.module.mutagen/lib/mutagen/_senf/_winapi.py b/script.module.mutagen/lib/mutagen/_senf/_winapi.py deleted file mode 100644 index 5e0f78542..000000000 --- a/script.module.mutagen/lib/mutagen/_senf/_winapi.py +++ /dev/null @@ -1,222 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2016 Christoph Reiter -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
-# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -import ctypes -from ctypes import WinDLL, CDLL, wintypes - - -shell32 = WinDLL("shell32") -kernel32 = WinDLL("kernel32") -shlwapi = WinDLL("shlwapi") -msvcrt = CDLL("msvcrt") - -GetCommandLineW = kernel32.GetCommandLineW -GetCommandLineW.argtypes = [] -GetCommandLineW.restype = wintypes.LPCWSTR - -CommandLineToArgvW = shell32.CommandLineToArgvW -CommandLineToArgvW.argtypes = [ - wintypes.LPCWSTR, ctypes.POINTER(ctypes.c_int)] -CommandLineToArgvW.restype = ctypes.POINTER(wintypes.LPWSTR) - -LocalFree = kernel32.LocalFree -LocalFree.argtypes = [wintypes.HLOCAL] -LocalFree.restype = wintypes.HLOCAL - -# https://msdn.microsoft.com/en-us/library/windows/desktop/aa383751.aspx -LPCTSTR = ctypes.c_wchar_p -LPWSTR = wintypes.LPWSTR -LPCWSTR = ctypes.c_wchar_p -LPTSTR = LPWSTR -PCWSTR = ctypes.c_wchar_p -PCTSTR = PCWSTR -PWSTR = ctypes.c_wchar_p -PTSTR = PWSTR -LPVOID = wintypes.LPVOID -WCHAR = wintypes.WCHAR -LPSTR = ctypes.c_char_p - -BOOL = wintypes.BOOL -LPBOOL = ctypes.POINTER(BOOL) -UINT = wintypes.UINT -WORD = wintypes.WORD -DWORD = wintypes.DWORD -SHORT = wintypes.SHORT -HANDLE = wintypes.HANDLE -ULONG = wintypes.ULONG -LPCSTR = wintypes.LPCSTR - -STD_INPUT_HANDLE = DWORD(-10) -STD_OUTPUT_HANDLE = DWORD(-11) -STD_ERROR_HANDLE = DWORD(-12) - -INVALID_HANDLE_VALUE = wintypes.HANDLE(-1).value - -INTERNET_MAX_SCHEME_LENGTH = 32 -INTERNET_MAX_PATH_LENGTH = 2048 -INTERNET_MAX_URL_LENGTH = ( - INTERNET_MAX_SCHEME_LENGTH + len("://") + INTERNET_MAX_PATH_LENGTH) - -FOREGROUND_BLUE = 0x0001 -FOREGROUND_GREEN = 0x0002 -FOREGROUND_RED = 0x0004 -FOREGROUND_INTENSITY = 0x0008 - -BACKGROUND_BLUE = 0x0010 -BACKGROUND_GREEN = 0x0020 -BACKGROUND_RED = 0x0040 -BACKGROUND_INTENSITY = 0x0080 - -COMMON_LVB_REVERSE_VIDEO = 0x4000 -COMMON_LVB_UNDERSCORE = 0x8000 - -UrlCreateFromPathW = shlwapi.UrlCreateFromPathW -UrlCreateFromPathW.argtypes = [ - PCTSTR, PTSTR, ctypes.POINTER(DWORD), DWORD] -UrlCreateFromPathW.restype = ctypes.HRESULT - -SetEnvironmentVariableW = kernel32.SetEnvironmentVariableW -SetEnvironmentVariableW.argtypes = [LPCTSTR, LPCTSTR] -SetEnvironmentVariableW.restype = wintypes.BOOL - -GetEnvironmentVariableW = kernel32.GetEnvironmentVariableW -GetEnvironmentVariableW.argtypes = [LPCTSTR, LPTSTR, DWORD] -GetEnvironmentVariableW.restype = DWORD - -GetEnvironmentStringsW = kernel32.GetEnvironmentStringsW -GetEnvironmentStringsW.argtypes = [] -GetEnvironmentStringsW.restype = ctypes.c_void_p - -FreeEnvironmentStringsW = kernel32.FreeEnvironmentStringsW -FreeEnvironmentStringsW.argtypes = [ctypes.c_void_p] -FreeEnvironmentStringsW.restype = ctypes.c_bool - -GetStdHandle = kernel32.GetStdHandle -GetStdHandle.argtypes = [DWORD] -GetStdHandle.restype = HANDLE - - -class COORD(ctypes.Structure): - - _fields_ = [ - ("X", SHORT), - ("Y", SHORT), - ] - - -class SMALL_RECT(ctypes.Structure): - - _fields_ = [ - ("Left", SHORT), - ("Top", SHORT), - ("Right", SHORT), - ("Bottom", SHORT), - ] - - -class CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure): - - _fields_ = [ - ("dwSize", COORD), - ("dwCursorPosition", COORD), - ("wAttributes", WORD), - ("srWindow", SMALL_RECT), - ("dwMaximumWindowSize", COORD), - ] - - -GetConsoleScreenBufferInfo = kernel32.GetConsoleScreenBufferInfo -GetConsoleScreenBufferInfo.argtypes = [ - HANDLE, 
ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFO)] -GetConsoleScreenBufferInfo.restype = BOOL - -GetConsoleOutputCP = kernel32.GetConsoleOutputCP -GetConsoleOutputCP.argtypes = [] -GetConsoleOutputCP.restype = UINT - -SetConsoleOutputCP = kernel32.SetConsoleOutputCP -SetConsoleOutputCP.argtypes = [UINT] -SetConsoleOutputCP.restype = BOOL - -GetConsoleCP = kernel32.GetConsoleCP -GetConsoleCP.argtypes = [] -GetConsoleCP.restype = UINT - -SetConsoleCP = kernel32.SetConsoleCP -SetConsoleCP.argtypes = [UINT] -SetConsoleCP.restype = BOOL - -SetConsoleTextAttribute = kernel32.SetConsoleTextAttribute -SetConsoleTextAttribute.argtypes = [HANDLE, WORD] -SetConsoleTextAttribute.restype = BOOL - -SetConsoleCursorPosition = kernel32.SetConsoleCursorPosition -SetConsoleCursorPosition.argtypes = [HANDLE, COORD] -SetConsoleCursorPosition.restype = BOOL - -ReadConsoleW = kernel32.ReadConsoleW -ReadConsoleW.argtypes = [HANDLE, LPVOID, DWORD, ctypes.POINTER(DWORD), LPVOID] -ReadConsoleW.restype = BOOL - -MultiByteToWideChar = kernel32.MultiByteToWideChar -MultiByteToWideChar.argtypes = [ - UINT, DWORD, LPCSTR, ctypes.c_int, LPWSTR, ctypes.c_int] -MultiByteToWideChar.restype = ctypes.c_int - -WideCharToMultiByte = kernel32.WideCharToMultiByte -WideCharToMultiByte.argtypes = [ - UINT, DWORD, LPCWSTR, ctypes.c_int, LPSTR, ctypes.c_int, LPCSTR, LPBOOL] -WideCharToMultiByte.restpye = ctypes.c_int - -MoveFileW = kernel32.MoveFileW -MoveFileW.argtypes = [LPCTSTR, LPCTSTR] -MoveFileW.restype = BOOL - -if hasattr(kernel32, "GetFileInformationByHandleEx"): - GetFileInformationByHandleEx = kernel32.GetFileInformationByHandleEx - GetFileInformationByHandleEx.argtypes = [ - HANDLE, ctypes.c_int, ctypes.c_void_p, DWORD] - GetFileInformationByHandleEx.restype = BOOL -else: - # Windows XP - GetFileInformationByHandleEx = None - -MAX_PATH = 260 -FileNameInfo = 2 - - -class FILE_NAME_INFO(ctypes.Structure): - _fields_ = [ - ("FileNameLength", DWORD), - ("FileName", WCHAR), - ] - - -_get_osfhandle = msvcrt._get_osfhandle -_get_osfhandle.argtypes = [ctypes.c_int] -_get_osfhandle.restype = HANDLE - -GetFileType = kernel32.GetFileType -GetFileType.argtypes = [HANDLE] -GetFileType.restype = DWORD - -FILE_TYPE_PIPE = 0x0003 diff --git a/script.module.mutagen/lib/mutagen/_tags.py b/script.module.mutagen/lib/mutagen/_tags.py index b64caa9a2..a236cf820 100644 --- a/script.module.mutagen/lib/mutagen/_tags.py +++ b/script.module.mutagen/lib/mutagen/_tags.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2005 Michael Urman # # This program is free software; you can redistribute it and/or modify @@ -37,11 +36,11 @@ def my_callback(info: PaddingInfo): size (`int`): The amount of data following the padding """ - def __init__(self, padding, size): + def __init__(self, padding: int, size: int): self.padding = padding self.size = size - def get_default_padding(self): + def get_default_padding(self) -> int: """The default implementation which tries to select a reasonable amount of padding and which might change in future versions. 
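The _tags.py hunk above only adds type annotations to PaddingInfo, but it is a convenient point for a brief, hedged illustration of how that object is used: mutagen hands a PaddingInfo to a user-supplied padding callback during save(), and the callback returns the number of padding bytes to keep. The file name below is hypothetical and the callback name is illustrative.

from mutagen import PaddingInfo
from mutagen.id3 import ID3

# PaddingInfo can be constructed directly; padding and size are plain ints.
info = PaddingInfo(padding=1024, size=5000)
print(info.get_default_padding())   # mutagen's own heuristic


def no_padding(info: PaddingInfo) -> int:
    # Strip all padding when the tag is rewritten.
    return 0


tags = ID3("song.mp3")            # hypothetical file
tags.save(padding=no_padding)     # callback is passed through save()

The annotations in the hunk simply make this callback contract explicit: both attributes are plain ints and get_default_padding() returns an int.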
diff --git a/script.module.mutagen/lib/mutagen/_tools/__init__.py b/script.module.mutagen/lib/mutagen/_tools/__init__.py index 3e6b15565..94b5bb2f3 100644 --- a/script.module.mutagen/lib/mutagen/_tools/__init__.py +++ b/script.module.mutagen/lib/mutagen/_tools/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 Christoph Reiter # # This program is free software; you can redistribute it and/or modify diff --git a/script.module.mutagen/lib/mutagen/_tools/_util.py b/script.module.mutagen/lib/mutagen/_tools/_util.py index 4e050769e..513f39899 100644 --- a/script.module.mutagen/lib/mutagen/_tools/_util.py +++ b/script.module.mutagen/lib/mutagen/_tools/_util.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015 Christoph Reiter # # This program is free software; you can redistribute it and/or modify @@ -11,8 +10,7 @@ import contextlib import optparse -from mutagen._senf import print_ -from mutagen._compat import text_type, iterbytes +from mutagen._util import iterbytes def split_escape(string, sep, maxsplit=None, escape_char="\\"): @@ -25,7 +23,7 @@ def split_escape(string, sep, maxsplit=None, escape_char="\\"): assert len(escape_char) == 1 if isinstance(string, bytes): - if isinstance(escape_char, text_type): + if isinstance(escape_char, str): escape_char = escape_char.encode("ascii") iter_ = iterbytes else: @@ -88,8 +86,4 @@ def block(self): raise SystemExit("Aborted...") -class OptionParser(optparse.OptionParser): - """OptionParser subclass which supports printing Unicode under Windows""" - - def print_help(self, file=None): - print_(self.format_help(), file=file) +OptionParser = optparse.OptionParser diff --git a/script.module.mutagen/lib/mutagen/_tools/mid3cp.py b/script.module.mutagen/lib/mutagen/_tools/mid3cp.py index 1339548db..b48d285c2 100644 --- a/script.module.mutagen/lib/mutagen/_tools/mid3cp.py +++ b/script.module.mutagen/lib/mutagen/_tools/mid3cp.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014 Marcus Sundman # # This program is free software; you can redistribute it and/or modify @@ -15,8 +14,6 @@ import mutagen import mutagen.id3 -from mutagen._senf import print_, argv -from mutagen._compat import text_type from ._util import SignalHandler, OptionParser @@ -25,11 +22,6 @@ _sig = SignalHandler() -def printerr(*args, **kwargs): - kwargs.setdefault("file", sys.stderr) - print_(*args, **kwargs) - - class ID3OptionParser(OptionParser): def __init__(self): mutagen_version = mutagen.version_string @@ -52,15 +44,15 @@ def copy(src, dst, merge, write_v1=True, excluded_tags=None, verbose=False): try: id3 = mutagen.id3.ID3(src, translate=False) except mutagen.id3.ID3NoHeaderError: - print_(u"No ID3 header found in ", src, file=sys.stderr) + print(u"No ID3 header found in ", src, file=sys.stderr) return 1 except Exception as err: - print_(str(err), file=sys.stderr) + print(str(err), file=sys.stderr) return 1 if verbose: - print_(u"File", src, u"contains:", file=sys.stderr) - print_(id3.pprint(), file=sys.stderr) + print(u"File", src, u"contains:", file=sys.stderr) + print(id3.pprint(), file=sys.stderr) for tag in excluded_tags: id3.delall(tag) @@ -72,7 +64,7 @@ def copy(src, dst, merge, write_v1=True, excluded_tags=None, verbose=False): # no need to merge pass except Exception as err: - print_(str(err), file=sys.stderr) + print(str(err), file=sys.stderr) return 1 else: for frame in id3.values(): @@ -91,12 +83,12 @@ def copy(src, dst, merge, write_v1=True, excluded_tags=None, verbose=False): try: id3.save(dst, v1=(2 if write_v1 else 0), 
v2_version=v2_version) except Exception as err: - print_(u"Error saving", dst, u":\n%s" % text_type(err), - file=sys.stderr) + print(u"Error saving", dst, u":\n%s" % str(err), + file=sys.stderr) return 1 else: if verbose: - print_(u"Successfully saved", dst, file=sys.stderr) + print(u"Successfully saved", dst, file=sys.stderr) return 0 @@ -120,12 +112,12 @@ def main(argv): (src, dst) = args if not os.path.isfile(src): - print_(u"File not found:", src, file=sys.stderr) + print(u"File not found:", src, file=sys.stderr) parser.print_help(file=sys.stderr) return 1 if not os.path.isfile(dst): - printerr(u"File not found:", dst, file=sys.stderr) + print(u"File not found:", dst, file=sys.stderr) parser.print_help(file=sys.stderr) return 1 @@ -139,4 +131,4 @@ def main(argv): def entry_point(): _sig.init() - return main(argv) + return main(sys.argv) diff --git a/script.module.mutagen/lib/mutagen/_tools/mid3iconv.py b/script.module.mutagen/lib/mutagen/_tools/mid3iconv.py index 554f6bb8f..b0669acfa 100644 --- a/script.module.mutagen/lib/mutagen/_tools/mid3iconv.py +++ b/script.module.mutagen/lib/mutagen/_tools/mid3iconv.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2006 Emfox Zhou # # This program is free software; you can redistribute it and/or modify @@ -7,7 +6,7 @@ # (at your option) any later version. """ -ID3iconv is a Java based ID3 encoding convertor, here's the Python version. +ID3iconv is a Java based ID3 encoding converter, here's the Python version. """ import sys @@ -15,8 +14,6 @@ import mutagen import mutagen.id3 -from mutagen._senf import argv, print_, fsnative -from mutagen._compat import text_type from ._util import SignalHandler, OptionParser @@ -75,7 +72,7 @@ def conv(uni): for filename in filenames: with _sig.block(): if verbose != "quiet": - print_(u"Updating", filename) + print(u"Updating", filename) if has_id3v1(filename) and not noupdate and force_v1: mutagen.id3.delete(filename, False, True) @@ -84,10 +81,10 @@ def conv(uni): id3 = mutagen.id3.ID3(filename) except mutagen.id3.ID3NoHeaderError: if verbose != "quiet": - print_(u"No ID3 header found; skipping...") + print(u"No ID3 header found; skipping...") continue except Exception as err: - print_(text_type(err), file=sys.stderr) + print(str(err), file=sys.stderr) continue for tag in filter(lambda t: t.startswith(("T", "COMM")), id3): @@ -111,7 +108,7 @@ def conv(uni): frame.encoding = 1 if verbose == "debug": - print_(id3.pprint()) + print(id3.pprint()) if not noupdate: if remove_v1: @@ -154,9 +151,9 @@ def main(argv): for i, arg in enumerate(argv): if arg == "-v1": - argv[i] = fsnative(u"--force-v1") + argv[i] = "--force-v1" elif arg == "-removev1": - argv[i] = fsnative(u"--remove-v1") + argv[i] = "--remove-v1" (options, args) = parser.parse_args(argv[1:]) @@ -168,4 +165,4 @@ def main(argv): def entry_point(): _sig.init() - return main(argv) + return main(sys.argv) diff --git a/script.module.mutagen/lib/mutagen/_tools/mid3v2.py b/script.module.mutagen/lib/mutagen/_tools/mid3v2.py index 0b6e83d21..b6949b761 100644 --- a/script.module.mutagen/lib/mutagen/_tools/mid3v2.py +++ b/script.module.mutagen/lib/mutagen/_tools/mid3v2.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2005 Joe Wreschnig # # This program is free software; you can redistribute it and/or modify @@ -8,6 +7,7 @@ """Pretend to be /usr/bin/id3v2 from id3lib, sort of.""" +import os import sys import codecs import mimetypes @@ -18,9 +18,6 @@ import mutagen import mutagen.id3 from mutagen.id3 import Encoding, PictureType -from mutagen._senf import 
fsnative, print_, argv, fsn2text, fsn2bytes, \ - bytes2fsn -from mutagen._compat import PY2, text_type from ._util import split_escape, SignalHandler, OptionParser @@ -58,7 +55,7 @@ def format_help(self, *args, **kwargs): def list_frames(option, opt, value, parser): items = mutagen.id3.Frames.items() for name, frame in sorted(items): - print_(u" --%s %s" % (name, frame.__doc__.split("\n")[0])) + print(u" --%s %s" % (name, frame.__doc__.split("\n")[0])) raise SystemExit @@ -66,13 +63,13 @@ def list_frames_2_2(option, opt, value, parser): items = mutagen.id3.Frames_2_2.items() items.sort() for name, frame in items: - print_(u" --%s %s" % (name, frame.__doc__.split("\n")[0])) + print(u" --%s %s" % (name, frame.__doc__.split("\n")[0])) raise SystemExit def list_genres(option, opt, value, parser): for i, genre in enumerate(mutagen.id3.TCON.GENRES): - print_(u"%3d: %s" % (i, genre)) + print(u"%3d: %s" % (i, genre)) raise SystemExit @@ -80,7 +77,7 @@ def delete_tags(filenames, v1, v2): for filename in filenames: with _sig.block(): if verbose: - print_(u"deleting ID3 tag info in", filename, file=sys.stderr) + print(u"deleting ID3 tag info in", filename, file=sys.stderr) mutagen.id3.delete(filename, v1, v2) @@ -89,22 +86,22 @@ def delete_frames(deletes, filenames): try: deletes = frame_from_fsnative(deletes) except ValueError as err: - print_(text_type(err), file=sys.stderr) + print(str(err), file=sys.stderr) frames = deletes.split(",") for filename in filenames: with _sig.block(): if verbose: - print_(u"deleting %s from" % deletes, filename, - file=sys.stderr) + print("deleting %s from" % deletes, filename, + file=sys.stderr) try: id3 = mutagen.id3.ID3(filename) except mutagen.id3.ID3NoHeaderError: if verbose: - print_(u"No ID3 header found; skipping.", file=sys.stderr) + print(u"No ID3 header found; skipping.", file=sys.stderr) except Exception as err: - print_(text_type(err), file=sys.stderr) + print(str(err), file=sys.stderr) raise SystemExit(1) else: for frame in frames: @@ -117,40 +114,32 @@ def frame_from_fsnative(arg): or raises ValueError. """ - assert isinstance(arg, fsnative) - - text = fsn2text(arg, strict=True) - if PY2: - return text.encode("ascii") - else: - return text.encode("ascii").decode("ascii") + assert isinstance(arg, str) + return arg.encode("ascii").decode("ascii") def value_from_fsnative(arg, escape): - """Takes an item from argv and returns a text_type value without + """Takes an item from argv and returns a str value without surrogate escapes or raises ValueError. """ - assert isinstance(arg, fsnative) + assert isinstance(arg, str) if escape: - bytes_ = fsn2bytes(arg) - if PY2: - bytes_ = bytes_.decode("string_escape") - else: - # With py3.7 this has started to warn for invalid escapes, but we - # don't control the input so ignore it. - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - bytes_ = codecs.escape_decode(bytes_)[0] - arg = bytes2fsn(bytes_) - - text = fsn2text(arg, strict=True) + bytes_ = os.fsencode(arg) + # With py3.7 this has started to warn for invalid escapes, but we + # don't control the input so ignore it. 
+ with warnings.catch_warnings(): + warnings.simplefilter("ignore") + bytes_ = codecs.escape_decode(bytes_)[0] + arg = os.fsdecode(bytes_) + + text = arg.encode("utf-8").decode("utf-8") return text def error(*args): - print_(*args, file=sys.stderr) + print(*args, file=sys.stderr) raise SystemExit(1) @@ -172,7 +161,7 @@ def write_files(edits, filenames, escape): try: frame = frame_from_fsnative(frame) except ValueError as err: - print_(text_type(err), file=sys.stderr) + print(str(err), file=sys.stderr) assert isinstance(frame, str) @@ -182,9 +171,9 @@ def write_files(edits, filenames, escape): try: value = value_from_fsnative(value, escape) except ValueError as err: - error(u"%s: %s" % (frame, text_type(err))) + error(u"%s: %s" % (frame, str(err))) - assert isinstance(value, text_type) + assert isinstance(value, str) encoded_edits.append((frame, value)) edits = encoded_edits @@ -210,16 +199,16 @@ def write_files(edits, filenames, escape): for filename in filenames: with _sig.block(): if verbose: - print_(u"Writing", filename, file=sys.stderr) + print(u"Writing", filename, file=sys.stderr) try: id3 = mutagen.id3.ID3(filename) except mutagen.id3.ID3NoHeaderError: if verbose: - print_(u"No ID3 header found; creating a new tag", + print(u"No ID3 header found; creating a new tag", file=sys.stderr) id3 = mutagen.id3.ID3() except Exception as err: - print_(str(err), file=sys.stderr) + print(str(err), file=sys.stderr) continue for (frame, vlist) in edits.items(): if frame == "POPM": @@ -269,7 +258,7 @@ def write_files(edits, filenames, escape): with open(fn, "rb") as h: data = h.read() except IOError as e: - error(text_type(e)) + error(str(e)) frame = mutagen.id3.APIC(encoding=encoding, mime=mime, desc=desc, type=picture_type, data=data) @@ -343,31 +332,31 @@ def write_files(edits, filenames, escape): def list_tags(filenames): for filename in filenames: - print_("IDv2 tag info for", filename) + print("IDv2 tag info for", filename) try: id3 = mutagen.id3.ID3(filename, translate=False) except mutagen.id3.ID3NoHeaderError: - print_(u"No ID3 header found; skipping.") + print(u"No ID3 header found; skipping.") except Exception as err: - print_(text_type(err), file=sys.stderr) + print(str(err), file=sys.stderr) raise SystemExit(1) else: - print_(id3.pprint()) + print(id3.pprint()) def list_tags_raw(filenames): for filename in filenames: - print_("Raw IDv2 tag info for", filename) + print("Raw IDv2 tag info for", filename) try: id3 = mutagen.id3.ID3(filename, translate=False) except mutagen.id3.ID3NoHeaderError: - print_(u"No ID3 header found; skipping.") + print(u"No ID3 header found; skipping.") except Exception as err: - print_(text_type(err), file=sys.stderr) + print(str(err), file=sys.stderr) raise SystemExit(1) else: for frame in id3.values(): - print_(text_type(repr(frame))) + print(str(repr(frame))) def main(argv): @@ -416,43 +405,43 @@ def main(argv): parser.add_option( "-a", "--artist", metavar='"ARTIST"', action="callback", help="Set the artist information", type="string", - callback=lambda *args: args[3].edits.append((fsnative(u"--TPE1"), + callback=lambda *args: args[3].edits.append(("--TPE1", args[2]))) parser.add_option( "-A", "--album", metavar='"ALBUM"', action="callback", help="Set the album title information", type="string", - callback=lambda *args: args[3].edits.append((fsnative(u"--TALB"), + callback=lambda *args: args[3].edits.append(("--TALB", args[2]))) parser.add_option( "-t", "--song", metavar='"SONG"', action="callback", help="Set the song title information", type="string", - 
callback=lambda *args: args[3].edits.append((fsnative(u"--TIT2"), + callback=lambda *args: args[3].edits.append(("--TIT2", args[2]))) parser.add_option( "-c", "--comment", metavar='"DESCRIPTION":"COMMENT":"LANGUAGE"', action="callback", help="Set the comment information", type="string", - callback=lambda *args: args[3].edits.append((fsnative(u"--COMM"), + callback=lambda *args: args[3].edits.append(("--COMM", args[2]))) parser.add_option( "-p", "--picture", metavar='"FILENAME":"DESCRIPTION":"IMAGE-TYPE":"MIME-TYPE"', action="callback", help="Set the picture", type="string", - callback=lambda *args: args[3].edits.append((fsnative(u"--APIC"), + callback=lambda *args: args[3].edits.append(("--APIC", args[2]))) parser.add_option( "-g", "--genre", metavar='"GENRE"', action="callback", help="Set the genre or genre number", type="string", - callback=lambda *args: args[3].edits.append((fsnative(u"--TCON"), + callback=lambda *args: args[3].edits.append(("--TCON", args[2]))) parser.add_option( "-y", "--year", "--date", metavar='YYYY[-MM-DD]', action="callback", help="Set the year/date", type="string", - callback=lambda *args: args[3].edits.append((fsnative(u"--TDRC"), + callback=lambda *args: args[3].edits.append(("--TDRC", args[2]))) parser.add_option( "-T", "--track", metavar='"num/num"', action="callback", help="Set the track number/(optional) total tracks", type="string", - callback=lambda *args: args[3].edits.append((fsnative(u"--TRCK"), + callback=lambda *args: args[3].edits.append(("--TRCK", args[2]))) for key, frame in mutagen.id3.Frames.items(): @@ -492,4 +481,4 @@ def main(argv): def entry_point(): _sig.init() - return main(argv) + return main(sys.argv) diff --git a/script.module.mutagen/lib/mutagen/_tools/moggsplit.py b/script.module.mutagen/lib/mutagen/_tools/moggsplit.py index 710f0dfeb..de3820607 100644 --- a/script.module.mutagen/lib/mutagen/_tools/moggsplit.py +++ b/script.module.mutagen/lib/mutagen/_tools/moggsplit.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2006 Joe Wreschnig # # This program is free software; you can redistribute it and/or modify @@ -9,9 +8,9 @@ """Split a multiplex/chained Ogg file into its component parts.""" import os +import sys import mutagen.ogg -from mutagen._senf import argv from ._util import SignalHandler, OptionParser @@ -72,4 +71,4 @@ def main(argv): def entry_point(): _sig.init() - return main(argv) + return main(sys.argv) diff --git a/script.module.mutagen/lib/mutagen/_tools/mutagen_inspect.py b/script.module.mutagen/lib/mutagen/_tools/mutagen_inspect.py index 6bd6c6143..fac529a94 100644 --- a/script.module.mutagen/lib/mutagen/_tools/mutagen_inspect.py +++ b/script.module.mutagen/lib/mutagen/_tools/mutagen_inspect.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2005 Joe Wreschnig # # This program is free software; you can redistribute it and/or modify @@ -8,8 +7,7 @@ """Full tag list for any given file.""" -from mutagen._senf import print_, argv -from mutagen._compat import text_type +import sys from ._util import SignalHandler, OptionParser @@ -20,7 +18,7 @@ def main(argv): from mutagen import File - parser = OptionParser() + parser = OptionParser(usage="usage: %prog [options] FILE [FILE...]") parser.add_option("--no-flac", help="Compatibility; does nothing.") parser.add_option("--no-mp3", help="Compatibility; does nothing.") parser.add_option("--no-apev2", help="Compatibility; does nothing.") @@ -30,16 +28,16 @@ def main(argv): raise SystemExit(parser.print_help() or 1) for filename in args: - print_(u"--", filename) + 
print(u"--", filename) try: - print_(u"-", File(filename).pprint()) + print(u"-", File(filename).pprint()) except AttributeError: - print_(u"- Unknown file type") + print(u"- Unknown file type") except Exception as err: - print_(text_type(err)) - print_(u"") + print(str(err)) + print(u"") def entry_point(): _sig.init() - return main(argv) + return main(sys.argv) diff --git a/script.module.mutagen/lib/mutagen/_tools/mutagen_pony.py b/script.module.mutagen/lib/mutagen/_tools/mutagen_pony.py index e4a496c73..30ee5502e 100644 --- a/script.module.mutagen/lib/mutagen/_tools/mutagen_pony.py +++ b/script.module.mutagen/lib/mutagen/_tools/mutagen_pony.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2005 Joe Wreschnig, Michael Urman # # This program is free software; you can redistribute it and/or modify @@ -10,8 +9,6 @@ import sys import traceback -from mutagen._senf import print_, argv - from ._util import SignalHandler @@ -76,14 +73,14 @@ def __str__(self): else: strings.append("\nNo errors!") - return("\n".join(strings)) + return "\n".join(strings) def check_dir(path): from mutagen.mp3 import MP3 rep = Report(path) - print_(u"Scanning", path) + print(u"Scanning", path) for path, dirs, files in os.walk(path): files.sort() for fn in files: @@ -100,12 +97,12 @@ def check_dir(path): else: rep.success(mp3.tags) - print_(str(rep)) + print(str(rep)) def main(argv): if len(argv) == 1: - print_(u"Usage:", argv[0], u"directory ...") + print(u"Usage:", argv[0], u"directory ...") else: for path in argv[1:]: check_dir(path) @@ -113,4 +110,4 @@ def main(argv): def entry_point(): SignalHandler().init() - return main(argv) + return main(sys.argv) diff --git a/script.module.mutagen/lib/mutagen/_util.py b/script.module.mutagen/lib/mutagen/_util.py index 1332f9d3a..b99c7c78a 100644 --- a/script.module.mutagen/lib/mutagen/_util.py +++ b/script.module.mutagen/lib/mutagen/_util.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2006 Joe Wreschnig # # This program is free software; you can redistribute it and/or modify @@ -18,24 +17,41 @@ import errno import decimal from io import BytesIO - -try: - import mmap -except ImportError: - # Google App Engine has no mmap: - # https://github.com/quodlibet/mutagen/issues/286 - mmap = None +from typing import Tuple, List from collections import namedtuple from contextlib import contextmanager from functools import wraps from fnmatch import fnmatchcase -from ._compat import chr_, PY2, iteritems, iterbytes, integer_types, xrange, \ - izip, text_type, reraise +_DEFAULT_BUFFER_SIZE = 2 ** 20 + + +def endswith(text, end): + # useful for paths which can be both, str and bytes + if isinstance(text, str): + if not isinstance(end, str): + end = end.decode("ascii") + else: + if not isinstance(end, bytes): + end = end.encode("ascii") + return text.endswith(end) + + +def reraise(tp, value, tb): + raise tp(value).with_traceback(tb) -def intround(value): + +def bchr(x): + return bytes([x]) + + +def iterbytes(b): + return (bytes([v]) for v in b) + + +def intround(value: float) -> int: """Given a float returns a rounded int. 
Should give the same result on both Py2/3 """ @@ -44,13 +60,13 @@ def intround(value): value).to_integral_value(decimal.ROUND_HALF_EVEN)) -def is_fileobj(fileobj): +def is_fileobj(fileobj) -> bool: """Returns: bool: if an argument passed ot mutagen should be treated as a file object """ - return not (isinstance(fileobj, (text_type, bytes)) or + return not (isinstance(fileobj, (str, bytes)) or hasattr(fileobj, "__fspath__")) @@ -105,8 +121,8 @@ def fileobj_name(fileobj): """ value = getattr(fileobj, "name", u"") - if not isinstance(value, (text_type, bytes)): - value = text_type(value) + if not isinstance(value, (str, bytes)): + value = str(value) return value @@ -212,7 +228,7 @@ def _openfile(instance, filething, filename, fileobj, writable, create): fileobj = filething elif hasattr(filething, "__fspath__"): filename = filething.__fspath__() - if not isinstance(filename, (bytes, text_type)): + if not isinstance(filename, (bytes, str)): raise TypeError("expected __fspath__() to return a filename") else: filename = filething @@ -302,9 +318,6 @@ def hashable(cls): Needs a working __eq__ and __hash__ and will add a __ne__. """ - # py2 - assert "__hash__" in cls.__dict__ - # py3 assert cls.__dict__["__hash__"] is not None assert "__eq__" in cls.__dict__ @@ -340,8 +353,8 @@ class Foo(object): new_type.__module__ = cls.__module__ map_ = {} - for key, value in iteritems(d): - if key.upper() == key and isinstance(value, integer_types): + for key, value in d.items(): + if key.upper() == key and isinstance(value, int): value_instance = new_type(value) setattr(new_type, key, value_instance) map_[value] = key @@ -389,8 +402,8 @@ class Foo(object): new_type.__module__ = cls.__module__ map_ = {} - for key, value in iteritems(d): - if key.upper() == key and isinstance(value, integer_types): + for key, value in d.items(): + if key.upper() == key and isinstance(value, int): value_instance = new_type(value) setattr(new_type, key, value_instance) map_[value] = key @@ -403,7 +416,7 @@ def str_(self): matches.append("%s.%s" % (type(self).__name__, v)) value &= ~k if value != 0 or not matches: - matches.append(text_type(value)) + matches.append(str(value)) return " | ".join(matches) @@ -443,25 +456,13 @@ def __has_key(self, key): else: return True - if PY2: - has_key = __has_key - __contains__ = __has_key - if PY2: - iterkeys = lambda self: iter(self.keys()) - def values(self): return [self[k] for k in self.keys()] - if PY2: - itervalues = lambda self: iter(self.values()) - def items(self): - return list(izip(self.keys(), self.values())) - - if PY2: - iteritems = lambda s: iter(s.items()) + return list(zip(self.keys(), self.values())) def clear(self): for key in list(self.keys()): @@ -477,7 +478,7 @@ def pop(self, key, *args): return args[0] else: raise - del(self[key]) + del self[key] return value def popitem(self): @@ -539,7 +540,7 @@ def __setitem__(self, key, value): self.__dict[key] = value def __delitem__(self, key): - del(self.__dict[key]) + del self.__dict[key] def keys(self): return self.__dict.keys() @@ -591,7 +592,7 @@ def unpack_from(data, offset=0): funcs["to_%s%s%s" % (prefix, name, esuffix)] = pack funcs["to_%sint%s%s" % (prefix, bits, esuffix)] = pack - for key, func in iteritems(funcs): + for key, func in funcs.items(): setattr(cls, key, staticmethod(func)) @@ -602,12 +603,11 @@ class cdata(object): uint32_le(data)/to_uint32_le(num)/uint32_le_from(data, offset=0) """ - from struct import error - error = error + error = struct.error bitswap = b''.join( - chr_(sum(((val >> i) & 1) << (7 - i) for i 
in xrange(8))) - for val in xrange(256)) + bchr(sum(((val >> i) & 1) << (7 - i) for i in range(8))) + for val in range(256)) test_bit = staticmethod(lambda value, n: bool((value >> n) & 1)) @@ -615,7 +615,7 @@ class cdata(object): _fill_cdata(cdata) -def get_size(fileobj): +def get_size(fileobj) -> int: """Returns the size of the file. The position when passed in will be preserved if no error occurs. @@ -635,7 +635,7 @@ def get_size(fileobj): fileobj.seek(old_pos, 0) -def read_full(fileobj, size): +def read_full(fileobj, size: int) -> None: """Like fileobj.read but raises IOError if not all requested data is returned. @@ -658,7 +658,7 @@ def read_full(fileobj, size): return data -def seek_end(fileobj, offset): +def seek_end(fileobj, offset: int) -> None: """Like fileobj.seek(-offset, 2), but will not try to go beyond the start Needed since file objects from BytesIO will not raise IOError and @@ -683,65 +683,7 @@ def seek_end(fileobj, offset): fileobj.seek(-offset, 2) -def mmap_move(fileobj, dest, src, count): - """Mmaps the file object if possible and moves 'count' data - from 'src' to 'dest'. All data has to be inside the file size - (enlarging the file through this function isn't possible) - - Will adjust the file offset. - - Args: - fileobj (fileobj) - dest (int): The destination offset - src (int): The source offset - count (int) The amount of data to move - Raises: - mmap.error: In case move failed - IOError: In case an operation on the fileobj fails - ValueError: In case invalid parameters were given - """ - - assert mmap is not None, "no mmap support" - - if dest < 0 or src < 0 or count < 0: - raise ValueError("Invalid parameters") - - try: - fileno = fileobj.fileno() - except (AttributeError, IOError): - raise mmap.error( - "File object does not expose/support a file descriptor") - - fileobj.seek(0, 2) - filesize = fileobj.tell() - length = max(dest, src) + count - - if length > filesize: - raise ValueError("Not in file size boundary") - - offset = ((min(dest, src) // mmap.ALLOCATIONGRANULARITY) * - mmap.ALLOCATIONGRANULARITY) - assert dest >= offset - assert src >= offset - assert offset % mmap.ALLOCATIONGRANULARITY == 0 - - # Windows doesn't handle empty mappings, add a fast path here instead - if count == 0: - return - - # fast path - if src == dest: - return - - fileobj.flush() - file_map = mmap.mmap(fileno, length - offset, offset=offset) - try: - file_map.move(dest - offset, src - offset, count) - finally: - file_map.close() - - -def resize_file(fobj, diff, BUFFER_SIZE=2 ** 16): +def resize_file(fobj, diff: int, BUFFER_SIZE: int = _DEFAULT_BUFFER_SIZE) -> None: """Resize a file by `diff`. New space will be filled with zeros. @@ -778,7 +720,8 @@ def resize_file(fobj, diff, BUFFER_SIZE=2 ** 16): raise -def fallback_move(fobj, dest, src, count, BUFFER_SIZE=2 ** 16): +def move_bytes(fobj, dest: int, src: int, count: int, + BUFFER_SIZE: int = _DEFAULT_BUFFER_SIZE) -> None: """Moves data around using read()/write(). Args: @@ -821,12 +764,12 @@ def fallback_move(fobj, dest, src, count, BUFFER_SIZE=2 ** 16): fobj.flush() -def insert_bytes(fobj, size, offset, BUFFER_SIZE=2 ** 16): +def insert_bytes(fobj, size: int, offset: int, + BUFFER_SIZE: int = _DEFAULT_BUFFER_SIZE) -> None: """Insert size bytes of empty space starting at offset. fobj must be an open file object, open rb+ or - equivalent. Mutagen tries to use mmap to resize the file, but - falls back to a significantly slower method if mmap fails. + equivalent. 
Args: fobj (fileobj) @@ -847,22 +790,15 @@ def insert_bytes(fobj, size, offset, BUFFER_SIZE=2 ** 16): raise ValueError resize_file(fobj, size, BUFFER_SIZE) - - if mmap is not None: - try: - mmap_move(fobj, offset + size, offset, movesize) - except mmap.error: - fallback_move(fobj, offset + size, offset, movesize, BUFFER_SIZE) - else: - fallback_move(fobj, offset + size, offset, movesize, BUFFER_SIZE) + move_bytes(fobj, offset + size, offset, movesize, BUFFER_SIZE) -def delete_bytes(fobj, size, offset, BUFFER_SIZE=2 ** 16): +def delete_bytes(fobj, size: int, offset: int, + BUFFER_SIZE: int = _DEFAULT_BUFFER_SIZE) -> None: """Delete size bytes of empty space starting at offset. fobj must be an open file object, open rb+ or - equivalent. Mutagen tries to use mmap to resize the file, but - falls back to a significantly slower method if mmap fails. + equivalent. Args: fobj (fileobj) @@ -882,18 +818,11 @@ def delete_bytes(fobj, size, offset, BUFFER_SIZE=2 ** 16): if movesize < 0: raise ValueError - if mmap is not None: - try: - mmap_move(fobj, offset, offset + size, movesize) - except mmap.error: - fallback_move(fobj, offset, offset + size, movesize, BUFFER_SIZE) - else: - fallback_move(fobj, offset, offset + size, movesize, BUFFER_SIZE) - + move_bytes(fobj, offset, offset + size, movesize, BUFFER_SIZE) resize_file(fobj, -size, BUFFER_SIZE) -def resize_bytes(fobj, old_size, new_size, offset): +def resize_bytes(fobj, old_size: int, new_size: int, offset: int) -> None: """Resize an area in a file adding and deleting at the end of it. Does nothing if no resizing is needed. @@ -933,13 +862,14 @@ def dict_match(d, key, default=None): if key in d and "[" not in key: return d[key] else: - for pattern, value in iteritems(d): + for pattern, value in d.items(): if fnmatchcase(key, pattern): return value return default -def encode_endian(text, encoding, errors="strict", le=True): +def encode_endian(text: str, encoding: str, + errors: str = "strict", le: bool = True) -> bytes: """Like text.encode(encoding) but always returns little endian/big endian BOMs instead of the system one. @@ -971,7 +901,8 @@ def encode_endian(text, encoding, errors="strict", le=True): return text.encode(encoding, errors) -def decode_terminated(data, encoding, strict=True): +def decode_terminated(data: bytes, encoding: str, + strict: bool = True) -> Tuple[str, bytes]: """Returns the decoded data until the first NULL terminator and all data after it. @@ -1011,7 +942,7 @@ def decode_terminated(data, encoding, strict=True): # slow path decoder = codec_info.incrementaldecoder() - r = [] + r: List[str] = [] for i, b in enumerate(iterbytes(data)): c = decoder.decode(b) if c == u"\x00": @@ -1037,7 +968,7 @@ def __init__(self, fileobj): self._bits = 0 self._pos = fileobj.tell() - def bits(self, count): + def bits(self, count: int) -> int: """Reads `count` bits and returns an uint, MSB read first. May raise BitReaderError if not enough data could be read or @@ -1062,7 +993,7 @@ def bits(self, count): assert self._bits < 8 return value - def bytes(self, count): + def bytes(self, count: int) -> bytes: """Returns a bytearray of length `count`. Works unaligned.""" if count < 0: @@ -1075,9 +1006,9 @@ def bytes(self, count): raise BitReaderError("not enough data") return data - return bytes(bytearray(self.bits(8) for _ in xrange(count))) + return bytes(bytearray(self.bits(8) for _ in range(count))) - def skip(self, count): + def skip(self, count: int) -> None: """Skip `count` bits. 
Might raise BitReaderError if there wasn't enough data to skip, @@ -1096,12 +1027,12 @@ def skip(self, count): count -= n_bytes * 8 self.bits(count) - def get_position(self): + def get_position(self) -> int: """Returns the amount of bits read or skipped so far""" return (self._fileobj.tell() - self._pos) * 8 - self._bits - def align(self): + def align(self) -> int: """Align to the next byte, returns the amount of bits skipped""" bits = self._bits @@ -1109,7 +1040,7 @@ def align(self): self._bits = 0 return bits - def is_aligned(self): + def is_aligned(self) -> bool: """If we are currently aligned to bytes and nothing is buffered""" return self._bits == 0 diff --git a/script.module.mutagen/lib/mutagen/_vorbis.py b/script.module.mutagen/lib/mutagen/_vorbis.py index efc615958..1168d1809 100644 --- a/script.module.mutagen/lib/mutagen/_vorbis.py +++ b/script.module.mutagen/lib/mutagen/_vorbis.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2005-2006 Joe Wreschnig # 2013 Christoph Reiter # @@ -17,13 +16,13 @@ """ import sys +from io import BytesIO import mutagen -from ._compat import reraise, BytesIO, text_type, xrange, PY3, PY2 -from mutagen._util import DictMixin, cdata, MutagenError +from mutagen._util import DictMixin, cdata, MutagenError, reraise -def is_valid_key(key): +def is_valid_key(key: str) -> bool: """Return true if a string is a valid Vorbis comment key. Valid Vorbis comment keys are printable ASCII between 0x20 (space) @@ -32,7 +31,7 @@ def is_valid_key(key): Takes str/unicode in Python 2, unicode in Python 3 """ - if PY3 and isinstance(key, bytes): + if isinstance(key, bytes): raise TypeError("needs to be str not bytes") for c in key: @@ -104,7 +103,7 @@ def load(self, fileobj, errors='replace', framing=True): vendor_length = cdata.uint_le(fileobj.read(4)) self.vendor = fileobj.read(vendor_length).decode('utf-8', errors) count = cdata.uint_le(fileobj.read(4)) - for i in xrange(count): + for i in range(count): length = cdata.uint_le(fileobj.read(4)) try: string = fileobj.read(length).decode('utf-8', errors) @@ -124,9 +123,7 @@ def load(self, fileobj, errors='replace', framing=True): except UnicodeEncodeError: raise VorbisEncodingError("invalid tag name %r" % tag) else: - # string keys in py3k - if PY3: - tag = tag.decode("ascii") + tag = tag.decode("ascii") if is_valid_key(tag): self.append((tag, value)) @@ -145,14 +142,8 @@ def validate(self): In Python 3 all keys and values have to be a string. 
""" - if not isinstance(self.vendor, text_type): - if PY3: - raise ValueError("vendor needs to be str") - - try: - self.vendor.decode('utf-8') - except UnicodeDecodeError: - raise ValueError + if not isinstance(self.vendor, str): + raise ValueError("vendor needs to be str") for key, value in self: try: @@ -161,20 +152,13 @@ def validate(self): except TypeError: raise ValueError("%r is not a valid key" % key) - if not isinstance(value, text_type): - if PY3: - err = "%r needs to be str for key %r" % (value, key) - raise ValueError(err) - - try: - value.decode("utf-8") - except Exception: - err = "%r is not a valid value for key %r" % (value, key) - raise ValueError(err) + if not isinstance(value, str): + err = "%r needs to be str for key %r" % (value, key) + raise ValueError(err) return True - def clear(self): + def clear(self) -> None: """Clear all keys from the comment.""" for i in list(self): @@ -212,10 +196,10 @@ def _encode(value): f.write(b"\x01") return f.getvalue() - def pprint(self): + def pprint(self) -> str: def _decode(value): - if not isinstance(value, text_type): + if not isinstance(value, str): return value.decode('utf-8', 'replace') return value @@ -223,7 +207,7 @@ def _decode(value): return u"\n".join(tags) -class VCommentDict(VComment, DictMixin): +class VCommentDict(VComment, DictMixin): # type: ignore """A VComment that looks like a dictionary. This object differs from a dictionary in two ways. First, @@ -244,7 +228,6 @@ def __getitem__(self, key): work. """ - # PY3 only if isinstance(key, slice): return VComment.__getitem__(self, key) @@ -262,7 +245,6 @@ def __getitem__(self, key): def __delitem__(self, key): """Delete all values associated with the key.""" - # PY3 only if isinstance(key, slice): return VComment.__delitem__(self, key) @@ -298,7 +280,6 @@ def __setitem__(self, key, values): string. """ - # PY3 only if isinstance(key, slice): return VComment.__setitem__(self, key, values) @@ -308,13 +289,10 @@ def __setitem__(self, key, values): if not isinstance(values, list): values = [values] try: - del(self[key]) + del self[key] except KeyError: pass - if PY2: - key = key.encode('ascii') - for value in values: self.append((key, value)) diff --git a/script.module.mutagen/lib/mutagen/aac.py b/script.module.mutagen/lib/mutagen/aac.py index fa6f7064c..274b20dfd 100644 --- a/script.module.mutagen/lib/mutagen/aac.py +++ b/script.module.mutagen/lib/mutagen/aac.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2014 Christoph Reiter # # This program is free software; you can redistribute it and/or modify @@ -15,9 +14,8 @@ from mutagen import StreamInfo from mutagen._file import FileType from mutagen._util import BitReader, BitReaderError, MutagenError, loadfile, \ - convert_error + convert_error, endswith from mutagen.id3._util import BitPaddedInt -from mutagen._compat import endswith, xrange _FREQS = [ @@ -243,7 +241,7 @@ def __init__(self, r): elms = num_front_channel_elements + num_side_channel_elements + \ num_back_channel_elements channels = 0 - for i in xrange(elms): + for i in range(elms): channels += 1 element_is_cpe = r.bits(1) if element_is_cpe: @@ -323,7 +321,7 @@ def _parse_adif(self, fileobj): self.channels = pce.channels # other pces.. - for i in xrange(npce): + for i in range(npce): ProgramConfigElement(r) r.align() except BitReaderError as e: @@ -347,7 +345,7 @@ def _parse_adts(self, fileobj, start_offset): # Try up to X times to find a sync word and read up to Y frames. 
# If more than Z frames are valid we assume a valid stream offset = start_offset - for i in xrange(max_sync_tries): + for i in range(max_sync_tries): fileobj.seek(offset) s = _ADTSStream.find_stream(fileobj, max_initial_read) if s is None: @@ -355,7 +353,7 @@ def _parse_adts(self, fileobj, start_offset): # start right after the last found offset offset += s.offset + 1 - for i in xrange(frames_max): + for i in range(frames_max): if not s.parse_frame(): break if not s.sync(max_resync_read): @@ -375,7 +373,10 @@ def _parse_adts(self, fileobj, start_offset): fileobj.seek(0, 2) stream_size = fileobj.tell() - (offset + s.offset) # approx - self.length = float(s.samples * stream_size) / (s.size * s.frequency) + self.length = 0.0 + if s.frequency != 0: + self.length = \ + float(s.samples * stream_size) / (s.size * s.frequency) def pprint(self): return u"AAC (%s), %d Hz, %.2f seconds, %d channel(s), %d bps" % ( diff --git a/script.module.mutagen/lib/mutagen/ac3.py b/script.module.mutagen/lib/mutagen/ac3.py index 09c9be5b9..d31408db8 100644 --- a/script.module.mutagen/lib/mutagen/ac3.py +++ b/script.module.mutagen/lib/mutagen/ac3.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2019 Philipp Wolfer # # This program is free software; you can redistribute it and/or modify @@ -13,7 +12,6 @@ __all__ = ["AC3", "Open"] from mutagen import StreamInfo -from mutagen._compat import endswith from mutagen._file import FileType from mutagen._util import ( BitReader, @@ -22,6 +20,7 @@ convert_error, enum, loadfile, + endswith, ) @@ -220,7 +219,7 @@ def _skip_unused_header_bits_normal(bitreader, channel_mode): if timecod2e: r.skip(14) # Time Code Second Half if r.bits(1): # Additional Bit Stream Information Exists - addbsil = r.bit(6) # Additional Bit Stream Information Length + addbsil = r.bits(6) # Additional Bit Stream Information Length r.skip((addbsil + 1) * 8) @staticmethod @@ -271,7 +270,7 @@ def _skip_unused_header_bits_enhanced(bitreader, frame_type, channel_mode, if r.bits(1): # blkid r.skip(6) # frmsizecod if r.bits(1): # Additional Bit Stream Information Exists - addbsil = r.bit(6) # Additional Bit Stream Information Length + addbsil = r.bits(6) # Additional Bit Stream Information Length r.skip((addbsil + 1) * 8) @staticmethod diff --git a/script.module.mutagen/lib/mutagen/aiff.py b/script.module.mutagen/lib/mutagen/aiff.py index a8fb7fabe..74d2f03d6 100644 --- a/script.module.mutagen/lib/mutagen/aiff.py +++ b/script.module.mutagen/lib/mutagen/aiff.py @@ -1,7 +1,6 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2014 Evan Purkhiser # 2014 Ben Ockmore -# 2019 Philipp Wolfer +# 2019-2020 Philipp Wolfer # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -10,32 +9,30 @@ """AIFF audio stream information and tags.""" -import sys import struct from struct import pack -from ._compat import endswith, text_type, reraise from mutagen import StreamInfo, FileType -from mutagen.id3 import ID3 from mutagen.id3._util import ID3NoHeaderError, error as ID3Error +from mutagen._iff import ( + IffChunk, + IffContainerChunkMixin, + IffFile, + IffID3, + InvalidChunk, + error as IffError, +) from mutagen._util import ( - MutagenError, convert_error, - delete_bytes, - insert_bytes, loadfile, - resize_bytes, + endswith, ) __all__ = ["AIFF", "Open", "delete"] -class error(MutagenError): - pass - - -class InvalidChunk(error): +class error(IffError): pass @@ -43,22 +40,10 @@ class InvalidChunk(error): _HUGE_VAL = 
1.79769313486231e+308 -def is_valid_chunk_id(id): - assert isinstance(id, text_type) - - return ((len(id) <= 4) and (min(id) >= u' ') and - (max(id) <= u'~')) - - -def assert_valid_chunk_id(id): +def read_float(data): + """Raises OverflowError""" - assert isinstance(id, text_type) - - if not is_valid_chunk_id(id): - raise ValueError("AIFF key must be four ASCII characters.") - - -def read_float(data): # 10 bytes + assert len(data) == 10 expon, himant, lomant = struct.unpack('>hLL', data) sign = 1 if expon < 0: @@ -67,258 +52,70 @@ def read_float(data): # 10 bytes if expon == himant == lomant == 0: f = 0.0 elif expon == 0x7FFF: - f = _HUGE_VAL + raise OverflowError("inf and nan not supported") else: expon = expon - 16383 + # this can raise OverflowError too f = (himant * 0x100000000 + lomant) * pow(2.0, expon - 63) return sign * f -class IFFChunk(object): +class AIFFChunk(IffChunk): """Representation of a single IFF chunk""" - # Chunk headers are 8 bytes long (4 for ID and 4 for the size) - HEADER_SIZE = 8 - @classmethod - def parse(cls, fileobj, parent_chunk=None): - header = fileobj.read(cls.HEADER_SIZE) - if len(header) < cls.HEADER_SIZE: - raise InvalidChunk('Header size < %i' % cls.HEADER_SIZE) - - id, data_size = struct.unpack('>4sI', header) - try: - id = id.decode('ascii').rstrip() - except UnicodeDecodeError as e: - raise InvalidChunk(e) - - if not is_valid_chunk_id(id): - raise InvalidChunk('Invalid chunk ID %s' % id) - - return cls.get_class(id)(fileobj, id, data_size, parent_chunk) + def parse_header(cls, header): + return struct.unpack('>4sI', header) @classmethod def get_class(cls, id): if id == 'FORM': - return FormIFFChunk + return AIFFFormChunk else: return cls - def __init__(self, fileobj, id, data_size, parent_chunk): - self._fileobj = fileobj - self.id = id - self.data_size = data_size - self.parent_chunk = parent_chunk - self.data_offset = fileobj.tell() - self.offset = self.data_offset - self.HEADER_SIZE - self._calculate_size() - - def read(self): - """Read the chunks data""" - - self._fileobj.seek(self.data_offset) - return self._fileobj.read(self.data_size) - - def write(self, data): - """Write the chunk data""" - - if len(data) > self.data_size: - raise ValueError - - self._fileobj.seek(self.data_offset) - self._fileobj.write(data) - # Write the padding bytes - padding = self.padding() - if padding: - self._fileobj.seek(self.data_offset + self.data_size) - self._fileobj.write(b'\x00' * padding) - - def delete(self): - """Removes the chunk from the file""" - - delete_bytes(self._fileobj, self.size, self.offset) - if self.parent_chunk is not None: - self.parent_chunk._remove_subchunk(self) - self._fileobj.flush() - - def _update_size(self, size_diff, changed_subchunk=None): - """Update the size of the chunk""" - - old_size = self.size - self.data_size += size_diff - self._fileobj.seek(self.offset + 4) + def write_new_header(self, id_, size): + self._fileobj.write(pack('>4sI', id_, size)) + + def write_size(self): self._fileobj.write(pack('>I', self.data_size)) - self._calculate_size() - if self.parent_chunk is not None: - self.parent_chunk._update_size(self.size - old_size, self) - if changed_subchunk: - self._update_sibling_offsets( - changed_subchunk, old_size - self.size) - - def _calculate_size(self): - self.size = self.HEADER_SIZE + self.data_size + self.padding() - assert self.size % 2 == 0 - - def resize(self, new_data_size): - """Resize the file and update the chunk sizes""" - - padding = new_data_size % 2 - resize_bytes(self._fileobj, self.data_size + 
self.padding(), - new_data_size + padding, self.data_offset) - size_diff = new_data_size - self.data_size - self._update_size(size_diff) - self._fileobj.flush() - - def padding(self): - """Returns the number of padding bytes (0 or 1). - IFF chunks are required to be a even number in total length. If - data_size is odd a padding byte will be added at the end. - """ - return self.data_size % 2 - - -class FormIFFChunk(IFFChunk): - """A IFF chunk containing other chunks. - This is either a 'LIST' or 'RIFF' - """ - MIN_DATA_SIZE = 4 + +class AIFFFormChunk(AIFFChunk, IffContainerChunkMixin): + """The AIFF root chunk.""" + + def parse_next_subchunk(self): + return AIFFChunk.parse(self._fileobj, self) def __init__(self, fileobj, id, data_size, parent_chunk): if id != u'FORM': raise InvalidChunk('Expected FORM chunk, got %s' % id) - IFFChunk.__init__(self, fileobj, id, data_size, parent_chunk) - - # Lists always store an addtional identifier as 4 bytes - if data_size < self.MIN_DATA_SIZE: - raise InvalidChunk('FORM data size < %i' % self.MIN_DATA_SIZE) + AIFFChunk.__init__(self, fileobj, id, data_size, parent_chunk) + self.init_container() - # Read the FORM id (usually AIFF) - try: - self.name = fileobj.read(4).decode('ascii') - except UnicodeDecodeError as e: - raise error(e) - # Load all IFF subchunks - self.__subchunks = [] - - def subchunks(self): - """Returns a list of all subchunks. - The list is lazily loaded on first access. - """ - if not self.__subchunks: - next_offset = self.data_offset + 4 - while next_offset < self.offset + self.size: - self._fileobj.seek(next_offset) - try: - chunk = IFFChunk.parse(self._fileobj, self) - except InvalidChunk: - break - self.__subchunks.append(chunk) - - # Calculate the location of the next chunk - next_offset = chunk.offset + chunk.size - return self.__subchunks - - def insert_chunk(self, id_, data=None): - """Insert a new chunk at the end of the FORM chunk""" - - assert isinstance(id_, text_type) - - if not is_valid_chunk_id(id_): - raise KeyError("Invalid IFF key.") - - next_offset = self.offset + self.size - size = self.HEADER_SIZE - data_size = 0 - if data: - data_size = len(data) - padding = data_size % 2 - size += data_size + padding - insert_bytes(self._fileobj, size, next_offset) - self._fileobj.seek(next_offset) - self._fileobj.write( - pack('>4si', id_.ljust(4).encode('ascii'), data_size)) - self._fileobj.seek(next_offset) - chunk = IFFChunk.parse(self._fileobj, self) - self._update_size(chunk.size) - if data: - chunk.write(data) - self.subchunks().append(chunk) - self._fileobj.flush() - return chunk - - def _remove_subchunk(self, chunk): - assert chunk in self.__subchunks - self._update_size(-chunk.size, chunk) - self.__subchunks.remove(chunk) - - def _update_sibling_offsets(self, changed_subchunk, size_diff): - """Update the offsets of subchunks after `changed_subchunk`. 
- """ - index = self.__subchunks.index(changed_subchunk) - sibling_chunks = self.__subchunks[index + 1:len(self.__subchunks)] - for sibling in sibling_chunks: - sibling.offset -= size_diff - sibling.data_offset -= size_diff - - -class IFFFile(object): - """Representation of a IFF file""" +class AIFFFile(IffFile): + """Representation of a AIFF file""" def __init__(self, fileobj): # AIFF Files always start with the FORM chunk which contains a 4 byte # ID before the start of other chunks - fileobj.seek(0) - self.root = IFFChunk.parse(fileobj) + super().__init__(AIFFChunk, fileobj) if self.root.id != u'FORM': - raise InvalidChunk("Root chunk must be a RIFF chunk, got %s" + raise InvalidChunk("Root chunk must be a FORM chunk, got %s" % self.root.id) def __contains__(self, id_): - """Check if the IFF file contains a specific chunk""" - - assert_valid_chunk_id(id_) - try: - self[id_] + if id_ == 'FORM': # For backwards compatibility return True - except KeyError: - return False + return super().__contains__(id_) def __getitem__(self, id_): - """Get a chunk from the IFF file""" - - assert_valid_chunk_id(id_) if id_ == 'FORM': # For backwards compatibility return self.root - found_chunk = None - for chunk in self.root.subchunks(): - if chunk.id == id_: - found_chunk = chunk - break - else: - raise KeyError("No %r chunk found" % id_) - return found_chunk - - def __delitem__(self, id_): - """Remove a chunk from the IFF file""" - - assert_valid_chunk_id(id_) - self.delete_chunk(id_) - - def delete_chunk(self, id_): - """Remove a chunk from the RIFF file""" - - assert_valid_chunk_id(id_) - self[id_].delete() - - def insert_chunk(self, id_, data=None): - """Insert a new chunk at the end of the IFF file""" - - assert_valid_chunk_id(id_) - return self.root.insert_chunk(id_, data) + return super().__getitem__(id_) class AIFFInfo(StreamInfo): @@ -345,7 +142,7 @@ class AIFFInfo(StreamInfo): def __init__(self, fileobj): """Raises error""" - iff = IFFFile(fileobj) + iff = AIFFFile(fileobj) try: common_chunk = iff[u'COMM'] except KeyError as e: @@ -358,9 +155,15 @@ def __init__(self, fileobj): info = struct.unpack('>hLh10s', data[:18]) channels, frame_count, sample_size, sample_rate = info - self.sample_rate = int(read_float(sample_rate)) - if self.sample_rate > 0: + try: + self.sample_rate = int(read_float(sample_rate)) + except OverflowError: + raise error("Invalid sample rate") + if self.sample_rate < 0: + raise error("Invalid sample rate") + if self.sample_rate != 0: self.length = frame_count / float(self.sample_rate) + self.bits_per_sample = sample_size self.sample_size = sample_size # For backward compatibility self.channels = channels @@ -371,45 +174,11 @@ def pprint(self): self.channels, self.bitrate, self.sample_rate, self.length) -class _IFFID3(ID3): +class _IFFID3(IffID3): """A AIFF file with ID3v2 tags""" - def _pre_load_header(self, fileobj): - try: - fileobj.seek(IFFFile(fileobj)[u'ID3'].data_offset) - except (InvalidChunk, KeyError): - raise ID3NoHeaderError("No ID3 chunk") - - @convert_error(IOError, error) - @loadfile(writable=True) - def save(self, filething=None, v2_version=4, v23_sep='/', padding=None): - """Save ID3v2 data to the AIFF file""" - - fileobj = filething.fileobj - - iff_file = IFFFile(fileobj) - - if u'ID3' not in iff_file: - iff_file.insert_chunk(u'ID3') - - chunk = iff_file[u'ID3'] - - try: - data = self._prepare_data( - fileobj, chunk.data_offset, chunk.data_size, v2_version, - v23_sep, padding) - except ID3Error as e: - reraise(error, e, sys.exc_info()[2]) - - 
chunk.resize(len(data)) - chunk.write(data) - - @loadfile(writable=True) - def delete(self, filething=None): - """Completely removes the ID3 chunk from the AIFF file""" - - delete(filething) - self.clear() + def _load_file(self, fileobj): + return AIFFFile(fileobj) @convert_error(IOError, error) @@ -418,7 +187,7 @@ def delete(filething): """Completely removes the ID3 chunk from the AIFF file""" try: - del IFFFile(filething.fileobj)[u'ID3'] + del AIFFFile(filething.fileobj)[u'ID3'] except KeyError: pass diff --git a/script.module.mutagen/lib/mutagen/apev2.py b/script.module.mutagen/lib/mutagen/apev2.py index 76f70391a..4c57d94d3 100644 --- a/script.module.mutagen/lib/mutagen/apev2.py +++ b/script.module.mutagen/lib/mutagen/apev2.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2005 Joe Wreschnig # # This program is free software; you can redistribute it and/or modify @@ -32,29 +31,18 @@ import sys import struct -try: - # Python 3 - from collections.abc import MutableSequence -except ImportError: - # Python 2.7 - from collections import MutableSequence - -from ._compat import (cBytesIO, PY3, text_type, PY2, reraise, swap_to_string, - xrange) +from io import BytesIO +from collections.abc import MutableSequence + from mutagen import Metadata, FileType, StreamInfo from mutagen._util import DictMixin, cdata, delete_bytes, total_ordering, \ - MutagenError, loadfile, convert_error, seek_end, get_size + MutagenError, loadfile, convert_error, seek_end, get_size, reraise def is_valid_apev2_key(key): - if not isinstance(key, text_type): - if PY3: - raise TypeError("APEv2 key must be str") - - try: - key = key.decode('ascii') - except UnicodeDecodeError: - return False + # https://wiki.hydrogenaud.io/index.php?title=APE_key + if not isinstance(key, str): + raise TypeError("APEv2 key must be str") # PY26 - Change to set literal syntax (since set is faster than list here) return ((2 <= len(key) <= 255) and (min(key) >= u' ') and @@ -66,7 +54,7 @@ def is_valid_apev2_key(key): # 1: Item contains binary information # 2: Item is a locator of external stored information [e.g. 
URL] # 3: reserved" -TEXT, BINARY, EXTERNAL = xrange(3) +TEXT, BINARY, EXTERNAL = range(3) HAS_HEADER = 1 << 31 HAS_NO_FOOTER = 1 << 30 @@ -264,8 +252,8 @@ def __setitem__(self, key, value): def __delitem__(self, key): lower = key.lower() - del(self.__casemap[lower]) - del(self.__dict[lower]) + del self.__casemap[lower] + del self.__dict[lower] def keys(self): return [self.__casemap.get(key, key) for key in self.__dict.keys()] @@ -306,9 +294,9 @@ def load(self, filething): def __parse_tag(self, tag, count): """Raises IOError and APEBadItemError""" - fileobj = cBytesIO(tag) + fileobj = BytesIO(tag) - for i in xrange(count): + for i in range(count): tag_data = fileobj.read(8) # someone writes wrong item counts if not tag_data: @@ -335,11 +323,14 @@ def __parse_tag(self, tag, count): if key[-1:] == b"\x00": key = key[:-1] - if PY3: - try: - key = key.decode("ascii") - except UnicodeError as err: - reraise(APEBadItemError, err, sys.exc_info()[2]) + try: + key = key.decode("ascii") + except UnicodeError as err: + reraise(APEBadItemError, err, sys.exc_info()[2]) + + if not is_valid_apev2_key(key): + raise APEBadItemError("%r is not a valid APEv2 key" % key) + value = fileobj.read(size) if len(value) != size: raise APEBadItemError @@ -351,16 +342,12 @@ def __parse_tag(self, tag, count): def __getitem__(self, key): if not is_valid_apev2_key(key): raise KeyError("%r is not a valid APEv2 key" % key) - if PY2: - key = key.encode('ascii') return super(APEv2, self).__getitem__(key) def __delitem__(self, key): if not is_valid_apev2_key(key): raise KeyError("%r is not a valid APEv2 key" % key) - if PY2: - key = key.encode('ascii') super(APEv2, self).__delitem__(key) @@ -388,37 +375,22 @@ def __setitem__(self, key, value): if not is_valid_apev2_key(key): raise KeyError("%r is not a valid APEv2 key" % key) - if PY2: - key = key.encode('ascii') - if not isinstance(value, _APEValue): # let's guess at the content if we're not already a value... - if isinstance(value, text_type): + if isinstance(value, str): # unicode? we've got to be text. value = APEValue(value, TEXT) elif isinstance(value, list): items = [] for v in value: - if not isinstance(v, text_type): - if PY3: - raise TypeError("item in list not str") - v = v.decode("utf-8") + if not isinstance(v, str): + raise TypeError("item in list not str") items.append(v) # list? text. 
value = APEValue(u"\0".join(items), TEXT) else: - if PY3: - value = APEValue(value, BINARY) - else: - try: - value.decode("utf-8") - except UnicodeError: - # invalid UTF8 text, probably binary - value = APEValue(value, BINARY) - else: - # valid UTF8, probably text - value = APEValue(value, TEXT) + value = APEValue(value, BINARY) super(APEv2, self).__setitem__(key, value) @@ -549,7 +521,7 @@ def APEValue(value, kind): class _APEValue(object): - kind = None + kind: int value = None def __init__(self, value, kind=None): @@ -583,7 +555,6 @@ def __repr__(self): return "%s(%r, %d)" % (type(self).__name__, self.value, self.kind) -@swap_to_string @total_ordering class _APEUtf8Value(_APEValue): @@ -594,11 +565,8 @@ def _parse(self, data): reraise(APEBadItemError, e, sys.exc_info()[2]) def _validate(self, value): - if not isinstance(value, text_type): - if PY3: - raise TypeError("value not str") - else: - value = value.decode("utf-8") + if not isinstance(value, str): + raise TypeError("value not str") return value def _write(self): @@ -641,22 +609,16 @@ def __len__(self): return self.value.count(u"\0") + 1 def __setitem__(self, index, value): - if not isinstance(value, text_type): - if PY3: - raise TypeError("value not str") - else: - value = value.decode("utf-8") + if not isinstance(value, str): + raise TypeError("value not str") values = list(self) values[index] = value self.value = u"\0".join(values) def insert(self, index, value): - if not isinstance(value, text_type): - if PY3: - raise TypeError("value not str") - else: - value = value.decode("utf-8") + if not isinstance(value, str): + raise TypeError("value not str") values = list(self) values.insert(index, value) @@ -671,7 +633,6 @@ def pprint(self): return u" / ".join(self) -@swap_to_string @total_ordering class APEBinaryValue(_APEValue): """An APEv2 binary value.""" diff --git a/script.module.mutagen/lib/mutagen/asf/__init__.py b/script.module.mutagen/lib/mutagen/asf/__init__.py index 5b9bb72c1..a756a6917 100644 --- a/script.module.mutagen/lib/mutagen/asf/__init__.py +++ b/script.module.mutagen/lib/mutagen/asf/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2005-2006 Joe Wreschnig # Copyright (C) 2006-2007 Lukas Lalinsky # @@ -13,7 +12,6 @@ from mutagen import FileType, Tags, StreamInfo from mutagen._util import resize_bytes, DictMixin, loadfile, convert_error -from mutagen._compat import string_types, long_, PY3, izip from ._util import error, ASFError, ASFHeaderError from ._objects import HeaderObject, MetadataLibraryObject, MetadataObject, \ @@ -24,7 +22,7 @@ ASFUnicodeAttribute, ASFBaseAttribute, ASFValue -# pyflakes +# flake8 error, ASFError, ASFHeaderError, ASFValue @@ -75,7 +73,7 @@ def pprint(self): return s -class ASFTags(list, DictMixin, Tags): +class ASFTags(list, DictMixin, Tags): # type: ignore """ASFTags() Dictionary containing ASF attributes. @@ -89,7 +87,6 @@ def __getitem__(self, key): """ - # PY3 only if isinstance(key, slice): return list.__getitem__(self, key) @@ -102,7 +99,6 @@ def __getitem__(self, key): def __delitem__(self, key): """Delete all values associated with the key.""" - # PY3 only if isinstance(key, slice): return list.__delitem__(self, key) @@ -129,7 +125,6 @@ def __setitem__(self, key, values): string. 
""" - # PY3 only if isinstance(key, slice): return list.__setitem__(self, key, values) @@ -139,22 +134,20 @@ def __setitem__(self, key, values): to_append = [] for value in values: if not isinstance(value, ASFBaseAttribute): - if isinstance(value, string_types): + if isinstance(value, str): value = ASFUnicodeAttribute(value) - elif PY3 and isinstance(value, bytes): + elif isinstance(value, bytes): value = ASFByteArrayAttribute(value) elif isinstance(value, bool): value = ASFBoolAttribute(value) elif isinstance(value, int): value = ASFDWordAttribute(value) - elif isinstance(value, long_): - value = ASFQWordAttribute(value) else: raise TypeError("Invalid type %r" % type(value)) to_append.append((key, value)) try: - del(self[key]) + del self[key] except KeyError: pass @@ -163,7 +156,7 @@ def __setitem__(self, key, values): def keys(self): """Return a sequence of all keys in the comment.""" - return self and set(next(izip(*self))) + return self and set(next(zip(*self))) def as_dict(self): """Return a copy of the comment data in a real dict.""" diff --git a/script.module.mutagen/lib/mutagen/asf/_attrs.py b/script.module.mutagen/lib/mutagen/asf/_attrs.py index 8111c1c27..0417d9db4 100644 --- a/script.module.mutagen/lib/mutagen/asf/_attrs.py +++ b/script.module.mutagen/lib/mutagen/asf/_attrs.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2005-2006 Joe Wreschnig # Copyright (C) 2006-2007 Lukas Lalinsky # @@ -9,9 +8,9 @@ import sys import struct +from typing import Dict, Type -from mutagen._compat import swap_to_string, text_type, PY2, reraise -from mutagen._util import total_ordering +from mutagen._util import total_ordering, reraise from ._util import ASFError @@ -19,9 +18,9 @@ class ASFBaseAttribute(object): """Generic attribute.""" - TYPE = None + TYPE: int - _TYPES = {} + _TYPES: "Dict[int, Type[ASFBaseAttribute]]" = {} value = None """The Python value of this attribute (type depends on the class)""" @@ -103,7 +102,6 @@ def render_ml(self, name): @ASFBaseAttribute._register -@swap_to_string @total_ordering class ASFUnicodeAttribute(ASFBaseAttribute): """Unicode string attribute. @@ -122,11 +120,8 @@ def parse(self, data): reraise(ASFError, e, sys.exc_info()[2]) def _validate(self, value): - if not isinstance(value, text_type): - if PY2: - return value.decode("utf-8") - else: - raise TypeError("%r not str" % value) + if not isinstance(value, str): + raise TypeError("%r not str" % value) return value def _render(self): @@ -142,16 +137,15 @@ def __str__(self): return self.value def __eq__(self, other): - return text_type(self) == other + return str(self) == other def __lt__(self, other): - return text_type(self) < other + return str(self) < other __hash__ = ASFBaseAttribute.__hash__ @ASFBaseAttribute._register -@swap_to_string @total_ordering class ASFByteArrayAttribute(ASFBaseAttribute): """Byte array attribute. @@ -194,7 +188,6 @@ def __lt__(self, other): @ASFBaseAttribute._register -@swap_to_string @total_ordering class ASFBoolAttribute(ASFBaseAttribute): """Bool attribute. @@ -228,10 +221,10 @@ def __bool__(self): return bool(self.value) def __bytes__(self): - return text_type(self.value).encode('utf-8') + return str(self.value).encode('utf-8') def __str__(self): - return text_type(self.value) + return str(self.value) def __eq__(self, other): return bool(self.value) == other @@ -243,7 +236,6 @@ def __lt__(self, other): @ASFBaseAttribute._register -@swap_to_string @total_ordering class ASFDWordAttribute(ASFBaseAttribute): """DWORD attribute. 
@@ -274,10 +266,10 @@ def __int__(self): return self.value def __bytes__(self): - return text_type(self.value).encode('utf-8') + return str(self.value).encode('utf-8') def __str__(self): - return text_type(self.value) + return str(self.value) def __eq__(self, other): return int(self.value) == other @@ -289,7 +281,6 @@ def __lt__(self, other): @ASFBaseAttribute._register -@swap_to_string @total_ordering class ASFQWordAttribute(ASFBaseAttribute): """QWORD attribute. @@ -320,10 +311,10 @@ def __int__(self): return self.value def __bytes__(self): - return text_type(self.value).encode('utf-8') + return str(self.value).encode('utf-8') def __str__(self): - return text_type(self.value) + return str(self.value) def __eq__(self, other): return int(self.value) == other @@ -335,7 +326,6 @@ def __lt__(self, other): @ASFBaseAttribute._register -@swap_to_string @total_ordering class ASFWordAttribute(ASFBaseAttribute): """WORD attribute. @@ -366,10 +356,10 @@ def __int__(self): return self.value def __bytes__(self): - return text_type(self.value).encode('utf-8') + return str(self.value).encode('utf-8') def __str__(self): - return text_type(self.value) + return str(self.value) def __eq__(self, other): return int(self.value) == other @@ -381,7 +371,6 @@ def __lt__(self, other): @ASFBaseAttribute._register -@swap_to_string @total_ordering class ASFGUIDAttribute(ASFBaseAttribute): """GUID attribute.""" diff --git a/script.module.mutagen/lib/mutagen/asf/_objects.py b/script.module.mutagen/lib/mutagen/asf/_objects.py index 156a304d8..df95bacea 100644 --- a/script.module.mutagen/lib/mutagen/asf/_objects.py +++ b/script.module.mutagen/lib/mutagen/asf/_objects.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2005-2006 Joe Wreschnig # Copyright (C) 2006-2007 Lukas Lalinsky # @@ -8,9 +7,9 @@ # (at your option) any later version. 
import struct +from typing import Dict, Type from mutagen._util import cdata, get_size -from mutagen._compat import text_type, xrange, izip from mutagen._tags import PaddingInfo from ._util import guid2bytes, bytes2guid, CODECS, ASFError, ASFHeaderError @@ -20,8 +19,8 @@ class BaseObject(object): """Base ASF object.""" - GUID = None - _TYPES = {} + GUID: bytes + _TYPES: "Dict[bytes, Type[BaseObject]]" = {} def __init__(self): self.objects = [] @@ -89,7 +88,7 @@ def parse_full(cls, asf, fileobj): remaining_header, num_objects = cls.parse_size(fileobj) remaining_header -= 30 - for i in xrange(num_objects): + for i in range(num_objects): obj_header_size = 24 if remaining_header < obj_header_size: raise ASFHeaderError("invalid header size") @@ -114,7 +113,10 @@ def parse_full(cls, asf, fileobj): if len(data) != payload_size: raise ASFHeaderError("truncated") - obj.parse(asf, data) + try: + obj.parse(asf, data) + except struct.error: + raise ASFHeaderError("truncated") header.objects.append(obj) return header @@ -151,7 +153,8 @@ def render_full(self, asf, fileobj, available, padding_func): # ask the user for padding adjustments file_size = get_size(fileobj) content_size = file_size - available - assert content_size >= 0 + if content_size < 0: + raise ASFHeaderError("truncated content") info = PaddingInfo(available - needed_size, content_size) # add padding @@ -200,7 +203,7 @@ def parse(self, asf, data): texts.append(None) pos = end - for key, value in izip(self.NAMES, texts): + for key, value in zip(self.NAMES, texts): if value is not None: value = ASFUnicodeAttribute(value=value) asf._tags.setdefault(self.GUID, []).append((key, value)) @@ -209,7 +212,7 @@ def render(self, asf): def render_text(name): value = asf.to_content_description.get(name) if value is not None: - return text_type(value).encode("utf-16-le") + b"\x00\x00" + return str(value).encode("utf-16-le") + b"\x00\x00" else: return b"" @@ -228,7 +231,7 @@ def parse(self, asf, data): super(ExtendedContentDescriptionObject, self).parse(asf, data) num_attributes, = struct.unpack("= 0 asf.info.length = max((length / 10000000.0) - (preroll / 1000.0), 0.0) @@ -319,7 +324,7 @@ def parse(self, asf, data): offset = 16 count, offset = cdata.uint32_le_from(data, offset) - for i in xrange(count): + for i in range(count): try: offset, type_, name, desc, codec = \ self._parse_entry(data, offset) @@ -377,6 +382,8 @@ def parse(self, asf, data): while datapos < datasize: guid, size = struct.unpack( "<16sQ", data[22 + datapos:22 + datapos + 24]) + if size < 1: + raise ASFHeaderError("invalid size in header extension") obj = BaseObject._get_object(guid) obj.parse(asf, data[22 + datapos + 24:22 + datapos + size]) self.objects.append(obj) @@ -407,7 +414,7 @@ def parse(self, asf, data): super(MetadataObject, self).parse(asf, data) num_attributes, = struct.unpack(" bytes: """Converts a GUID to the serialized bytes representation""" assert isinstance(s, str) @@ -38,13 +38,13 @@ def guid2bytes(s): ]) -def bytes2guid(s): +def bytes2guid(s: bytes) -> str: """Converts a serialized GUID to a text GUID""" assert isinstance(s, bytes) u = struct.unpack - v = [] + v: List[int] = [] v.extend(u("HQ", s[8:10] + b"\x00\x00" + s[10:])) return "%08X-%04X-%04X-%04X-%012X" % tuple(v) @@ -285,7 +285,7 @@ def bytes2guid(s): 0xA10C: u"Media Foundation Spectrum Analyzer Output", 0xA10D: u"GSM 6.10 (Full-Rate) Speech", 0xA10E: u"GSM 6.20 (Half-Rate) Speech", - 0xA10F: u"GSM 6.60 (Enchanced Full-Rate) Speech", + 0xA10F: u"GSM 6.60 (Enhanced Full-Rate) Speech", 0xA110: u"GSM 6.90 
(Adaptive Multi-Rate) Speech", 0xA111: u"GSM Adaptive Multi-Rate WideBand Speech", 0xA112: u"Polycom G.722", diff --git a/script.module.mutagen/lib/mutagen/dsdiff.py b/script.module.mutagen/lib/mutagen/dsdiff.py new file mode 100644 index 000000000..c4292b7c1 --- /dev/null +++ b/script.module.mutagen/lib/mutagen/dsdiff.py @@ -0,0 +1,266 @@ +# Copyright (C) 2020 Philipp Wolfer +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +"""DSDIFF audio stream information and tags.""" + +import struct + +from mutagen import StreamInfo +from mutagen._file import FileType +from mutagen._iff import ( + IffChunk, + IffContainerChunkMixin, + IffID3, + IffFile, + InvalidChunk, + error as IffError, +) +from mutagen.id3._util import ID3NoHeaderError, error as ID3Error +from mutagen._util import ( + convert_error, + loadfile, + endswith, +) + + +__all__ = ["DSDIFF", "Open", "delete"] + + +class error(IffError): + pass + + +# See +# https://dsd-guide.com/sites/default/files/white-papers/DSDIFF_1.5_Spec.pdf +class DSDIFFChunk(IffChunk): + """Representation of a single DSDIFF chunk""" + + HEADER_SIZE = 12 + + @classmethod + def parse_header(cls, header): + return struct.unpack('>4sQ', header) + + @classmethod + def get_class(cls, id): + if id in DSDIFFListChunk.LIST_CHUNK_IDS: + return DSDIFFListChunk + elif id == 'DST': + return DSTChunk + else: + return cls + + def write_new_header(self, id_, size): + self._fileobj.write(struct.pack('>4sQ', id_, size)) + + def write_size(self): + self._fileobj.write(struct.pack('>Q', self.data_size)) + + +class DSDIFFListChunk(DSDIFFChunk, IffContainerChunkMixin): + """A DSDIFF chunk containing other chunks. + """ + + LIST_CHUNK_IDS = ['FRM8', 'PROP'] + + def parse_next_subchunk(self): + return DSDIFFChunk.parse(self._fileobj, self) + + def __init__(self, fileobj, id, data_size, parent_chunk): + if id not in self.LIST_CHUNK_IDS: + raise InvalidChunk('Not a list chunk: %s' % id) + + DSDIFFChunk.__init__(self, fileobj, id, data_size, parent_chunk) + self.init_container() + + +class DSTChunk(DSDIFFChunk, IffContainerChunkMixin): + """A DSDIFF chunk containing other chunks. + """ + + def parse_next_subchunk(self): + return DSDIFFChunk.parse(self._fileobj, self) + + def __init__(self, fileobj, id, data_size, parent_chunk): + if id != 'DST': + raise InvalidChunk('Not a DST chunk: %s' % id) + + DSDIFFChunk.__init__(self, fileobj, id, data_size, parent_chunk) + self.init_container(name_size=0) + + +class DSDIFFFile(IffFile): + """Representation of a DSDIFF file""" + + def __init__(self, fileobj): + super().__init__(DSDIFFChunk, fileobj) + + if self.root.id != u'FRM8': + raise InvalidChunk("Root chunk must be a FRM8 chunk, got %r" + % self.root) + + +class DSDIFFInfo(StreamInfo): + + """DSDIFF stream information. 
+ + Attributes: + channels (`int`): number of audio channels + length (`float`): file length in seconds, as a float + sample_rate (`int`): audio sampling rate in Hz + bits_per_sample (`int`): audio sample size (for DSD this is always 1) + bitrate (`int`): audio bitrate, in bits per second + compression (`str`): DSD (uncompressed) or DST + """ + + channels = 0 + length = 0 + sample_rate = 0 + bits_per_sample = 1 + bitrate = 0 + compression = None + + @convert_error(IOError, error) + def __init__(self, fileobj): + """Raises error""" + + iff = DSDIFFFile(fileobj) + try: + prop_chunk = iff['PROP'] + except KeyError as e: + raise error(str(e)) + + if prop_chunk.name == 'SND ': + for chunk in prop_chunk.subchunks(): + if chunk.id == 'FS' and chunk.data_size == 4: + data = chunk.read() + if len(data) < 4: + raise InvalidChunk("Not enough data in FS chunk") + self.sample_rate, = struct.unpack('>L', data[:4]) + elif chunk.id == 'CHNL' and chunk.data_size >= 2: + data = chunk.read() + if len(data) < 2: + raise InvalidChunk("Not enough data in CHNL chunk") + self.channels, = struct.unpack('>H', data[:2]) + elif chunk.id == 'CMPR' and chunk.data_size >= 4: + data = chunk.read() + if len(data) < 4: + raise InvalidChunk("Not enough data in CMPR chunk") + compression_id, = struct.unpack('>4s', data[:4]) + self.compression = compression_id.decode('ascii').rstrip() + + if self.sample_rate < 0: + raise error("Invalid sample rate") + + if self.compression == 'DSD': # not compressed + try: + dsd_chunk = iff['DSD'] + except KeyError as e: + raise error(str(e)) + + # DSD data has one bit per sample. Eight samples of a channel + # are clustered together for a channel byte. For multiple channels + # the channel bytes are interleaved (in the order specified in the + # CHNL chunk). See DSDIFF spec chapter 3.3. + sample_count = dsd_chunk.data_size * 8 / (self.channels or 1) + + if self.sample_rate != 0: + self.length = sample_count / float(self.sample_rate) + + self.bitrate = (self.channels * self.bits_per_sample + * self.sample_rate) + elif self.compression == 'DST': + try: + dst_frame = iff['DST'] + dst_frame_info = dst_frame['FRTE'] + except KeyError as e: + raise error(str(e)) + + if dst_frame_info.data_size >= 6: + data = dst_frame_info.read() + if len(data) < 6: + raise InvalidChunk("Not enough data in FRTE chunk") + frame_count, frame_rate = struct.unpack('>LH', data[:6]) + if frame_rate: + self.length = frame_count / frame_rate + + if frame_count: + dst_data_size = dst_frame.data_size - dst_frame_info.size + avg_frame_size = dst_data_size / frame_count + self.bitrate = avg_frame_size * 8 * frame_rate + + def pprint(self): + return u"%d channel DSDIFF (%s) @ %d bps, %s Hz, %.2f seconds" % ( + self.channels, self.compression, self.bitrate, self.sample_rate, + self.length) + + +class _DSDIFFID3(IffID3): + """A DSDIFF file with ID3v2 tags""" + + def _load_file(self, fileobj): + return DSDIFFFile(fileobj) + + +@convert_error(IOError, error) +@loadfile(method=False, writable=True) +def delete(filething): + """Completely removes the ID3 chunk from the DSDIFF file""" + + try: + del DSDIFFFile(filething.fileobj)[u'ID3'] + except KeyError: + pass + + +class DSDIFF(FileType): + """DSDIFF(filething) + + An DSDIFF audio file. + + For tagging ID3v2 data is added to a chunk with the ID "ID3 ". 
+ + Arguments: + filething (filething) + + Attributes: + tags (`mutagen.id3.ID3`) + info (`DSDIFFInfo`) + """ + + _mimes = ["audio/x-dff"] + + @convert_error(IOError, error) + @loadfile() + def load(self, filething, **kwargs): + fileobj = filething.fileobj + + try: + self.tags = _DSDIFFID3(fileobj, **kwargs) + except ID3NoHeaderError: + self.tags = None + except ID3Error as e: + raise error(e) + else: + self.tags.filename = self.filename + + fileobj.seek(0, 0) + self.info = DSDIFFInfo(fileobj) + + def add_tags(self): + """Add empty ID3 tags to the file.""" + if self.tags is None: + self.tags = _DSDIFFID3() + else: + raise error("an ID3 tag already exists") + + @staticmethod + def score(filename, fileobj, header): + return header.startswith(b"FRM8") * 2 + endswith(filename, ".dff") + + +Open = DSDIFF diff --git a/script.module.mutagen/lib/mutagen/dsf.py b/script.module.mutagen/lib/mutagen/dsf.py index ae39cd8d5..121a01bae 100644 --- a/script.module.mutagen/lib/mutagen/dsf.py +++ b/script.module.mutagen/lib/mutagen/dsf.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2017 Boris Pruessmann # # This program is free software; you can redistribute it and/or modify @@ -11,11 +10,11 @@ import sys import struct - -from ._compat import cBytesIO, reraise, endswith +from io import BytesIO from mutagen import FileType, StreamInfo -from mutagen._util import cdata, MutagenError, loadfile, convert_error +from mutagen._util import cdata, MutagenError, loadfile, \ + convert_error, reraise, endswith from mutagen.id3 import ID3 from mutagen.id3._util import ID3NoHeaderError, error as ID3Error @@ -80,7 +79,7 @@ def load(self): self.offset_metdata_chunk = cdata.ulonglong_le(data[20:28]) def write(self): - f = cBytesIO() + f = BytesIO() f.write(self.chunk_header) f.write(struct.pack("I", self.min_blocksize)[-2:]) f.write(struct.pack(">I", self.max_blocksize)[-2:]) f.write(struct.pack(">I", self.min_framesize)[-3:]) @@ -244,11 +243,11 @@ def write(self): byte = (self.sample_rate & 0xF) << 4 byte += ((self.channels - 1) & 7) << 1 byte += ((self.bits_per_sample - 1) >> 4) & 1 - f.write(chr_(byte)) + f.write(bchr(byte)) # 4 bits of bps, 4 of sample count byte = ((self.bits_per_sample - 1) & 0xF) << 4 byte += (self.total_samples >> 32) & 0xF - f.write(chr_(byte)) + f.write(bchr(byte)) # last 32 of sample count f.write(struct.pack(">I", self.total_samples & 0xFFFFFFFF)) # MD5 signature @@ -281,7 +280,7 @@ class SeekPoint(tuple): """ def __new__(cls, first_sample, byte_offset, num_samples): - return super(cls, SeekPoint).__new__( + return super(SeekPoint, cls).__new__( cls, (first_sample, byte_offset, num_samples)) def __getnewargs__(self): @@ -325,7 +324,7 @@ def load(self, data): sp = data.tryread(self.__SEEKPOINT_SIZE) def write(self): - f = cBytesIO() + f = BytesIO() for seekpoint in self.seekpoints: packed = struct.pack( self.__SEEKPOINT_FORMAT, @@ -374,7 +373,7 @@ class CueSheetTrackIndex(tuple): """ def __new__(cls, index_number, index_offset): - return super(cls, CueSheetTrackIndex).__new__( + return super(CueSheetTrackIndex, cls).__new__( cls, (index_number, index_offset)) index_number = property(lambda self: self[0]) @@ -388,7 +387,7 @@ class CueSheetTrack(object): For CD-DA, track_numbers must be 1-99, or 170 for the lead-out. Track_numbers must be unique within a cue sheet. There - must be atleast one index in every track except the lead-out track + must be at least one index in every track except the lead-out track which must have none. 
Attributes: @@ -487,7 +486,7 @@ def load(self, data): self.lead_in_samples = lead_in_samples self.compact_disc = bool(flags & 0x80) self.tracks = [] - for i in xrange(num_tracks): + for i in range(num_tracks): track = data.read(self.__CUESHEET_TRACK_SIZE) start_offset, track_number, isrc_padded, flags, num_indexes = \ struct.unpack(self.__CUESHEET_TRACK_FORMAT, track) @@ -496,7 +495,7 @@ def load(self, data): pre_emphasis = bool(flags & 0x40) val = CueSheetTrack( track_number, start_offset, isrc, type_, pre_emphasis) - for j in xrange(num_indexes): + for j in range(num_indexes): index = data.read(self.__CUESHEET_TRACKINDEX_SIZE) index_offset, index_number = struct.unpack( self.__CUESHEET_TRACKINDEX_FORMAT, index) @@ -505,7 +504,7 @@ def load(self, data): self.tracks.append(val) def write(self): - f = cBytesIO() + f = BytesIO() flags = 0 if self.compact_disc: flags |= 0x80 @@ -520,7 +519,7 @@ def write(self): track_flags |= 0x40 track_packed = struct.pack( self.__CUESHEET_TRACK_FORMAT, track.start_offset, - track.track_number, track.isrc, track_flags, + track.track_number, track.isrc or b"\0", track_flags, len(track.indexes)) f.write(track_packed) for index in track.indexes: @@ -611,7 +610,7 @@ def load(self, data): self.data = data.read(length) def write(self): - f = cBytesIO() + f = BytesIO() mime = self.mime.encode('UTF-8') f.write(struct.pack('>2I', self.type, len(mime))) f.write(mime) @@ -688,7 +687,6 @@ class FLAC(mutagen.FileType): _mimes = ["audio/flac", "audio/x-flac", "application/x-flac"] - info = None tags = None METADATA_BLOCKS = [StreamInfo, Padding, None, SeekTable, VCFLACDict, @@ -714,7 +712,7 @@ def __read_metadata_block(self, fileobj): if block_type._distrust_size: # Some jackass is writing broken Metadata block length # for Vorbis comment blocks, and the FLAC reference - # implementaton can parse them (mostly by accident), + # implementation can parse them (mostly by accident), # so we have to too. Instead of parsing the size # given, parse an actual Vorbis comment, leaving # fileobj in the right position. @@ -798,7 +796,7 @@ def load(self, filething): pass try: - self.metadata_blocks[0].length + self.info.length except (AttributeError, IndexError): raise FLACNoHeaderError("Stream info block not found") @@ -812,7 +810,11 @@ def load(self, filething): @property def info(self): - return self.metadata_blocks[0] + streaminfo_blocks = [ + block for block in self.metadata_blocks + if block.code == StreamInfo.code + ] + return streaminfo_blocks[0] def add_picture(self, picture): """Add a new picture to the file. @@ -830,8 +832,6 @@ def clear_pictures(self): @property def pictures(self): - """list[Picture]: List of embedded pictures""" - return [b for b in self.metadata_blocks if b.code == Picture.code] @convert_error(IOError, error) @@ -846,6 +846,15 @@ def save(self, filething=None, deleteid3=False, padding=None): If no filename is given, the one most recently loaded is used. 
""" + # add new cuesheet and seektable + if self.cuesheet and self.cuesheet not in self.metadata_blocks: + if not isinstance(self.cuesheet, CueSheet): + raise ValueError("Invalid cuesheet object type!") + self.metadata_blocks.append(self.cuesheet) + if self.seektable and self.seektable not in self.metadata_blocks: + if not isinstance(self.seektable, SeekTable): + raise ValueError("Invalid seektable object type!") + self.metadata_blocks.append(self.seektable) self._save(filething, self.metadata_blocks, deleteid3, padding) diff --git a/script.module.mutagen/lib/mutagen/id3/__init__.py b/script.module.mutagen/lib/mutagen/id3/__init__.py index 9033c76ca..b70638bd7 100644 --- a/script.module.mutagen/lib/mutagen/id3/__init__.py +++ b/script.module.mutagen/lib/mutagen/id3/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2005 Michael Urman # 2006 Lukas Lalinsky # 2013 Christoph Reiter @@ -61,7 +60,7 @@ # support open(filename) as interface Open = ID3 -# pyflakes +# flake8 ID3, ID3FileType, delete, ID3v1SaveOptions, Encoding, PictureType, CTOCFlags, ID3TimeStamp, Frames, Frames_2_2, Frame, TextFrame, UrlFrame, UrlFrameU, TimeStampTextFrame, BinaryFrame, NumericPartTextFrame, NumericTextFrame, diff --git a/script.module.mutagen/lib/mutagen/id3/_file.py b/script.module.mutagen/lib/mutagen/id3/_file.py index ddba4850b..a2737e8fb 100644 --- a/script.module.mutagen/lib/mutagen/id3/_file.py +++ b/script.module.mutagen/lib/mutagen/id3/_file.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2005 Michael Urman # 2006 Lukas Lalinsky # 2013 Christoph Reiter @@ -78,8 +77,6 @@ def __init__(self, *args, **kwargs): @property def version(self): - """`tuple`: ID3 tag version as a tuple (of the loaded file)""" - if self._header is not None: return self._header.version return self._version @@ -234,7 +231,7 @@ def save(self, filething=None, v1=1, v2_version=4, v23_sep='/', if 0, ID3v1 tags will be removed. if 1, ID3v1 tags will be updated but not added. if 2, ID3v1 tags will be created and/or updated - v2 (int): + v2_version (int): version of ID3v2 tags (3 or 4). 
v23_sep (text): the separator used to join multiple text values diff --git a/script.module.mutagen/lib/mutagen/id3/_frames.py b/script.module.mutagen/lib/mutagen/id3/_frames.py index f8c9a8e0d..bee693bdf 100644 --- a/script.module.mutagen/lib/mutagen/id3/_frames.py +++ b/script.module.mutagen/lib/mutagen/id3/_frames.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2005 Michael Urman # # This program is free software; you can redistribute it and/or modify @@ -8,6 +7,7 @@ import zlib from struct import unpack +from typing import Sequence from ._util import ID3JunkFrameError, ID3EncryptionUnsupportedError, unsynch, \ ID3SaveConfig, error @@ -17,9 +17,7 @@ VolumeAdjustmentSpec, ChannelSpec, MultiSpec, SynchronizedTextSpec, \ KeyEventSpec, TimeStampSpec, EncodedNumericPartTextSpec, \ EncodedNumericTextSpec, SpecError, PictureTypeSpec, ID3FramesSpec, \ - Latin1TextListSpec, CTOCFlagsSpec, FrameIDSpec, RVASpec -from .._compat import text_type, string_types, swap_to_string, iteritems, \ - izip, itervalues + Latin1TextListSpec, CTOCFlagsSpec, FrameIDSpec, RVASpec, Spec def _bytes2key(b): @@ -51,8 +49,8 @@ class Frame(object): FLAG24_UNSYNCH = 0x0002 FLAG24_DATALEN = 0x0001 - _framespec = [] - _optionalspec = [] + _framespec: Sequence[Spec] = [] + _optionalspec: Sequence[Spec] = [] def __init__(self, *args, **kwargs): if len(args) == 1 and len(kwargs) == 0 and \ @@ -61,7 +59,7 @@ def __init__(self, *args, **kwargs): # ask the sub class to fill in our data other._to_other(self) else: - for checker, val in izip(self._framespec, args): + for checker, val in zip(self._framespec, args): setattr(self, checker.name, val) for checker in self._framespec[len(args):]: setattr(self, checker.name, @@ -277,6 +275,8 @@ def _fromData(cls, header, tflags, data): elif header.version >= header._V23: if tflags & Frame.FLAG23_COMPRESS: + if len(data) < 4: + raise ID3JunkFrameError('frame too small: %r' % data) usize, = unpack('>L', data[:4]) data = data[4:] if tflags & Frame.FLAG23_ENCRYPT: @@ -291,7 +291,7 @@ def _fromData(cls, header, tflags, data): frame._readData(header, data) return frame - def __hash__(self): + def __hash__(self: object): raise TypeError("Frame objects are unhashable") @@ -330,7 +330,7 @@ def __eq__(self, other): def _pprint(self): frame_pprint = u"" - for frame in itervalues(self.sub_frames): + for frame in self.sub_frames.values(): for line in frame.pprint().splitlines(): frame_pprint += "\n" + " " * 4 + line return u"%s time=%d..%d offset=%d..%d%s" % ( @@ -377,7 +377,6 @@ def _pprint(self): u",".join(self.child_element_ids), frame_pprint) -@swap_to_string class TextFrame(Frame): """Text strings. @@ -399,7 +398,7 @@ class TextFrame(Frame): ] def __bytes__(self): - return text_type(self).encode('utf-8') + return str(self).encode('utf-8') def __str__(self): return u'\u0000'.join(self.text) @@ -407,8 +406,8 @@ def __str__(self): def __eq__(self, other): if isinstance(other, bytes): return bytes(self) == other - elif isinstance(other, text_type): - return text_type(self) == other + elif isinstance(other, str): + return str(self) == other return self.text == other __hash__ = Frame.__hash__ @@ -481,7 +480,6 @@ def __pos__(self): return int(self.text[0].split("/")[0]) -@swap_to_string class TimeStampTextFrame(TextFrame): """A list of time stamps. 
@@ -495,7 +493,7 @@ class TimeStampTextFrame(TextFrame): ] def __bytes__(self): - return text_type(self).encode('utf-8') + return str(self).encode('utf-8') def __str__(self): return u','.join([stamp.text for stamp in self.text]) @@ -504,7 +502,6 @@ def _pprint(self): return u" / ".join([stamp.text for stamp in self.text]) -@swap_to_string class UrlFrame(Frame): """A frame containing a URL string. @@ -517,7 +514,7 @@ class UrlFrame(Frame): ASCII. """ - _framespec = [ + _framespec: Sequence[Spec] = [ Latin1TextSpec('url'), ] @@ -571,7 +568,7 @@ def __get_genres(self): genre_re = re.compile(r"((?:\((?P[0-9]+|RX|CR)\))*)(?P.+)?") for value in self.text: # 255 possible entries in id3v1 - if value.isdigit() and int(value) < 256: + if value.isdecimal() and int(value) < 256: try: genres.append(self.GENRES[int(value)]) except IndexError: @@ -587,7 +584,7 @@ def __get_genres(self): if genreid: for gid in genreid[1:-1].split(")("): if gid.isdigit() and int(gid) < len(self.GENRES): - gid = text_type(self.GENRES[int(gid)]) + gid = str(self.GENRES[int(gid)]) newgenres.append(gid) elif gid == "CR": newgenres.append(u"Cover") @@ -608,7 +605,7 @@ def __get_genres(self): return genres def __set_genres(self, genres): - if isinstance(genres, string_types): + if isinstance(genres, str): genres = [genres] self.text = [self.__decode(g) for g in genres] @@ -835,7 +832,7 @@ class TSOC(TextFrame): class TSOP(TextFrame): - "Perfomer Sort Order key" + "Performer Sort Order key" class TSOT(TextFrame): @@ -1044,7 +1041,6 @@ def __eq__(self, other): __hash__ = Frame.__hash__ -@swap_to_string class USLT(Frame): """Unsynchronised lyrics/text transcription. @@ -1078,7 +1074,6 @@ def _pprint(self): return "%s=%s=%s" % (self.desc, self.lang, self.text) -@swap_to_string class SYLT(Frame): """Synchronised lyrics/text.""" @@ -1109,13 +1104,13 @@ def __str__(self): for (text, time) in self.text) def __bytes__(self): - return text_type(self).encode("utf-8") + return str(self).encode("utf-8") class COMM(TextFrame): """User comment. - User comment frames have a descrption, like TXXX, and also a three + User comment frames have a description, like TXXX, and also a three letter ISO language code in the 'lang' attribute. """ @@ -1284,7 +1279,7 @@ def _merge_frame(self, other): return other def _pprint(self): - type_desc = text_type(self.type) + type_desc = str(self.type) if hasattr(self.type, "_pprint"): type_desc = self.type._pprint() @@ -1314,7 +1309,7 @@ def __pos__(self): return self.count def _pprint(self): - return text_type(self.count) + return str(self.count) class PCST(Frame): @@ -1333,7 +1328,7 @@ def __pos__(self): return self.value def _pprint(self): - return text_type(self.value) + return str(self.value) class POPM(Frame): @@ -1437,7 +1432,6 @@ def __pos__(self): return self.size -@swap_to_string class AENC(Frame): """Audio encryption. @@ -1554,7 +1548,6 @@ def _pprint(self): return "%s=%r" % (self.owner, self.data) -@swap_to_string class USER(Frame): """Terms of use. @@ -1590,7 +1583,6 @@ def _pprint(self): return "%r=%s" % (self.lang, self.text) -@swap_to_string class OWNE(Frame): """Ownership frame.""" @@ -1641,7 +1633,6 @@ def __eq__(self, other): __hash__ = Frame.__hash__ -@swap_to_string class ENCR(Frame): """Encryption method registration. 
@@ -1668,7 +1659,6 @@ def __eq__(self, other): __hash__ = Frame.__hash__ -@swap_to_string class GRID(Frame): """Group identification registration.""" @@ -1697,7 +1687,6 @@ def __eq__(self, other): __hash__ = Frame.__hash__ -@swap_to_string class PRIV(Frame): """Private frame.""" @@ -1723,7 +1712,6 @@ def _pprint(self): __hash__ = Frame.__hash__ -@swap_to_string class SIGN(Frame): """Signature frame.""" @@ -1907,7 +1895,7 @@ class TS2(TSO2): class TSP(TSOP): - "Perfomer Sort Order key" + "Performer Sort Order key" class TSC(TSOC): @@ -1943,7 +1931,7 @@ class TOT(TOAL): class TOA(TOPE): - "Original Artist/Perfomer" + "Original Artist/Performer" class TOL(TOLY): @@ -2007,7 +1995,7 @@ class STC(SYTC): class ULT(USLT): - "Unsychronised lyrics/text transcription" + "Unsynchronised lyrics/text transcription" class SLT(SYLT): @@ -2135,7 +2123,7 @@ def _to_other(self, other): k, v = None, None -for k, v in iteritems(globals()): +for k, v in globals().items(): if isinstance(v, type) and issubclass(v, Frame): v.__module__ = "mutagen.id3" diff --git a/script.module.mutagen/lib/mutagen/id3/_id3v1.py b/script.module.mutagen/lib/mutagen/id3/_id3v1.py index 40aded24d..4e1ca05ff 100644 --- a/script.module.mutagen/lib/mutagen/id3/_id3v1.py +++ b/script.module.mutagen/lib/mutagen/id3/_id3v1.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2005 Michael Urman # 2006 Lukas Lalinsky # 2013 Christoph Reiter @@ -11,7 +10,7 @@ import errno from struct import error as StructError, unpack -from mutagen._util import chr_, text_type +from mutagen._util import bchr from ._frames import TCON, TRCK, COMM, TDRC, TYER, TALB, TPE1, TIT2 @@ -181,7 +180,7 @@ def MakeID3v1(id3): if "TRCK" in id3: try: - v1["track"] = chr_(+id3["TRCK"]) + v1["track"] = bchr(+id3["TRCK"]) except ValueError: v1["track"] = b"\x00" else: @@ -194,14 +193,14 @@ def MakeID3v1(id3): pass else: if genre in TCON.GENRES: - v1["genre"] = chr_(TCON.GENRES.index(genre)) + v1["genre"] = bchr(TCON.GENRES.index(genre)) if "genre" not in v1: v1["genre"] = b"\xff" if "TDRC" in id3: - year = text_type(id3["TDRC"]).encode('ascii') + year = str(id3["TDRC"]).encode('ascii') elif "TYER" in id3: - year = text_type(id3["TYER"]).encode('ascii') + year = str(id3["TYER"]).encode('ascii') else: year = b"" v1["year"] = (year + b"\x00\x00\x00\x00")[:4] diff --git a/script.module.mutagen/lib/mutagen/id3/_specs.py b/script.module.mutagen/lib/mutagen/id3/_specs.py index 637843333..44c191067 100644 --- a/script.module.mutagen/lib/mutagen/id3/_specs.py +++ b/script.module.mutagen/lib/mutagen/id3/_specs.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2005 Michael Urman # # This program is free software; you can redistribute it and/or modify @@ -10,10 +9,8 @@ import codecs from struct import unpack, pack -from .._compat import text_type, chr_, PY3, swap_to_string, string_types, \ - xrange -from .._util import total_ordering, decode_terminated, enum, izip, flags, \ - cdata, encode_endian, intround +from .._util import total_ordering, decode_terminated, enum, flags, \ + cdata, encode_endian, intround, bchr from ._util import BitPaddedInt, is_valid_frame_id @@ -87,7 +84,7 @@ class PictureType(object): """Publisher/Studio logotype""" def _pprint(self): - return text_type(self).split(".", 1)[-1].lower().replace("_", " ") + return str(self).split(".", 1)[-1].lower().replace("_", " ") @flags @@ -165,11 +162,11 @@ def read(self, header, frame, data): return bytearray(data)[0], data[1:] def write(self, config, frame, value): - return chr_(value) + return bchr(value) def 
validate(self, frame, value): if value is not None: - chr_(value) + bchr(value) return value @@ -289,26 +286,22 @@ def read(s, header, frame, data): except UnicodeDecodeError: raise SpecError("not ascii") else: - if PY3: - chunk = ascii + chunk = ascii return chunk, data[s.len:] def write(self, config, frame, value): - if PY3: - value = value.encode("ascii") + + value = value.encode("ascii") return (bytes(value) + b'\x00' * self.len)[:self.len] def validate(self, frame, value): if value is None: raise TypeError - if PY3: - if not isinstance(value, str): - raise TypeError("%s has to be str" % self.name) - value.encode("ascii") - else: - if not isinstance(value, bytes): - value = value.encode("ascii") + + if not isinstance(value, str): + raise TypeError("%s has to be str" % self.name) + value.encode("ascii") if len(value) == self.len: return value @@ -424,7 +417,7 @@ def read(self, header, frame, data): def write(self, config, frame, value): if isinstance(value, bytes): return value - value = text_type(value).encode("ascii") + value = str(value).encode("ascii") return value def validate(self, frame, value): @@ -432,10 +425,10 @@ def validate(self, frame, value): raise TypeError if isinstance(value, bytes): return value - elif PY3: + else: raise TypeError("%s has to be bytes" % self.name) - value = text_type(value).encode("ascii") + value = str(value).encode("ascii") return value @@ -493,7 +486,7 @@ def write(self, config, frame, value): raise SpecError(e) def validate(self, frame, value): - return text_type(value) + return str(value) class MultiSpec(Spec): @@ -522,32 +515,32 @@ def write(self, config, frame, value): data.append(self.specs[0].write(config, frame, v)) else: for record in value: - for v, s in izip(record, self.specs): + for v, s in zip(record, self.specs): data.append(s.write(config, frame, v)) return b''.join(data) def validate(self, frame, value): - if self.sep and isinstance(value, string_types): + if self.sep and isinstance(value, str): value = value.split(self.sep) if isinstance(value, list): if len(self.specs) == 1: return [self.specs[0].validate(frame, v) for v in value] else: return [ - [s.validate(frame, v) for (v, s) in izip(val, self.specs)] + [s.validate(frame, v) for (v, s) in zip(val, self.specs)] for val in value] raise ValueError('Invalid MultiSpec data: %r' % value) def _validate23(self, frame, value, **kwargs): if len(self.specs) != 1: return [[s._validate23(frame, v, **kwargs) - for (v, s) in izip(val, self.specs)] + for (v, s) in zip(val, self.specs)] for val in value] spec = self.specs[0] # Merge single text spec multispecs only. 
- # (TimeStampSpec beeing the exception, but it's not a valid v2.3 frame) + # (TimeStampSpec being the exception, but it's not a valid v2.3 frame) if not isinstance(spec, EncodedTextSpec) or \ isinstance(spec, TimeStampSpec): return value @@ -582,7 +575,7 @@ def write(self, config, data, value): return value.encode('latin1') + b'\x00' def validate(self, frame, value): - return text_type(value) + return str(value) class ID3FramesSpec(Spec): @@ -632,7 +625,7 @@ def __init__(self, name, default=[]): def read(self, header, frame, data): count, data = self._bspec.read(header, frame, data) entries = [] - for i in xrange(count): + for i in range(count): entry, data = self._lspec.read(header, frame, data) entries.append(entry) return entries, data @@ -647,7 +640,6 @@ def validate(self, frame, value): return [self._lspec.validate(frame, v) for v in value] -@swap_to_string @total_ordering class ID3TimeStamp(object): """A time stamp in ID3v2 format. @@ -665,10 +657,8 @@ class ID3TimeStamp(object): def __init__(self, text): if isinstance(text, ID3TimeStamp): text = text.text - elif not isinstance(text, text_type): - if PY3: - raise TypeError("not a str") - text = text.decode("utf-8") + elif not isinstance(text, str): + raise TypeError("not a str") self.text = text @@ -736,7 +726,7 @@ def validate(self, frame, value): class ChannelSpec(ByteSpec): (OTHER, MASTER, FRONTRIGHT, FRONTLEFT, BACKRIGHT, BACKLEFT, FRONTCENTRE, - BACKCENTRE, SUBWOOFER) = xrange(9) + BACKCENTRE, SUBWOOFER) = range(9) class VolumeAdjustmentSpec(Spec): @@ -771,7 +761,7 @@ def read(self, header, frame, data): if vol_bytes + 1 > len(data): raise SpecError("not enough frame data") shift = ((8 - (bits & 7)) & 7) + (4 - vol_bytes) * 8 - for i in xrange(1, vol_bytes + 1): + for i in range(1, vol_bytes + 1): peak *= 256 peak += data_array[i] peak *= 2 ** shift diff --git a/script.module.mutagen/lib/mutagen/id3/_tags.py b/script.module.mutagen/lib/mutagen/id3/_tags.py index d21ced3eb..48c741a36 100644 --- a/script.module.mutagen/lib/mutagen/id3/_tags.py +++ b/script.module.mutagen/lib/mutagen/id3/_tags.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2005 Michael Urman # Copyright 2016 Christoph Reiter # @@ -9,10 +8,10 @@ import re import struct +from itertools import zip_longest from mutagen._tags import Tags from mutagen._util import DictProxy, convert_error, read_full -from mutagen._compat import PY3, itervalues, izip_longest from ._util import BitPaddedInt, unsynch, ID3JunkFrameError, \ ID3EncryptionUnsupportedError, is_valid_frame_id, error, \ @@ -83,10 +82,7 @@ def __init__(self, fileobj=None): if self.f_extended: extsize_data = read_full(fileobj, 4) - if PY3: - frame_id = extsize_data.decode("ascii", "replace") - else: - frame_id = extsize_data + frame_id = extsize_data.decode("ascii", "replace") if frame_id in Frames: # Some tagger sets the extended header flag but @@ -112,6 +108,9 @@ def __init__(self, fileobj=None): # excludes itself." 
extsize = struct.unpack('>L', extsize_data)[0] + if extsize < 0: + raise error("invalid extended header size") + self._extdata = read_full(fileobj, extsize) @@ -132,11 +131,10 @@ def determine_bpi(data, frames, EMPTY=b"\x00" * 10): name, size, flags = struct.unpack('>4sLH', part) size = BitPaddedInt(size) o += 10 + size - if PY3: - try: - name = name.decode("ascii") - except UnicodeDecodeError: - continue + try: + name = name.decode("ascii") + except UnicodeDecodeError: + continue if name in frames: asbpi += 1 else: @@ -152,11 +150,10 @@ def determine_bpi(data, frames, EMPTY=b"\x00" * 10): break name, size, flags = struct.unpack('>4sLH', part) o += 10 + size - if PY3: - try: - name = name.decode("ascii") - except UnicodeDecodeError: - continue + try: + name = name.decode("ascii") + except UnicodeDecodeError: + continue if name in frames: asint += 1 else: @@ -192,7 +189,7 @@ def _write(self, config): order = ["TIT2", "TPE1", "TRCK", "TALB", "TPOS", "TDRC", "TCON"] framedata = [ - (f, save_frame(f, config=config)) for f in itervalues(self)] + (f, save_frame(f, config=config)) for f in self.values()] def get_prio(frame): try: @@ -259,12 +256,12 @@ def delall(self, key): """ if key in self: - del(self[key]) + del self[key] else: key = key + ":" for k in list(self.keys()): if k.startswith(key): - del(self[k]) + del self[k] def pprint(self): """ @@ -372,17 +369,20 @@ def update_to_v24(self): # TDAT, TYER, and TIME have been turned into TDRC. timestamps = [] old_frames = [self.pop(n, []) for n in ["TYER", "TDAT", "TIME"]] - for y, d, t in izip_longest(*old_frames, fillvalue=u""): - ym = re.match(r"([0-9]+)\Z", y) - dm = re.match(r"([0-9]{2})([0-9]{2})\Z", d) - tm = re.match(r"([0-9]{2})([0-9]{2})\Z", t) + for tyer, tdat, time in zip_longest(*old_frames, fillvalue=""): + ym = re.match(r"([0-9]{4})(-[0-9]{2}-[0-9]{2})?\Z", tyer) + dm = re.match(r"([0-9]{2})([0-9]{2})\Z", tdat) + tm = re.match(r"([0-9]{2})([0-9]{2})\Z", time) timestamp = "" if ym: - timestamp += u"%s" % ym.groups() + (year, month_day) = ym.groups() + timestamp += "%s" % year if dm: - timestamp += u"-%s-%s" % dm.groups()[::-1] + month_day = "-%s-%s" % dm.groups()[::-1] + if month_day: + timestamp += month_day if tm: - timestamp += u"T%s:%s:00" % tm.groups() + timestamp += "T%s:%s:00" % tm.groups() if timestamp: timestamps.append(timestamp) if timestamps and "TDRC" not in self: @@ -407,7 +407,7 @@ def update_to_v24(self): # should have been removed already. 
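The rewritten update_to_v24() hunk above folds the legacy ID3v2.3 date frames TYER (YYYY), TDAT (DDMM) and TIME (HHMM) into a single v2.4 TDRC timestamp. A standalone sketch of that merge, operating on plain strings rather than mutagen Frame objects -- the function name and the early-out on a missing year are mine, while the regular expressions mirror the hunk:

import re
from itertools import zip_longest

def merge_v23_date_frames(years, dates, times):
    """Combine TYER/TDAT/TIME text values into ID3v2.4-style timestamps
    such as "2004-12-04T13:30:00"."""
    timestamps = []
    for tyer, tdat, time in zip_longest(years, dates, times, fillvalue=""):
        ym = re.match(r"([0-9]{4})(-[0-9]{2}-[0-9]{2})?\Z", tyer)
        dm = re.match(r"([0-9]{2})([0-9]{2})\Z", tdat)  # TDAT is DDMM
        tm = re.match(r"([0-9]{2})([0-9]{2})\Z", time)  # TIME is HHMM
        if not ym:
            continue
        year, month_day = ym.groups()
        timestamp = year
        if dm:
            month_day = "-%s-%s" % dm.groups()[::-1]  # DDMM -> "-MM-DD"
        if month_day:
            timestamp += month_day
        if tm:
            timestamp += "T%s:%s:00" % tm.groups()
        timestamps.append(timestamp)
    return timestamps

# merge_v23_date_frames(["2004"], ["0412"], ["1330"])
# -> ["2004-12-04T13:30:00"]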
for key in ["RVAD", "EQUA", "TRDA", "TSIZ", "TDAT", "TIME"]: if key in self: - del(self[key]) + del self[key] # Recurse into chapters for f in self.getall("CHAP"): @@ -471,7 +471,7 @@ def update_to_v23(self): for key in v24_frames: if key in self: - del(self[key]) + del self[key] # Recurse into chapters for f in self.getall("CHAP"): @@ -533,8 +533,7 @@ def save_frame(frame, name=None, config=None): frame_name = name else: frame_name = type(frame).__name__ - if PY3: - frame_name = frame_name.encode("ascii") + frame_name = frame_name.encode("ascii") header = struct.pack('>4s4sH', frame_name, datasize, flags) return header + framedata @@ -575,11 +574,10 @@ def read_frames(id3, data, frames): if size == 0: continue # drop empty frames - if PY3: - try: - name = name.decode('ascii') - except UnicodeDecodeError: - continue + try: + name = name.decode('ascii') + except UnicodeDecodeError: + continue try: # someone writes 2.3 frames with 2.2 names @@ -614,11 +612,10 @@ def read_frames(id3, data, frames): if size == 0: continue # drop empty frames - if PY3: - try: - name = name.decode('ascii') - except UnicodeDecodeError: - continue + try: + name = name.decode('ascii') + except UnicodeDecodeError: + continue try: tag = frames[name] diff --git a/script.module.mutagen/lib/mutagen/id3/_util.py b/script.module.mutagen/lib/mutagen/id3/_util.py index 93bb264ee..699c5dede 100644 --- a/script.module.mutagen/lib/mutagen/id3/_util.py +++ b/script.module.mutagen/lib/mutagen/id3/_util.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2005 Michael Urman # 2013 Christoph Reiter # 2014 Ben Ockmore @@ -8,7 +7,8 @@ # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. -from mutagen._compat import long_, integer_types, PY3 +from typing import Union + from mutagen._util import MutagenError @@ -46,7 +46,7 @@ class ID3JunkFrameError(error): class unsynch(object): @staticmethod - def decode(value): + def decode(value: bytes) -> bytes: fragments = bytearray(value).split(b'\xff') if len(fragments) > 1 and not fragments[-1]: raise ValueError('string ended unsafe') @@ -61,7 +61,7 @@ def decode(value): return bytes(bytearray(b'\xff').join(fragments)) @staticmethod - def encode(value): + def encode(value: bytes) -> bytes: fragments = bytearray(value).split(b'\xff') for f in fragments[1:]: if (not f) or (f[0] >= 0xE0) or (f[0] == 0x00): @@ -75,7 +75,8 @@ def as_str(self, width=4, minwidth=4): return self.to_str(self, self.bits, self.bigendian, width, minwidth) @staticmethod - def to_str(value, bits=7, bigendian=True, width=4, minwidth=4): + def to_str(value: int, bits: int = 7, bigendian: bool = True, + width: int = 4, minwidth: int = 4) -> bytes: mask = (1 << bits) - 1 if width != -1: @@ -103,14 +104,14 @@ def to_str(value, bits=7, bigendian=True, width=4, minwidth=4): return bytes(bytes_) @staticmethod - def has_valid_padding(value, bits=7): + def has_valid_padding(value: Union[int, bytes], bits: int = 7) -> bool: """Whether the padding bits are all zero""" assert bits <= 8 mask = (((1 << (8 - bits)) - 1) << bits) - if isinstance(value, integer_types): + if isinstance(value, int): while value: if value & mask: return False @@ -133,7 +134,7 @@ def __new__(cls, value, bits=7, bigendian=True): numeric_value = 0 shift = 0 - if isinstance(value, integer_types): + if isinstance(value, int): if value < 0: raise ValueError while value: @@ -149,21 +150,12 @@ def __new__(cls, value, bits=7, bigendian=True): else: raise TypeError - if isinstance(numeric_value, int): - self = 
int.__new__(BitPaddedInt, numeric_value) - else: - self = long_.__new__(BitPaddedLong, numeric_value) + self = int.__new__(BitPaddedInt, numeric_value) self.bits = bits self.bigendian = bigendian return self -if PY3: - BitPaddedLong = BitPaddedInt -else: - class BitPaddedLong(long_, _BitPaddedMixin): - pass - class ID3BadUnsynchData(error, ValueError): """Deprecated""" diff --git a/script.module.mutagen/lib/mutagen/m4a.py b/script.module.mutagen/lib/mutagen/m4a.py index c7583f8ea..eea2136b2 100644 --- a/script.module.mutagen/lib/mutagen/m4a.py +++ b/script.module.mutagen/lib/mutagen/m4a.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2006 Joe Wreschnig # # This program is free software; you can redistribute it and/or modify diff --git a/script.module.mutagen/lib/mutagen/monkeysaudio.py b/script.module.mutagen/lib/mutagen/monkeysaudio.py index 82bfcd246..30cf78c0f 100644 --- a/script.module.mutagen/lib/mutagen/monkeysaudio.py +++ b/script.module.mutagen/lib/mutagen/monkeysaudio.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2006 Lukas Lalinsky # # This program is free software; you can redistribute it and/or modify @@ -18,10 +17,9 @@ import struct -from ._compat import endswith from mutagen import StreamInfo from mutagen.apev2 import APEv2File, error, delete -from mutagen._util import cdata, convert_error +from mutagen._util import cdata, convert_error, endswith class MonkeysAudioHeaderError(error): diff --git a/script.module.mutagen/lib/mutagen/mp3/__init__.py b/script.module.mutagen/lib/mutagen/mp3/__init__.py index 1246d74e2..fa7b41d37 100644 --- a/script.module.mutagen/lib/mutagen/mp3/__init__.py +++ b/script.module.mutagen/lib/mutagen/mp3/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2006 Joe Wreschnig # # This program is free software; you can redistribute it and/or modify @@ -12,8 +11,7 @@ from mutagen import StreamInfo from mutagen._util import MutagenError, enum, BitReader, BitReaderError, \ - convert_error, intround -from mutagen._compat import endswith, xrange + convert_error, intround, endswith from mutagen.id3 import ID3FileType, delete from mutagen.id3._util import BitPaddedInt @@ -75,27 +73,27 @@ def _guess_xing_bitrate_mode(xing): # Mode values. -STEREO, JOINTSTEREO, DUALCHANNEL, MONO = xrange(4) +STEREO, JOINTSTEREO, DUALCHANNEL, MONO = range(4) class MPEGFrame(object): # Map (version, layer) tuples to bitrates. __BITRATE = { - (1, 1): [0, 32, 64, 96, 128, 160, 192, 224, - 256, 288, 320, 352, 384, 416, 448], - (1, 2): [0, 32, 48, 56, 64, 80, 96, 112, 128, - 160, 192, 224, 256, 320, 384], - (1, 3): [0, 32, 40, 48, 56, 64, 80, 96, 112, - 128, 160, 192, 224, 256, 320], - (2, 1): [0, 32, 48, 56, 64, 80, 96, 112, 128, - 144, 160, 176, 192, 224, 256], - (2, 2): [0, 8, 16, 24, 32, 40, 48, 56, 64, - 80, 96, 112, 128, 144, 160], + (1., 1): [0, 32, 64, 96, 128, 160, 192, 224, + 256, 288, 320, 352, 384, 416, 448], + (1., 2): [0, 32, 48, 56, 64, 80, 96, 112, 128, + 160, 192, 224, 256, 320, 384], + (1., 3): [0, 32, 40, 48, 56, 64, 80, 96, 112, + 128, 160, 192, 224, 256, 320], + (2., 1): [0, 32, 48, 56, 64, 80, 96, 112, 128, + 144, 160, 176, 192, 224, 256], + (2., 2): [0, 8, 16, 24, 32, 40, 48, 56, 64, + 80, 96, 112, 128, 144, 160], } __BITRATE[(2, 3)] = __BITRATE[(2, 2)] - for i in xrange(1, 4): + for i in range(1, 4): __BITRATE[(2.5, i)] = __BITRATE[(2, i)] # Map version to sample rates. 
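For context on the (version, layer) bitrate table above: it is indexed by the MPEG version and layer and then by the 4-bit bitrate index from the frame header. The sketch below shows how those fields sit in the first four bytes of a frame. It is illustrative only, not mutagen's MPEGFrame API; the function name and the simplified error handling are mine, and the sample-rate table is the standard MPEG mapping rather than anything taken from this hunk:

import struct

# Bitrate rows in kbps, keyed by (MPEG version, layer); mirrors the table
# in the hunk above. Version 2.5 reuses the version-2 rows.
BITRATES = {
    (1, 1): [0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448],
    (1, 2): [0, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384],
    (1, 3): [0, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320],
    (2, 1): [0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256],
    (2, 2): [0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160],
}
BITRATES[(2, 3)] = BITRATES[(2, 2)]
for layer in range(1, 4):
    BITRATES[(2.5, layer)] = BITRATES[(2, layer)]

SAMPLE_RATES = {1: [44100, 48000, 32000],
                2: [22050, 24000, 16000],
                2.5: [11025, 12000, 8000]}

def parse_mpeg_header(data):
    """Decode (version, layer, bitrate in bps, sample rate in Hz) from the
    first four bytes of an MPEG audio frame; raises ValueError for
    anything that is not a plausible frame header."""
    header, = struct.unpack(">I", data[:4])
    if header >> 21 != 0x7FF:
        raise ValueError("no frame sync")
    version_bits = (header >> 19) & 3
    layer_bits = (header >> 17) & 3
    bitrate_index = (header >> 12) & 0xF
    rate_index = (header >> 10) & 3
    if version_bits == 1 or layer_bits == 0 or bitrate_index in (0, 15) \
            or rate_index == 3:
        raise ValueError("reserved or free-format header field")
    version = {0: 2.5, 2: 2, 3: 1}[version_bits]
    layer = 4 - layer_bits
    bitrate = BITRATES[(version, layer)][bitrate_index] * 1000
    sample_rate = SAMPLE_RATES[version][rate_index]
    return version, layer, bitrate, sample_rate

# parse_mpeg_header(b"\xff\xfb\x90\x00") -> (1, 3, 128000, 44100)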
@@ -370,7 +368,7 @@ def __init__(self, fileobj, offset=None): if max_syncs <= 0: break - for _ in xrange(enough_frames): + for _ in range(enough_frames): try: frame = MPEGFrame(fileobj) except HeaderNotFoundError: @@ -457,7 +455,11 @@ def mime(self): def score(filename, fileobj, header_data): filename = filename.lower() - return (header_data.startswith(b"ID3") * 2 + + return ((header_data.startswith(b"ID3") or + header_data.startswith(b'\xFF\xF2') or + header_data.startswith(b'\xFF\xF3') or + header_data.startswith(b'\xFF\xFA') or + header_data.startswith(b'\xFF\xFB')) * 2 + endswith(filename, b".mp3") + endswith(filename, b".mp2") + endswith(filename, b".mpg") + endswith(filename, b".mpeg")) @@ -480,4 +482,4 @@ class EasyMP3(MP3): """ from mutagen.easyid3 import EasyID3 as ID3 - ID3 = ID3 + ID3 = ID3 # type: ignore diff --git a/script.module.mutagen/lib/mutagen/mp3/_util.py b/script.module.mutagen/lib/mutagen/mp3/_util.py index fd1b5ca3f..b67ab6aad 100644 --- a/script.module.mutagen/lib/mutagen/mp3/_util.py +++ b/script.module.mutagen/lib/mutagen/mp3/_util.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015 Christoph Reiter # # This program is free software; you can redistribute it and/or modify @@ -13,9 +12,10 @@ from __future__ import division from functools import partial +from io import BytesIO +from typing import List -from mutagen._util import cdata, BitReader -from mutagen._compat import xrange, iterbytes, cBytesIO +from mutagen._util import cdata, BitReader, iterbytes class LAMEError(Exception): @@ -109,7 +109,7 @@ def __init__(self, xing, fileobj): raise LAMEError("Not enough data") # extended lame header - r = BitReader(cBytesIO(payload)) + r = BitReader(BytesIO(payload)) revision = r.bits(4) if revision != 0: raise LAMEError("unsupported header revision %d" % revision) @@ -356,7 +356,7 @@ class XingHeader(object): bytes = -1 """Number of bytes, -1 if unknown""" - toc = [] + toc: List[int] = [] """List of 100 file offsets in percent encoded as 0-255. E.g. entry 50 contains the file offset in percent at 50% play time. Empty if unknown. 
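The toc attribute documented above is what makes rough seeking in VBR files possible: entry N holds the file position at N per cent of the play time, scaled to 0-255. A small sketch of turning that table into a byte offset; the helper is illustrative, with `toc` and `total_bytes` assumed to come from a parsed XingHeader's `toc` and `bytes` attributes:

def xing_seek_offset(toc, total_bytes, fraction):
    """Estimate the byte offset at `fraction` (0.0-1.0) of the play time
    from a 100-entry Xing TOC (values 0-255), interpolating linearly
    between neighbouring entries."""
    if len(toc) != 100:
        raise ValueError("need a complete 100-entry TOC")
    pos = min(max(fraction, 0.0), 1.0) * 100.0
    index = min(int(pos), 99)
    lower = toc[index]
    upper = toc[index + 1] if index < 99 else 256
    scaled = lower + (upper - lower) * (pos - index)
    return int(total_bytes * scaled / 256.0)

# xing_seek_offset(xing.toc, xing.bytes, 0.5) -> offset roughly half-way
# through the audio data.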
@@ -474,7 +474,7 @@ class VBRIHeader(object): toc_frames = 0 """Number of frames per table entry""" - toc = [] + toc: List[int] = [] """TOC""" def __init__(self, fileobj): @@ -515,7 +515,7 @@ def __init__(self, fileobj): else: raise VBRIHeaderError("Invalid TOC entry size") - self.toc = [unpack(i)[0] for i in xrange(0, toc_size, toc_entry_size)] + self.toc = [unpack(i)[0] for i in range(0, toc_size, toc_entry_size)] @classmethod def get_offset(cls, info): diff --git a/script.module.mutagen/lib/mutagen/mp4/__init__.py b/script.module.mutagen/lib/mutagen/mp4/__init__.py index 9f2721e40..9e1757ecb 100644 --- a/script.module.mutagen/lib/mutagen/mp4/__init__.py +++ b/script.module.mutagen/lib/mutagen/mp4/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2006 Joe Wreschnig # # This program is free software; you can redistribute it and/or modify @@ -25,13 +24,15 @@ import struct import sys +from io import BytesIO +from collections.abc import Sequence +from datetime import timedelta from mutagen import FileType, Tags, StreamInfo, PaddingInfo from mutagen._constants import GENRES from mutagen._util import cdata, insert_bytes, DictProxy, MutagenError, \ - hashable, enum, get_size, resize_bytes, loadfile, convert_error -from mutagen._compat import (reraise, PY2, string_types, text_type, chr_, - iteritems, PY3, cBytesIO, izip, xrange) + hashable, enum, get_size, resize_bytes, loadfile, convert_error, bchr, \ + reraise from ._atom import Atoms, Atom, AtomError from ._util import parse_full_atom from ._as_entry import AudioSampleEntry, ASEntryError @@ -205,14 +206,10 @@ def __repr__(self): def _name2key(name): - if PY2: - return name return name.decode("latin-1") def _key2name(key): - if PY2: - return key return key.encode("latin-1") @@ -246,11 +243,11 @@ def _item_sort_key(key, value): "\xa9gen", "gnre", "trkn", "disk", "\xa9day", "cpil", "pgap", "pcst", "tmpo", "\xa9too", "----", "covr", "\xa9lyr"] - order = dict(izip(order, xrange(len(order)))) + order = dict(zip(order, range(len(order)))) last = len(order) # If there's no key-based way to distinguish, order by length. # If there's still no way, go by string comparison on the - # values, so we at least have something determinstic. + # values, so we at least have something deterministic. 
return (order.get(key[:4], last), len(repr(value)), repr(value)) @@ -367,8 +364,7 @@ def load(self, atoms, fileobj): self.__parse_text(atom, data, implicit=False) except MP4MetadataError: # parsing failed, save them so we can write them back - key = _name2key(atom.name) - self._failed_atoms.setdefault(key, []).append(data) + self._failed_atoms.setdefault(_name2key(atom.name), []).append(data) def __setitem__(self, key, value): if not isinstance(key, str): @@ -403,7 +399,7 @@ def save(self, filething=None, padding=None): except (TypeError, ValueError) as s: reraise(MP4MetadataValueError, s, sys.exc_info()[2]) - for key, failed in iteritems(self._failed_atoms): + for key, failed in self._failed_atoms.items(): # don't write atoms back if we have added a new one with # the same name, this excludes freeform which can have # multiple atoms with the same key (most parsers seem to be able @@ -523,10 +519,13 @@ def __update_offset_table(self, fileobj, fmt, atom, delta, offset): fileobj.seek(atom.offset + 12) data = fileobj.read(atom.length - 12) fmt = fmt % cdata.uint_be(data[:4]) - offsets = struct.unpack(fmt, data[4:]) - offsets = [o + (0, delta)[offset < o] for o in offsets] - fileobj.seek(atom.offset + 16) - fileobj.write(struct.pack(fmt, *offsets)) + try: + offsets = struct.unpack(fmt, data[4:]) + offsets = [o + (0, delta)[offset < o] for o in offsets] + fileobj.seek(atom.offset + 16) + fileobj.write(struct.pack(fmt, *offsets)) + except struct.error: + raise MP4MetadataError("wrong offset inside %r" % atom.name) def __update_tfhd(self, fileobj, atom, delta, offset): if atom.offset > offset: @@ -752,7 +751,7 @@ def __parse_bool(self, atom, data): def __render_bool(self, key, value): return self.__render_data( - key, 0, AtomDataType.INTEGER, [chr_(bool(value))]) + key, 0, AtomDataType.INTEGER, [bchr(bool(value))]) def __parse_cover(self, atom, data): values = [] @@ -816,18 +815,14 @@ def __parse_text(self, atom, data, implicit=True): self.__add(key, values) def __render_text(self, key, value, flags=AtomDataType.UTF8): - if isinstance(value, string_types): + if isinstance(value, str): value = [value] encoded = [] for v in value: - if not isinstance(v, text_type): - if PY3: - raise TypeError("%r not str" % v) - try: - v = v.decode("utf-8") - except (AttributeError, UnicodeDecodeError) as e: - raise TypeError(e) + if not isinstance(v, str): + raise TypeError("%r not str" % v) + encoded.append(v.encode("utf-8")) return self.__render_data(key, 0, flags, encoded) @@ -879,14 +874,14 @@ def delete(self, filename): def pprint(self): def to_line(key, value): - assert isinstance(key, text_type) - if isinstance(value, text_type): + assert isinstance(key, str) + if isinstance(value, str): return u"%s=%s" % (key, value) return u"%s=%r" % (key, value) values = [] - for key, value in sorted(iteritems(self)): - if not isinstance(key, text_type): + for key, value in sorted(self.items()): + if not isinstance(key, str): key = key.decode("latin-1") if key == "covr": values.append(u"%s=%s" % (key, u", ".join( @@ -899,6 +894,123 @@ def to_line(key, value): return u"\n".join(values) +class Chapter(object): + """Chapter() + + Chapter information container + """ + def __init__(self, start, title): + self.start = start + self.title = title + + +class MP4Chapters(Sequence): + """MP4Chapters() + + MPEG-4 Chapter information. + + Supports the 'moov.udta.chpl' box. 
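The new MP4Chapters support surfaces as a `chapters` attribute on MP4 objects (see the load() change further down): it is None when the file has no 'moov.udta.chpl' box, otherwise a sequence of Chapter objects. A minimal usage sketch; the file name is hypothetical:

import mutagen.mp4

audio = mutagen.mp4.MP4("audiobook.m4b")  # hypothetical example file
if audio.chapters is not None:
    for chapter in audio.chapters:
        # Chapter.start is seconds from the start of the file (float),
        # Chapter.title is the name decoded from the chpl box.
        print("%10.3f  %s" % (chapter.start, chapter.title))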
+ + A sequence of Chapter objects with the following members: + start (`float`): position from the start of the file in seconds + title (`str`): title of the chapter + + """ + + def __init__(self, *args, **kwargs): + self._timescale = None + self._duration = None + self._chapters = [] + super(MP4Chapters, self).__init__() + if args or kwargs: + self.load(*args, **kwargs) + + def __len__(self): + return self._chapters.__len__() + + def __getitem__(self, key): + return self._chapters.__getitem__(key) + + def load(self, atoms, fileobj): + try: + mvhd = atoms.path(b"moov", b"mvhd")[-1] + except KeyError as key: + return MP4MetadataError(key) + + self._parse_mvhd(mvhd, fileobj) + + if not self._timescale: + raise MP4MetadataError("Unable to get timescale") + + try: + chpl = atoms.path(b"moov", b"udta", b"chpl")[-1] + except KeyError as key: + return MP4MetadataError(key) + + self._parse_chpl(chpl, fileobj) + + @classmethod + def _can_load(cls, atoms): + return b"moov.udta.chpl" in atoms and b"moov.mvhd" in atoms + + def _parse_mvhd(self, atom, fileobj): + assert atom.name == b"mvhd" + + ok, data = atom.read(fileobj) + if not ok: + raise MP4StreamInfoError("Invalid mvhd") + + version = data[0] + + pos = 4 + if version == 0: + pos += 8 # created, modified + + self._timescale = struct.unpack(">l", data[pos:pos + 4])[0] + pos += 4 + + self._duration = struct.unpack(">l", data[pos:pos + 4])[0] + pos += 4 + elif version == 1: + pos += 16 # created, modified + + self._timescale = struct.unpack(">l", data[pos:pos + 4])[0] + pos += 4 + + self._duration = struct.unpack(">q", data[pos:pos + 8])[0] + pos += 8 + + def _parse_chpl(self, atom, fileobj): + assert atom.name == b"chpl" + + ok, data = atom.read(fileobj) + if not ok: + raise MP4StreamInfoError("Invalid atom") + + chapters = data[8] + + pos = 9 + for i in range(chapters): + start = struct.unpack(">Q", data[pos:pos + 8])[0] / 10000 + pos += 8 + + title_len = data[pos] + pos += 1 + + try: + title = data[pos:pos + title_len].decode() + except UnicodeDecodeError as e: + raise MP4MetadataError("chapter %d title: %s" % (i, e)) + pos += title_len + + self._chapters.append(Chapter(start / self._timescale, title)) + + def pprint(self): + chapters = ["%s %s" % (timedelta(seconds=chapter.start), chapter.title) + for chapter in self._chapters] + return "chapters=%s" % '\n '.join(chapters) + + class MP4Info(StreamInfo): """MP4Info() @@ -1014,7 +1126,7 @@ def _parse_stsd(self, atom, fileobj): return # look at the first entry if there is one - entry_fileobj = cBytesIO(data[offset:]) + entry_fileobj = BytesIO(data[offset:]) try: entry_atom = Atom(entry_fileobj) except AtomError as e: @@ -1054,6 +1166,7 @@ class MP4(FileType): """ MP4Tags = MP4Tags + MP4Chapters = MP4Chapters _mimes = ["audio/mp4", "audio/x-m4a", "audio/mpeg4", "audio/aac"] @@ -1086,6 +1199,16 @@ def load(self, filething): except Exception as err: reraise(MP4MetadataError, err, sys.exc_info()[2]) + if not MP4Chapters._can_load(atoms): + self.chapters = None + else: + try: + self.chapters = self.MP4Chapters(atoms, fileobj) + except error: + raise + except Exception as err: + reraise(MP4MetadataError, err, sys.exc_info()[2]) + @property def _padding(self): if self.tags is None: @@ -1098,6 +1221,28 @@ def save(self, *args, **kwargs): super(MP4, self).save(*args, **kwargs) + def pprint(self): + """ + Returns: + text: stream information, comment key=value pairs and chapters. 
+ """ + stream = "%s (%s)" % (self.info.pprint(), self.mime[0]) + try: + tags = self.tags.pprint() + except AttributeError: + pass + else: + stream += ((tags and "\n" + tags) or "") + + try: + chapters = self.chapters.pprint() + except AttributeError: + pass + else: + stream += "\n" + chapters + + return stream + def add_tags(self): if self.tags is None: self.tags = self.MP4Tags() diff --git a/script.module.mutagen/lib/mutagen/mp4/_as_entry.py b/script.module.mutagen/lib/mutagen/mp4/_as_entry.py index 15b7e6bc2..e5013c421 100644 --- a/script.module.mutagen/lib/mutagen/mp4/_as_entry.py +++ b/script.module.mutagen/lib/mutagen/mp4/_as_entry.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2014 Christoph Reiter # # This program is free software; you can redistribute it and/or modify @@ -6,10 +5,10 @@ # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. -from mutagen._compat import cBytesIO, xrange +from io import BytesIO + from mutagen.aac import ProgramConfigElement from mutagen._util import BitReader, BitReaderError, cdata -from mutagen._compat import text_type from ._util import parse_full_atom from ._atom import Atom, AtomError @@ -47,7 +46,7 @@ def __init__(self, atom, fileobj): if not ok: raise ASEntryError("too short %r atom" % atom.name) - fileobj = cBytesIO(data) + fileobj = BytesIO(data) r = BitReader(fileobj) try: @@ -93,7 +92,7 @@ def _parse_dac3(self, atom, fileobj): ok, data = atom.read(fileobj) if not ok: raise ASEntryError("truncated %s atom" % atom.name) - fileobj = cBytesIO(data) + fileobj = BytesIO(data) r = BitReader(fileobj) # sample_rate in AudioSampleEntry covers values in @@ -134,7 +133,7 @@ def _parse_alac(self, atom, fileobj): if version != 0: raise ASEntryError("Unsupported version %d" % version) - fileobj = cBytesIO(data) + fileobj = BytesIO(data) r = BitReader(fileobj) try: @@ -168,7 +167,7 @@ def _parse_esds(self, esds, fileobj): if version != 0: raise ASEntryError("Unsupported version %d" % version) - fileobj = cBytesIO(data) + fileobj = BytesIO(data) r = BitReader(fileobj) try: @@ -204,14 +203,14 @@ class DescriptorError(Exception): class BaseDescriptor(object): - TAG = None + TAG: int @classmethod def _parse_desc_length_file(cls, fileobj): """May raise ValueError""" value = 0 - for i in xrange(4): + for i in range(4): try: b = cdata.uint8(fileobj.read(1)) except cdata.error as e: @@ -239,9 +238,13 @@ def parse(cls, fileobj): pos = fileobj.tell() instance = cls(fileobj, length) left = length - (fileobj.tell() - pos) - if left < 0: - raise DescriptorError("descriptor parsing read too much data") - fileobj.seek(left, 1) + if left > 0: + fileobj.seek(left, 1) + else: + # XXX: In case the instance length is shorted than the content + # assume the size is wrong and just continue parsing + # https://github.com/quodlibet/mutagen/issues/444 + pass return instance @@ -371,7 +374,7 @@ def description(self): name += "+SBR" if self.psPresentFlag == 1: name += "+PS" - return text_type(name) + return str(name) @property def sample_rate(self): diff --git a/script.module.mutagen/lib/mutagen/mp4/_atom.py b/script.module.mutagen/lib/mutagen/mp4/_atom.py index cd43a1fe3..e1bf15351 100644 --- a/script.module.mutagen/lib/mutagen/mp4/_atom.py +++ b/script.module.mutagen/lib/mutagen/mp4/_atom.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2006 Joe Wreschnig # # This program is free software; you can redistribute it and/or modify @@ -8,7 +7,6 @@ import struct -from mutagen._compat import PY2 from mutagen._util 
import convert_error # This is not an exhaustive list of container atoms, but just the @@ -180,12 +178,8 @@ def __getitem__(self, names): specifying the complete path ('moov.udta'). """ - if PY2: - if isinstance(names, basestring): - names = names.split(b".") - else: - if isinstance(names, bytes): - names = names.split(b".") + if isinstance(names, bytes): + names = names.split(b".") for child in self.atoms: if child.name == names[0]: diff --git a/script.module.mutagen/lib/mutagen/mp4/_util.py b/script.module.mutagen/lib/mutagen/mp4/_util.py index 43d81c82a..b8e208a14 100644 --- a/script.module.mutagen/lib/mutagen/mp4/_util.py +++ b/script.module.mutagen/lib/mutagen/mp4/_util.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2014 Christoph Reiter # # This program is free software; you can redistribute it and/or modify diff --git a/script.module.mutagen/lib/mutagen/musepack.py b/script.module.mutagen/lib/mutagen/musepack.py index c966d9395..944de6d54 100644 --- a/script.module.mutagen/lib/mutagen/musepack.py +++ b/script.module.mutagen/lib/mutagen/musepack.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2006 Lukas Lalinsky # Copyright (C) 2012 Christoph Reiter # @@ -19,11 +18,10 @@ import struct -from ._compat import endswith, xrange from mutagen import StreamInfo from mutagen.apev2 import APEv2File, error, delete from mutagen.id3._util import BitPaddedInt -from mutagen._util import cdata, convert_error, intround +from mutagen._util import cdata, convert_error, intround, endswith class MusepackHeaderError(error): @@ -44,7 +42,7 @@ def _parse_sv8_int(fileobj, limit=9): """ num = 0 - for i in xrange(limit): + for i in range(limit): c = fileobj.read(1) if len(c) != 1: raise EOFError @@ -143,9 +141,13 @@ def check_frame_key(key): # packets can be at maximum data_size big and are padded with zeros if frame_type == b"SH": + if frame_type not in mandatory_packets: + raise MusepackHeaderError("Duplicate SH packet") mandatory_packets.remove(frame_type) self.__parse_stream_header(fileobj, data_size) elif frame_type == b"RG": + if frame_type not in mandatory_packets: + raise MusepackHeaderError("Duplicate RG packet") mandatory_packets.remove(frame_type) self.__parse_replaygain_packet(fileobj, data_size) else: @@ -184,9 +186,13 @@ def __parse_stream_header(self, fileobj, data_size): remaining_size -= l1 + l2 data = fileobj.read(remaining_size) - if len(data) != remaining_size: + if len(data) != remaining_size or len(data) < 2: raise MusepackHeaderError("SH packet ended unexpectedly.") - self.sample_rate = RATES[bytearray(data)[0] >> 5] + rate_index = (bytearray(data)[0] >> 5) + try: + self.sample_rate = RATES[rate_index] + except IndexError: + raise MusepackHeaderError("Invalid sample rate") self.channels = (bytearray(data)[1] >> 4) + 1 def __parse_replaygain_packet(self, fileobj, data_size): diff --git a/script.module.mutagen/lib/mutagen/ogg.py b/script.module.mutagen/lib/mutagen/ogg.py index 2dd0b3c28..263c939dd 100644 --- a/script.module.mutagen/lib/mutagen/ogg.py +++ b/script.module.mutagen/lib/mutagen/ogg.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2006 Joe Wreschnig # # This program is free software; you can redistribute it and/or modify @@ -19,10 +18,14 @@ import struct import sys import zlib +from io import BytesIO +from typing import Type from mutagen import FileType -from mutagen._util import cdata, resize_bytes, MutagenError, loadfile, seek_end -from ._compat import cBytesIO, reraise, chr_, izip, xrange +from mutagen._util import cdata, resize_bytes, 
MutagenError, loadfile, \ + seek_end, bchr, reraise +from mutagen._file import StreamInfo +from mutagen._tags import Tags class error(MutagenError): @@ -37,7 +40,7 @@ class OggPage(object): A page is a header of 26 bytes, followed by the length of the data, followed by the data. - The constructor is givin a file-like object pointing to the start + The constructor is given a file-like object pointing to the start of an Ogg page. After the constructor is finished it is pointing to the start of the next page. @@ -145,11 +148,11 @@ def write(self): lacing_data = [] for datum in self.packets: quot, rem = divmod(len(datum), 255) - lacing_data.append(b"\xff" * quot + chr_(rem)) + lacing_data.append(b"\xff" * quot + bchr(rem)) lacing_data = b"".join(lacing_data) if not self.complete and lacing_data.endswith(b"\x00"): lacing_data = lacing_data[:-1] - data.append(chr_(len(lacing_data))) + data.append(bchr(len(lacing_data))) data.append(lacing_data) data.extend(self.packets) data = b"".join(data) @@ -164,7 +167,7 @@ def write(self): return data @property - def size(self): + def size(self) -> int: """Total frame size.""" size = 27 # Initial header size @@ -210,7 +213,7 @@ def renumber(fileobj, serial, start): to logical stream 'serial'. Other pages will be ignored. fileobj must point to the start of a valid Ogg page; any - occuring after it and part of the specified logical stream + occurring after it and part of the specified logical stream will be numbered. No adjustment will be made to the data in the pages nor the granule position; only the page number, and so also the CRC. @@ -267,11 +270,12 @@ def to_packets(pages, strict=False): else: sequence += 1 - if page.continued: - packets[-1].append(page.packets[0]) - else: - packets.append([page.packets[0]]) - packets.extend([p] for p in page.packets[1:]) + if page.packets: + if page.continued: + packets[-1].append(page.packets[0]) + else: + packets.append([page.packets[0]]) + packets.extend([p] for p in page.packets[1:]) return [b"".join(p) for p in packets] @@ -387,8 +391,8 @@ def replace(cls, fileobj, old_pages, new_pages): # Number the new pages starting from the first old page. first = old_pages[0].sequence - for page, seq in izip(new_pages, - xrange(first, first + len(new_pages))): + for page, seq in zip(new_pages, + range(first, first + len(new_pages))): page.sequence = seq page.serial = old_pages[0].serial @@ -416,7 +420,7 @@ def replace(cls, fileobj, old_pages, new_pages): offset_adjust = 0 new_data_end = None assert len(old_pages) == len(new_data) - for old_page, data in izip(old_pages, new_data): + for old_page, data in zip(old_pages, new_data): offset = old_page.offset + offset_adjust data_size = len(data) resize_bytes(fileobj, old_page.size, data_size, offset) @@ -425,7 +429,7 @@ def replace(cls, fileobj, old_pages, new_pages): new_data_end = offset + data_size offset_adjust += (data_size - old_page.size) - # Finally, if there's any discrepency in length, we need to + # Finally, if there's any discrepancy in length, we need to # renumber the pages for the logical stream. 
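A note on the lacing code in OggPage.write() above: a page's segment table stores each packet length as a run of 255 values followed by a remainder byte, and the trailing 0 is dropped when the last packet is incomplete and continues on the next page. A standalone sketch of that encoding (the function name is mine):

def lacing_values(packet_lengths, complete=True):
    """Return the Ogg segment table for packets of the given lengths."""
    table = bytearray()
    for length in packet_lengths:
        quot, rem = divmod(length, 255)
        table += b"\xff" * quot + bytes([rem])
    if not complete and table.endswith(b"\x00"):
        del table[-1]  # last packet spills over onto the next page
    return bytes(table)

# lacing_values([600]) -> b"\xff\xff\x5a" (600 == 255 + 255 + 90)
# lacing_values([255], complete=False) -> b"\xff" (continued packet)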
if len(old_pages) != len(new_pages): fileobj.seek(new_data_end, 0) @@ -460,7 +464,7 @@ def find_last(fileobj, serial, finishing=False): index = data.rindex(b"OggS") except ValueError: raise error("unable to find final Ogg header") - bytesobj = cBytesIO(data[index:]) + bytesobj = BytesIO(data[index:]) def is_valid(page): return not finishing or page.position != -1 @@ -506,9 +510,9 @@ class OggFileType(FileType): filething (filething) """ - _Info = None - _Tags = None - _Error = None + _Info: Type[StreamInfo] + _Tags: Type[Tags] + _Error: Type[error] _mimes = ["application/ogg", "application/x-ogg"] @loadfile() diff --git a/script.module.mutagen/lib/mutagen/oggflac.py b/script.module.mutagen/lib/mutagen/oggflac.py index bc0730945..9a4ce3e71 100644 --- a/script.module.mutagen/lib/mutagen/oggflac.py +++ b/script.module.mutagen/lib/mutagen/oggflac.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2006 Joe Wreschnig # # This program is free software; you can redistribute it and/or modify @@ -18,8 +17,7 @@ __all__ = ["OggFLAC", "Open", "delete"] import struct - -from ._compat import cBytesIO +from io import BytesIO from mutagen import StreamInfo from mutagen.flac import StreamInfo as FLACStreamInfo, error as FLACError @@ -65,7 +63,7 @@ def __init__(self, fileobj): self.serial = page.serial # Skip over the block header. - stringobj = cBytesIO(page.packets[0][17:]) + stringobj = BytesIO(page.packets[0][17:]) try: flac_info = FLACStreamInfo(stringobj) @@ -101,7 +99,7 @@ def __init__(self, fileobj, info): if page.serial == info.serial: pages.append(page) complete = page.complete or (len(page.packets) > 1) - comment = cBytesIO(OggPage.to_packets(pages)[0][4:]) + comment = BytesIO(OggPage.to_packets(pages)[0][4:]) super(OggFLACVComment, self).__init__(comment, framing=False) def _inject(self, fileobj, padding_func): diff --git a/script.module.mutagen/lib/mutagen/oggopus.py b/script.module.mutagen/lib/mutagen/oggopus.py index df9c32e80..486b58531 100644 --- a/script.module.mutagen/lib/mutagen/oggopus.py +++ b/script.module.mutagen/lib/mutagen/oggopus.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2012, 2013 Christoph Reiter # # This program is free software; you can redistribute it and/or modify @@ -17,9 +16,9 @@ __all__ = ["OggOpus", "Open", "delete"] import struct +from io import BytesIO from mutagen import StreamInfo -from mutagen._compat import BytesIO from mutagen._util import get_size, loadfile, convert_error from mutagen._tags import PaddingInfo from mutagen._vorbis import VCommentDict diff --git a/script.module.mutagen/lib/mutagen/oggspeex.py b/script.module.mutagen/lib/mutagen/oggspeex.py index de02a4495..a3edb247f 100644 --- a/script.module.mutagen/lib/mutagen/oggspeex.py +++ b/script.module.mutagen/lib/mutagen/oggspeex.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2006 Joe Wreschnig # # This program is free software; you can redistribute it and/or modify diff --git a/script.module.mutagen/lib/mutagen/oggtheora.py b/script.module.mutagen/lib/mutagen/oggtheora.py index 619b884a1..51f0a451d 100644 --- a/script.module.mutagen/lib/mutagen/oggtheora.py +++ b/script.module.mutagen/lib/mutagen/oggtheora.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2006 Joe Wreschnig # # This program is free software; you can redistribute it and/or modify @@ -50,19 +49,22 @@ class OggTheoraInfo(StreamInfo): def __init__(self, fileobj): page = OggPage(fileobj) - while not page.packets[0].startswith(b"\x80theora"): + while not page.packets or \ + not 
page.packets[0].startswith(b"\x80theora"): page = OggPage(fileobj) if not page.first: raise OggTheoraHeaderError( "page has ID header, but doesn't start a stream") data = page.packets[0] + if len(data) < 42: + raise OggTheoraHeaderError("Truncated header") vmaj, vmin = struct.unpack("2B", data[7:9]) if (vmaj, vmin) != (3, 2): raise OggTheoraHeaderError( "found Theora version %d.%d != 3.2" % (vmaj, vmin)) fps_num, fps_den = struct.unpack(">2I", data[22:30]) - if not fps_den: - raise OggTheoraHeaderError("fps_den is equal to zero") + if not fps_den or not fps_num: + raise OggTheoraHeaderError("FRN or FRD is equal to zero") self.fps = fps_num / float(fps_den) self.bitrate = cdata.uint_be(b"\x00" + data[37:40]) self.granule_shift = (cdata.ushort_be(data[40:42]) >> 5) & 0x1F @@ -75,6 +77,7 @@ def _post_tags(self, fileobj): position = page.position mask = (1 << self.granule_shift) - 1 frames = (position >> self.granule_shift) + (position & mask) + assert self.fps self.length = frames / float(self.fps) def pprint(self): @@ -93,7 +96,10 @@ def __init__(self, fileobj, info): if page.serial == info.serial: pages.append(page) complete = page.complete or (len(page.packets) > 1) - data = OggPage.to_packets(pages)[0][7:] + packets = OggPage.to_packets(pages) + if not packets: + raise error("Missing metadata packet") + data = packets[0][7:] super(OggTheoraCommentDict, self).__init__(data, framing=False) self._padding = len(data) - self._size @@ -102,7 +108,8 @@ def _inject(self, fileobj, padding_func): fileobj.seek(0) page = OggPage(fileobj) - while not page.packets[0].startswith(b"\x81theora"): + while not page.packets or \ + not page.packets[0].startswith(b"\x81theora"): page = OggPage(fileobj) old_pages = [page] diff --git a/script.module.mutagen/lib/mutagen/oggvorbis.py b/script.module.mutagen/lib/mutagen/oggvorbis.py index 30cb95a01..c1c907326 100644 --- a/script.module.mutagen/lib/mutagen/oggvorbis.py +++ b/script.module.mutagen/lib/mutagen/oggvorbis.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2006 Joe Wreschnig # # This program is free software; you can redistribute it and/or modify @@ -56,13 +55,20 @@ def __init__(self, fileobj): """Raises ogg.error, IOError""" page = OggPage(fileobj) + if not page.packets: + raise OggVorbisHeaderError("page has not packets") while not page.packets[0].startswith(b"\x01vorbis"): page = OggPage(fileobj) if not page.first: raise OggVorbisHeaderError( "page has ID header, but doesn't start a stream") + if len(page.packets[0]) < 28: + raise OggVorbisHeaderError( + "page contains a packet too short to be valid") (self.channels, self.sample_rate, max_bitrate, nominal_bitrate, - min_bitrate) = struct.unpack(" Tuple[int, int]: val = 0 while 1: try: @@ -34,12 +33,12 @@ def _var_int(data, offset=0): def _read_track(chunk): - """Retuns a list of midi events and tempo change events""" + """Returns a list of midi events and tempo change events""" TEMPO, MIDI = range(2) # Deviations: The running status should be reset on non midi events, but - # some files contain meta events inbetween. + # some files contain meta events in between. # TODO: Offset and time signature are not considered. 
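smf._var_int() above reads standard MIDI variable-length quantities: seven payload bits per byte, with the high bit set on every byte except the last. A minimal standalone decoder for the same format; the name and the lack of bounds checking are mine:

def decode_var_int(data, offset=0):
    """Decode one MIDI variable-length quantity from `data` starting at
    `offset`; returns (value, new_offset)."""
    value = 0
    while True:
        byte = data[offset]
        offset += 1
        value = (value << 7) | (byte & 0x7F)
        if not byte & 0x80:
            return value, offset

# decode_var_int(b"\x81\x48") -> (200, 2)   # 0x81 0x48 == 1 * 128 + 72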
tempos = [] @@ -123,7 +122,7 @@ def read_chunk(fileobj): # get a list of events and tempo changes for each track tracks = [] first_tempos = None - for tracknum in xrange(ntracks): + for tracknum in range(ntracks): identifier, chunk = read_chunk(fileobj) if identifier != b"MTrk": continue diff --git a/script.module.mutagen/lib/mutagen/tak.py b/script.module.mutagen/lib/mutagen/tak.py index 3606a0ce4..2c457c979 100644 --- a/script.module.mutagen/lib/mutagen/tak.py +++ b/script.module.mutagen/lib/mutagen/tak.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2008 Lukáš Lalinský # Copyright (C) 2019 Philipp Wolfer # @@ -21,7 +20,6 @@ import struct -from ._compat import endswith from mutagen import StreamInfo from mutagen.apev2 import ( APEv2File, @@ -33,6 +31,7 @@ BitReaderError, convert_error, enum, + endswith, ) @@ -163,6 +162,7 @@ def __init__(self, fileobj): raise TAKHeaderError("not a TAK file") bitreader = _LSBBitReader(fileobj) + found_stream_info = False while True: type = TAKMetadata(bitreader.bits(7)) bitreader.skip(1) # Unused @@ -174,12 +174,16 @@ def __init__(self, fileobj): break elif type == TAKMetadata.STREAM_INFO: self._parse_stream_info(bitreader, size) + found_stream_info = True elif type == TAKMetadata.ENCODER_INFO: self._parse_encoder_info(bitreader, data_size) assert bitreader.is_aligned() fileobj.seek(pos + size) + if not found_stream_info: + raise TAKHeaderError("missing stream info") + if self.sample_rate > 0: self.length = self.number_of_samples / float(self.sample_rate) diff --git a/script.module.mutagen/lib/mutagen/trueaudio.py b/script.module.mutagen/lib/mutagen/trueaudio.py index e62f45565..805a83b01 100644 --- a/script.module.mutagen/lib/mutagen/trueaudio.py +++ b/script.module.mutagen/lib/mutagen/trueaudio.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2006 Joe Wreschnig # # This program is free software; you can redistribute it and/or modify @@ -10,17 +9,16 @@ True Audio is a lossless format designed for real-time encoding and decoding. This module is based on the documentation at -http://www.true-audio.com/TTA_Lossless_Audio_Codec\\_-_Format_Description +http://tausoft.org/wiki/True_Audio_Codec_Format True Audio files use ID3 tags. """ __all__ = ["TrueAudio", "Open", "delete", "EasyTrueAudio"] -from ._compat import endswith from mutagen import StreamInfo from mutagen.id3 import ID3FileType, delete -from mutagen._util import cdata, MutagenError, convert_error +from mutagen._util import cdata, MutagenError, convert_error, endswith class error(MutagenError): @@ -49,9 +47,11 @@ def __init__(self, fileobj, offset): header = fileobj.read(18) if len(header) != 18 or not header.startswith(b"TTA"): raise TrueAudioHeaderError("TTA header not found") - self.sample_rate = cdata.int_le(header[10:14]) + self.sample_rate = cdata.uint_le(header[10:14]) samples = cdata.uint_le(header[14:18]) - self.length = float(samples) / self.sample_rate + self.length = 0.0 + if self.sample_rate != 0: + self.length = float(samples) / self.sample_rate def pprint(self): return u"True Audio, %.2f seconds, %d Hz." 
% ( @@ -99,4 +99,4 @@ class EasyTrueAudio(TrueAudio): """ from mutagen.easyid3 import EasyID3 as ID3 - ID3 = ID3 + ID3 = ID3 # type: ignore diff --git a/script.module.mutagen/lib/mutagen/wave.py b/script.module.mutagen/lib/mutagen/wave.py new file mode 100644 index 000000000..6391a5d5d --- /dev/null +++ b/script.module.mutagen/lib/mutagen/wave.py @@ -0,0 +1,209 @@ +# Copyright (C) 2017 Borewit +# Copyright (C) 2019-2020 Philipp Wolfer +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. + +"""Microsoft WAVE/RIFF audio file/stream information and tags.""" + +import sys +import struct + +from mutagen import StreamInfo, FileType + +from mutagen.id3 import ID3 +from mutagen._riff import RiffFile, InvalidChunk +from mutagen._iff import error as IffError +from mutagen.id3._util import ID3NoHeaderError, error as ID3Error +from mutagen._util import ( + convert_error, + endswith, + loadfile, + reraise, +) + +__all__ = ["WAVE", "Open", "delete"] + + +class error(IffError): + """WAVE stream parsing errors.""" + + +class _WaveFile(RiffFile): + """Representation of a RIFF/WAVE file""" + + def __init__(self, fileobj): + RiffFile.__init__(self, fileobj) + + if self.file_type != u'WAVE': + raise error("Expected RIFF/WAVE.") + + # Normalize ID3v2-tag-chunk to lowercase + if u'ID3' in self: + self[u'ID3'].id = u'id3' + + +class WaveStreamInfo(StreamInfo): + """WaveStreamInfo() + + Microsoft WAVE file information. + + Information is parsed from the 'fmt' & 'data'chunk of the RIFF/WAVE file + + Attributes: + length (`float`): audio length, in seconds + bitrate (`int`): audio bitrate, in bits per second + channels (`int`): The number of audio channels + sample_rate (`int`): audio sample rate, in Hz + bits_per_sample (`int`): The audio sample size + """ + + length = 0.0 + bitrate = 0 + channels = 0 + sample_rate = 0 + bits_per_sample = 0 + + SIZE = 16 + + @convert_error(IOError, error) + def __init__(self, fileobj): + """Raises error""" + + wave_file = _WaveFile(fileobj) + try: + format_chunk = wave_file[u'fmt'] + except KeyError as e: + raise error(str(e)) + + data = format_chunk.read() + if len(data) < 16: + raise InvalidChunk() + + # RIFF: http://soundfile.sapp.org/doc/WaveFormat/ + # Python struct.unpack: + # https://docs.python.org/2/library/struct.html#byte-order-size-and-alignment + info = struct.unpack(' 0: + try: + data_chunk = wave_file[u'data'] + self._number_of_samples = data_chunk.data_size / block_align + except KeyError: + pass + + if self.sample_rate > 0: + self.length = self._number_of_samples / self.sample_rate + + def pprint(self): + return u"%d channel RIFF @ %d bps, %s Hz, %.2f seconds" % ( + self.channels, self.bitrate, self.sample_rate, self.length) + + +class _WaveID3(ID3): + """A Wave file with ID3v2 tags""" + + def _pre_load_header(self, fileobj): + try: + fileobj.seek(_WaveFile(fileobj)[u'id3'].data_offset) + except (InvalidChunk, KeyError): + raise ID3NoHeaderError("No ID3 chunk") + + @convert_error(IOError, error) + @loadfile(writable=True) + def save(self, filething, v1=1, v2_version=4, v23_sep='/', padding=None): + """Save ID3v2 data to the Wave/RIFF file""" + + fileobj = filething.fileobj + wave_file = _WaveFile(fileobj) + + if u'id3' not in wave_file: + wave_file.insert_chunk(u'id3') + + chunk = wave_file[u'id3'] + + try: + data = self._prepare_data( + fileobj, chunk.data_offset, 
chunk.data_size, v2_version, + v23_sep, padding) + except ID3Error as e: + reraise(error, e, sys.exc_info()[2]) + + chunk.resize(len(data)) + chunk.write(data) + + def delete(self, filething): + """Completely removes the ID3 chunk from the RIFF/WAVE file""" + + delete(filething) + self.clear() + + +@convert_error(IOError, error) +@loadfile(method=False, writable=True) +def delete(filething): + """Completely removes the ID3 chunk from the RIFF/WAVE file""" + + try: + _WaveFile(filething.fileobj).delete_chunk(u'id3') + except KeyError: + pass + + +class WAVE(FileType): + """WAVE(filething) + + A Waveform Audio File Format + (WAVE, or more commonly known as WAV due to its filename extension) + + Arguments: + filething (filething) + + Attributes: + tags (`mutagen.id3.ID3`) + info (`WaveStreamInfo`) + """ + + _mimes = ["audio/wav", "audio/wave"] + + @staticmethod + def score(filename, fileobj, header): + filename = filename.lower() + + return (header.startswith(b"RIFF") + (header[8:12] == b'WAVE') + + endswith(filename, b".wav") + endswith(filename, b".wave")) + + def add_tags(self): + """Add an empty ID3 tag to the file.""" + if self.tags is None: + self.tags = _WaveID3() + else: + raise error("an ID3 tag already exists") + + @convert_error(IOError, error) + @loadfile() + def load(self, filething, **kwargs): + """Load stream and tag information from a file.""" + + fileobj = filething.fileobj + self.info = WaveStreamInfo(fileobj) + fileobj.seek(0, 0) + + try: + self.tags = _WaveID3(fileobj, **kwargs) + except ID3NoHeaderError: + self.tags = None + except ID3Error as e: + raise error(e) + else: + self.tags.filename = self.filename + + +Open = WAVE diff --git a/script.module.mutagen/lib/mutagen/wavpack.py b/script.module.mutagen/lib/mutagen/wavpack.py index 23b5e2890..c2515eef1 100644 --- a/script.module.mutagen/lib/mutagen/wavpack.py +++ b/script.module.mutagen/lib/mutagen/wavpack.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2006 Joe Wreschnig # 2014 Christoph Reiter # @@ -78,6 +77,7 @@ class WavPackInfo(StreamInfo): channels (int): number of audio channels (1 or 2) length (float): file length in seconds, as a float sample_rate (int): audio sampling rate in Hz + bits_per_sample (int): audio sample size version (int): WavPack stream version """ @@ -90,6 +90,12 @@ def __init__(self, fileobj): self.version = header.version self.channels = bool(header.flags & 4) or 2 self.sample_rate = RATES[(header.flags >> 23) & 0xF] + self.bits_per_sample = ((header.flags & 3) + 1) * 8 + + # most common multiplier (DSD64) + if (header.flags >> 31) & 1: + self.sample_rate *= 4 + self.bits_per_sample = 1 if header.total_samples == -1 or header.block_index != 0: # TODO: we could make this faster by using the tag size diff --git a/script.module.mutagen/icon.png b/script.module.mutagen/resources/icon.png similarity index 100% rename from script.module.mutagen/icon.png rename to script.module.mutagen/resources/icon.png
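Finally, a short usage sketch for the new mutagen.wave module added by this patch: it exposes RIFF/WAVE stream information and stores tags in an ID3 chunk. The file name is hypothetical and the printed figures are only indicative:

from mutagen.wave import WAVE
from mutagen.id3 import TIT2

audio = WAVE("example.wav")   # hypothetical input file
print(audio.info.pprint())    # e.g. "2 channel RIFF @ 1411200 bps, 44100 Hz, 180.00 seconds"

if audio.tags is None:
    audio.add_tags()          # an 'id3 ' chunk is created on save if missing
audio.tags.add(TIT2(encoding=3, text=["An example title"]))
audio.save()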