Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Quotes consistency #2326

Merged
merged 3 commits into from
Sep 25, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 9 additions & 1 deletion CONTRIBUTING.md
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,15 @@ print(d['small'])
In above example, `l1` contains identifiers (keys of dict `d`) while others are English words/phrases.
In the dict declaration, keys are single-quoted but values are double-quoted.

Otherwise, choose whichever limits the number of escaped characters.
URIs (and paths used in URIs) should be, in general, enclosed in double quotes,
mainly because single quotes can appear in URIs, unencoded, as sub-delimiters as specified
by [RFC3986](https://www.rfc-editor.org/rfc/rfc3986#section-2.2).

HTML/XML code often contains attributes that are enclosed by double quotes, so in this case,
better use single quotes, e.g. `html = '<a href="someurl">text</a>'`.

If in doubt, choose whichever limits the number of escaped characters.
Typically single quote strings that are meant to contain double quotes (e.g. `'The file is "{file}"'`).


## Git Work-flow
Expand Down
4 changes: 2 additions & 2 deletions picard/acoustid/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -264,7 +264,7 @@ def _run_next_task(self):
# long path support is enabled. Ensure the path is properly prefixed.
if IS_WIN:
file_path = win_prefix_longpath(file_path)
process.start(self._fpcalc, ["-json", "-length", "120", file_path])
process.start(self._fpcalc, ['-json', '-length', '120', file_path])
log.debug("Starting fingerprint calculator %r %r", self._fpcalc, task.file.filename)

def analyze(self, file, next_func):
Expand All @@ -273,7 +273,7 @@ def analyze(self, file, next_func):

config = get_config()
fingerprint = task.file.acoustid_fingerprint
if not fingerprint and not config.setting["ignore_existing_acoustid_fingerprints"]:
if not fingerprint and not config.setting['ignore_existing_acoustid_fingerprints']:
# use cached fingerprint from file metadata
fingerprints = task.file.metadata.getall('acoustid_fingerprint')
if fingerprints:
Expand Down
6 changes: 3 additions & 3 deletions picard/acoustid/manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -206,7 +206,7 @@ def _batch_submit(self, submissions, errors=None):
log.debug("AcoustID: submitting batch of %d fingerprints (%d remaining)…",
len(batch), len(submissions))
self.tagger.window.set_statusbar_message(
N_('Submitting AcoustIDs …'),
N_("Submitting AcoustIDs …"),
echo=None
)
if not errors:
Expand All @@ -228,7 +228,7 @@ def _batch_submit_finished(self, submissions, batch, previous_errors, document,
else:
try:
errordoc = load_json(document)
message = errordoc["error"]["message"]
message = errordoc['error']['message']
except BaseException:
message = ""
mparms = {
Expand All @@ -241,7 +241,7 @@ def _batch_submit_finished(self, submissions, batch, previous_errors, document,
self.tagger.window.set_statusbar_message(
log_msg, mparms, echo=None, timeout=3000)
else:
log.debug('AcoustID: %d fingerprints successfully submitted', len(batch))
log.debug("AcoustID: %d fingerprints successfully submitted", len(batch))
for file, submission in batch:
submission.orig_recordingid = submission.recordingid
file.update()
Expand Down
44 changes: 22 additions & 22 deletions picard/album.py
Original file line number Diff line number Diff line change
Expand Up @@ -154,7 +154,7 @@ def __init__(self, album_id, discid=None):
self.update_metadata_images_enabled = True

def __repr__(self):
return '<Album %s %r>' % (self.id, self.metadata["album"])
return '<Album %s %r>' % (self.id, self.metadata['album'])

def iterfiles(self, save=False):
for track in self.tracks:
Expand Down Expand Up @@ -288,11 +288,11 @@ def _release_request_finished(self, document, http, error):
if error == QtNetwork.QNetworkReply.NetworkError.ContentNotFoundError:
config = get_config()
nats = False
nat_name = config.setting["nat_name"]
nat_name = config.setting['nat_name']
files = list(self.unmatched_files.files)
for file in files:
recordingid = file.metadata["musicbrainz_recordingid"]
if mbid_validate(recordingid) and file.metadata["album"] == nat_name:
recordingid = file.metadata['musicbrainz_recordingid']
if mbid_validate(recordingid) and file.metadata['album'] == nat_name:
nats = True
self.tagger.move_file_to_nat(file, recordingid)
self.tagger.nats.update()
Expand All @@ -304,7 +304,7 @@ def _release_request_finished(self, document, http, error):
parse_result = self._parse_release(document)
config = get_config()
if parse_result == ParseResult.MISSING_TRACK_RELS:
log.debug('Recording relationships not loaded in initial request for %r, issuing separate requests', self)
log.debug("Recording relationships not loaded in initial request for %r, issuing separate requests", self)
self._request_recording_relationships()
elif parse_result == ParseResult.PARSED:
self._run_album_metadata_processors()
Expand All @@ -327,7 +327,7 @@ def _request_recording_relationships(self, offset=0, limit=RECORDING_QUERY_LIMIT
'work-rels',
'work-level-rels',
)
log.debug('Loading recording relationships for %r (offset=%i, limit=%i)', self, offset, limit)
log.debug("Loading recording relationships for %r (offset=%i, limit=%i)", self, offset, limit)
self._requests += 1
self.load_task = self.tagger.mb_api.browse_recordings(
self._recordings_request_finished,
Expand Down Expand Up @@ -400,7 +400,7 @@ def _finalize_loading_track(self, track_node, metadata, artists, extra_metadata=
track._customize_metadata()

self._new_metadata.length += tm.length
artists.add(tm["artist"])
artists.add(tm['artist'])
if extra_metadata:
tm.update(extra_metadata)

Expand All @@ -426,7 +426,7 @@ def _load_track(node, mm, artists, extra_metadata):
va = self._new_metadata['musicbrainz_albumartistid'] == VARIOUS_ARTISTS_ID

djmix_ars = {}
if hasattr(self._new_metadata, "_djmix_ars"):
if hasattr(self._new_metadata, '_djmix_ars'):
djmix_ars = self._new_metadata._djmix_ars

for medium_node in self._release_node['media']:
Expand All @@ -437,13 +437,13 @@ def _load_track(node, mm, artists, extra_metadata):
if fmt:
all_media.append(fmt)

for dj in djmix_ars.get(mm["discnumber"], []):
mm.add("djmixer", dj)
for dj in djmix_ars.get(mm['discnumber'], []):
mm.add('djmixer', dj)

if va:
mm["compilation"] = "1"
mm['compilation'] = '1'
else:
del mm["compilation"]
del mm['compilation']

if 'discs' in medium_node:
discids = [disc.get('id') for disc in medium_node['discs']]
Expand All @@ -468,9 +468,9 @@ def _load_track(node, mm, artists, extra_metadata):

multiartists = len(artists) > 1
for track in self._new_tracks:
track.metadata["~totalalbumtracks"] = totalalbumtracks
track.metadata['~totalalbumtracks'] = totalalbumtracks
if multiartists:
track.metadata["~multiartist"] = "1"
track.metadata['~multiartist'] = '1'
del self._release_node
del self._release_artist_nodes
self._tracks_loaded = True
Expand Down Expand Up @@ -535,10 +535,10 @@ def _finalize_loading(self, error):
import inspect
stack = inspect.stack()
args = [self]
msg = 'Album._finalize_loading called for already loaded album %r'
msg = "Album._finalize_loading called for already loaded album %r"
if len(stack) > 1:
f = stack[1]
msg += ' at %s:%d in %s'
msg += " at %s:%d in %s"
args.extend((f.filename, f.lineno, f.function))
log.warning(msg, *args)
return
Expand Down Expand Up @@ -570,7 +570,7 @@ def load(self, priority=False, refresh=False):
log.info("Not reloading, some requests are still active.")
return
self.tagger.window.set_statusbar_message(
N_('Loading album %(id)s …'),
N_("Loading album %(id)s …"),
{'id': self.id}
)
self.loaded = False
Expand Down Expand Up @@ -863,18 +863,18 @@ def keep_original_images(self):
class NatAlbum(Album):

def __init__(self):
super().__init__("NATS")
super().__init__('NATS')
self.loaded = True
self.update()

def update(self, update_tracks=True, update_selection=True):
config = get_config()
self.enable_update_metadata_images(False)
old_album_title = self.metadata["album"]
self.metadata["album"] = config.setting["nat_name"]
old_album_title = self.metadata['album']
self.metadata['album'] = config.setting['nat_name']
for track in self.tracks:
if old_album_title == track.metadata["album"]:
track.metadata["album"] = self.metadata["album"]
if old_album_title == track.metadata['album']:
track.metadata['album'] = self.metadata['album']
for file in track.files:
track.update_file_metadata(file)
self.enable_update_metadata_images(True)
Expand Down
20 changes: 10 additions & 10 deletions picard/browser/addrelease.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@
import jwt
import jwt.exceptions
except ImportError:
log.debug('PyJWT not available, addrelease functionality disabled')
log.debug("PyJWT not available, addrelease functionality disabled")
jwt = None

__key = token_bytes() # Generating a new secret on each startup
Expand Down Expand Up @@ -89,18 +89,18 @@ def submit_file(file, as_release=False):
def serve_form(token):
try:
payload = jwt.decode(token, __key, algorithms=__algorithm)
log.debug('received JWT token %r', payload)
log.debug("received JWT token %r", payload)
tagger = QCoreApplication.instance()
tport = tagger.browser_integration.port
if 'cluster' in payload:
cluster = _find_cluster(tagger, payload['cluster'])
if not cluster:
raise NotFoundError('Cluster not found')
raise NotFoundError("Cluster not found")
return _get_cluster_form(cluster, tport)
elif 'file' in payload:
file = _find_file(tagger, payload['file'])
if not file:
raise NotFoundError('File not found')
raise NotFoundError("File not found")
if payload.get('as_release', False):
return _get_file_as_release_form(file, tport)
else:
Expand Down Expand Up @@ -141,29 +141,29 @@ def _find_file(tagger, path):

def _get_cluster_form(cluster, tport):
return _get_form(
_('Add cluster as release'),
_("Add cluster as release"),
'/release/add',
_('Add cluster as release…'),
_("Add cluster as release…"),
_get_cluster_data(cluster),
{'tport': tport}
)


def _get_file_as_release_form(file, tport):
return _get_form(
_('Add file as release'),
_("Add file as release"),
'/release/add',
_('Add file as release…'),
_("Add file as release…"),
_get_file_as_release_data(file),
{'tport': tport}
)


def _get_file_as_recording_form(file, tport):
return _get_form(
_('Add file as recording'),
_("Add file as recording"),
'/recording/create',
_('Add file as recording…'),
_("Add file as recording…"),
_get_file_as_recording_data(file),
{'tport': tport}
)
Expand Down
8 changes: 4 additions & 4 deletions picard/browser/filelookup.py
Original file line number Diff line number Diff line change
Expand Up @@ -133,7 +133,7 @@ def mbid_lookup(self, string, type_=None, mbid_matched_callback=None, browser_fa
id = m.group('id')
if entity != 'cdtoc':
id = id.lower()
log.debug('Lookup for %s:%s', entity, id)
log.debug("Lookup for %s:%s", entity, id)
if mbid_matched_callback:
mbid_matched_callback(entity, id)
if entity == 'release':
Expand Down Expand Up @@ -162,10 +162,10 @@ def tag_lookup(self, artist, release, track, tracknum, duration, filename):
'duration': duration,
'filename': os.path.basename(filename),
}
return self._build_launch('/taglookup', params)
return self._build_launch("/taglookup", params)

def collection_lookup(self, userid):
return self._build_launch('/user/%s/collections' % userid)
return self._build_launch("/user/%s/collections" % userid)

def search_entity(self, type_, query, adv=False, mbid_matched_callback=None, force_browser=False):
if not force_browser and self.mbid_lookup(query, type_, mbid_matched_callback=mbid_matched_callback):
Expand All @@ -178,4 +178,4 @@ def search_entity(self, type_, query, adv=False, mbid_matched_callback=None, for
}
if adv:
params['adv'] = 'on'
return self._build_launch('/search/textsearch', params)
return self._build_launch("/search/textsearch", params)
6 changes: 3 additions & 3 deletions picard/cluster.py
Original file line number Diff line number Diff line change
Expand Up @@ -120,7 +120,7 @@ def __repr__(self):
if self.related_album:
return '<Cluster %s %r>' % (
self.related_album.id,
self.related_album.metadata["album"] + '/' + self.metadata['album']
self.related_album.metadata['album'] + '/' + self.metadata['album']
)
return '<Cluster %r>' % self.metadata['album']

Expand Down Expand Up @@ -308,8 +308,8 @@ def cluster(files):

cluster_list = defaultdict(FileCluster)
for file in files:
artist = file.metadata["albumartist"] or file.metadata["artist"]
album = file.metadata["album"]
artist = file.metadata['albumartist'] or file.metadata['artist']
album = file.metadata['album']

# Improve clustering from directory structure if no existing tags
# Only used for grouping and to provide cluster title / artist - not added to file tags.
Expand Down
8 changes: 4 additions & 4 deletions picard/collection.py
Original file line number Diff line number Diff line change
Expand Up @@ -121,12 +121,12 @@ def request_finished(document, reply, error):
echo=log.error
)
return
if document and "collections" in document:
if document and 'collections' in document:
collection_list = document['collections']
new_collections = set()

for node in collection_list:
if node["entity-type"] != "release":
if node['entity-type'] != 'release':
continue
col_id = node['id']
col_name = node['name']
Expand All @@ -152,10 +152,10 @@ def request_finished(document, reply, error):
def add_release_to_user_collections(release_node):
"""Add album to collections"""
    # Check for empty collection list
if "collections" in release_node:
if 'collections' in release_node:
release_id = release_node['id']
config = get_config()
username = config.persist["oauth_username"].lower()
username = config.persist['oauth_username'].lower()
for node in release_node['collections']:
if node['editor'].lower() == username:
col_id = node['id']
Expand Down
Loading
Loading