Compare commits

...

3 Commits

Author SHA1 Message Date
1d6d3405c3 Use different binaries for feature extraction 2025-09-23 22:50:52 -04:00
f43eeaafe0 Remove Gaia filtering 2025-09-23 19:42:44 -04:00
a8794fa239 Better replaygain parsing 2025-09-23 19:37:29 -04:00
8 changed files with 310 additions and 259 deletions

View File

@@ -1,4 +1,5 @@
 import os
+import re
 import json
 import shutil
 import subprocess
@@ -8,10 +9,10 @@ import struct
 import threading
 import concurrent.futures
 import tempfile
+import math
+import yaml
 from functools import partial
 from typing import List, Tuple, Dict, Optional
 from picard import config, log
 from picard.ui.itemviews import (
     BaseAction,
@@ -22,10 +23,7 @@ from picard.track import Track
 from picard.album import Album
 from picard.ui.options import OptionsPage, register_options_page
 from picard.util import thread
-from picard.coverart.image import (
-    CoverArtImage,
-    CoverArtImageError,
-)
+from picard.coverart.image import CoverArtImage
 from PyQt5 import QtWidgets, QtCore

 _analysis_semaphore = None
@@ -56,20 +54,23 @@ class AcousticBrainzNG:
             binary_path += '.exe'

         return binary_path

-    def _get_binary_paths(self) -> Tuple[str, str]:
+    def _get_binary_paths(self) -> Tuple[str, str, str]:
         binaries_path = config.setting["acousticbrainz_ng_binaries_path"]
         if not binaries_path:
             raise ValueError("Binaries path not configured")

         musicnn_binary_path = self._get_binary_path("streaming_musicnn_predict", binaries_path)
-        gaia_binary_path = self._get_binary_path("streaming_extractor_music", binaries_path)
+        rhythm_binary_path = self._get_binary_path("streaming_rhythmextractor_multifeature", binaries_path)
+        key_binary_path = self._get_binary_path("streaming_key", binaries_path)

         if not os.path.exists(musicnn_binary_path):
             raise FileNotFoundError(f"Binary {musicnn_binary_path} not found")

-        if not os.path.exists(gaia_binary_path):
-            raise FileNotFoundError(f"Binary {gaia_binary_path} not found")
-
-        return musicnn_binary_path, gaia_binary_path
+        if not os.path.exists(rhythm_binary_path):
+            raise FileNotFoundError(f"Binary {rhythm_binary_path} not found")
+        if not os.path.exists(key_binary_path):
+            raise FileNotFoundError(f"Binary {key_binary_path} not found")
+
+        return musicnn_binary_path, rhythm_binary_path, key_binary_path

     def _run_musicnn_models(self, models: List[Tuple[str, str]], musicnn_binary_path: str, file: str, output_path: str) -> bool:
         models_path = config.setting["acousticbrainz_ng_models_path"]
@@ -133,7 +134,7 @@ class AcousticBrainzNG:
             return False

         try:
-            musicnn_binary_path, gaia_binary_path = self._get_binary_paths()
+            musicnn_binary_path, rhythm_binary_path, key_binary_path = self._get_binary_paths()
         except (ValueError, FileNotFoundError) as e:
             log.error(str(e))
             return False
@@ -147,80 +148,86 @@ class AcousticBrainzNG:
             log.error(f"Error generating cache folder: {e}")
             return False

-        gaia_success = True
-        def run_gaia():
-            nonlocal gaia_success
-            if os.path.exists(os.path.join(output_path, "gaia.json")):
+        rhythm_success = True
+        def run_rhythm():
+            nonlocal rhythm_success
+            if os.path.exists(os.path.join(output_path, "rhythm.yaml")):
                 return

-            jq_path = config.setting["acousticbrainz_ng_jq_path"]
-            if not jq_path or not os.path.exists(jq_path):
-                log.error("jq binary path not configured or invalid")
-                gaia_success = False
-                return
-
-            gaia_proc = subprocess.run(
-                [gaia_binary_path, file, "-"],
+            rhythm_proc = subprocess.run(
+                [rhythm_binary_path, file],
                 capture_output=True,
                 text=True,
                 env=ENV,
                 creationflags=subprocess.CREATE_NO_WINDOW if os.name == 'nt' else 0
             )

-            if gaia_proc.returncode != 0:
-                gaia_success = False
-                log.error(f"Gaia binary {gaia_binary_path} failed on file {file} with exit code {gaia_proc.returncode}")
-                if gaia_proc.stdout:
-                    log.error(f"Gaia stdout: {gaia_proc.stdout}")
-                if gaia_proc.stderr:
-                    log.error(f"Gaia stderr: {gaia_proc.stderr}")
-                return
-
-            jq_filter = (
-                "{ rhythm: { bpm: .rhythm.bpm }, "
-                "tonal: { "
-                "chords_changes_rate: .tonal.chords_changes_rate, "
-                "chords_key: .tonal.chords_key, "
-                "chords_scale: .tonal.chords_scale, "
-                "key_temperley: { key: .tonal.key_temperley.key, scale: .tonal.key_temperley.scale, strength: .tonal.key_temperley.strength }, "
-                "key_krumhansl: { key: .tonal.key_krumhansl.key, scale: .tonal.key_krumhansl.scale, strength: .tonal.key_krumhansl.strength }, "
-                "key_edma: { key: .tonal.key_edma.key, scale: .tonal.key_edma.scale, strength: .tonal.key_edma.strength } "
-                "} }"
-            )
-            jq_proc = subprocess.run(
-                [jq_path, jq_filter],
-                input=gaia_proc.stdout,
-                capture_output=True,
-                text=True,
-                env=ENV,
-                creationflags=subprocess.CREATE_NO_WINDOW if os.name == 'nt' else 0
-            )
-            if jq_proc.returncode != 0:
-                gaia_success = False
-                log.error(f"jq failed to post-process Gaia JSON with exit code {jq_proc.returncode}")
-                if jq_proc.stdout:
-                    log.error(f"jq stdout: {jq_proc.stdout}")
-                if jq_proc.stderr:
-                    log.error(f"jq stderr: {jq_proc.stderr}")
+            if rhythm_proc.returncode != 0:
+                rhythm_success = False
+                log.error(f"Rhythm binary {rhythm_binary_path} failed on file {file} with exit code {rhythm_proc.returncode}")
+                if rhythm_proc.stdout:
+                    log.error(f"Rhythm stdout: {rhythm_proc.stdout}")
+                if rhythm_proc.stderr:
+                    log.error(f"Rhythm stderr: {rhythm_proc.stderr}")
                 return

             try:
-                os.makedirs(output_path, exist_ok=True)
-                with open(os.path.join(output_path, "gaia.json"), "w", encoding="utf-8") as f:
-                    f.write(jq_proc.stdout)
+                stdout = rhythm_proc.stdout or ""
+                lines = stdout.splitlines(keepends=True)
+                if not lines:
+                    raise ValueError("Rhythm binary produced no stdout")
+
+                yaml_lines = lines[-5:] if len(lines) >= 5 else lines
+                yaml_str = "".join(yaml_lines)
+                if not yaml_str.strip():
+                    raise ValueError("Empty YAML section extracted from rhythm binary output")
+
+                out_file = os.path.join(output_path, "rhythm.yaml")
+                with open(out_file, "w", encoding="utf-8") as f:
+                    f.write(yaml_str)
             except Exception as e:
-                gaia_success = False
-                log.error(f"Failed to write processed Gaia JSON: {e}")
+                rhythm_success = False
+                log.error(f"Failed to extract/save rhythm.yaml from rhythm binary stdout: {e}")
+                if rhythm_proc.stdout:
+                    log.error(f"Rhythm stdout: {rhythm_proc.stdout}")
+                if rhythm_proc.stderr:
+                    log.error(f"Rhythm stderr: {rhythm_proc.stderr}")
+                return
+
+        key_success = True
+        def run_key():
+            nonlocal key_success
+            if os.path.exists(os.path.join(output_path, "key.yaml")):
+                return
+
+            key_proc = subprocess.run(
+                [key_binary_path, file, os.path.join(output_path, "key.yaml")],
+                capture_output=True,
+                text=True,
+                env=ENV,
+                creationflags=subprocess.CREATE_NO_WINDOW if os.name == 'nt' else 0
+            )
+
+            if key_proc.returncode != 0:
+                key_success = False
+                log.error(f"Key binary {key_binary_path} failed on file {file} with exit code {key_proc.returncode}")
+                if key_proc.stdout:
+                    log.error(f"Key stdout: {key_proc.stdout}")
+                if key_proc.stderr:
+                    log.error(f"Key stderr: {key_proc.stderr}")
+                return

-        gaia_thread = threading.Thread(target=run_gaia)
-        gaia_thread.start()
+        rhythm_thread = threading.Thread(target=run_rhythm)
+        rhythm_thread.start()
+        key_thread = threading.Thread(target=run_key)
+        key_thread.start()

         musicnn_success = self._run_musicnn_models(REQUIRED_MODELS, musicnn_binary_path, file, output_path)
-        gaia_thread.join()
+        rhythm_thread.join()
+        key_thread.join()

-        return gaia_success and musicnn_success
+        return rhythm_success and key_success and musicnn_success

     def analyze_optional(self, metadata: Dict, file: str) -> bool:
         if not self._check_binaries():
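
Note on the hunk above: run_rhythm() keeps only the tail of the extractor's stdout, on the assumption made by this patch that streaming_rhythmextractor_multifeature prints its summary as a short YAML block in its last few lines. A minimal standalone sketch of that post-processing step, outside Picard; the helper name and the sample output are illustrative, not taken from the plugin:

import yaml  # PyYAML, which the new code already depends on

def extract_trailing_yaml(stdout: str, tail_lines: int = 5) -> dict:
    """Keep the last `tail_lines` lines of stdout and parse them as a YAML mapping."""
    lines = stdout.splitlines(keepends=True)
    if not lines:
        raise ValueError("no stdout to parse")
    loaded = yaml.safe_load("".join(lines[-tail_lines:]))
    if not isinstance(loaded, dict):
        raise ValueError("expected a YAML mapping at the top level")
    return loaded

# Illustrative shape only; real extractor output will differ.
sample = "progress 10%\nprogress 100%\ndone\nbpm: 120.5\nconfidence: 3.2\n"
print(extract_trailing_yaml(sample, tail_lines=2)["bpm"])  # 120.5
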
@@ -232,7 +239,7 @@ class AcousticBrainzNG:
             return False

         try:
-            musicnn_binary_path, _ = self._get_binary_paths()
+            musicnn_binary_path = self._get_binary_paths()[0]
         except (ValueError, FileNotFoundError) as e:
             log.error(str(e))
             return False
@@ -329,50 +336,48 @@ class AcousticBrainzNG:
         metadata['mood'] = moods
         metadata['tags'] = tags

-        gaia_data = {}
-        gaia_json_path = os.path.join(output_path, "gaia.json")
-
-        if os.path.exists(gaia_json_path):
-            try:
-                with open(gaia_json_path, 'r', encoding='utf-8') as f:
-                    gaia_data = json.load(f)
-            except (FileNotFoundError, json.JSONDecodeError) as e:
-                log.error(f"Error reading Gaia JSON file: {e}")
-                return False
-        else:
-            log.error(f"Gaia JSON file not found: {gaia_json_path}")
-            return False
+        rhythm_data = {}
+        rhythm_yaml_path = os.path.join(output_path, "rhythm.yaml")
+        key_data = {}
+        key_yaml_path = os.path.join(output_path, "key.yaml")
+
+        if os.path.exists(rhythm_yaml_path):
+            with open(rhythm_yaml_path, 'r', encoding='utf-8') as f:
+                loaded = yaml.safe_load(f)
+            if not isinstance(loaded, dict):
+                log.error("Invalid rhythm YAML format: expected a mapping at the top level")
+                return False
+            rhythm_data = loaded
+        else:
+            log.error(f"Rhythm YAML file not found: {rhythm_yaml_path}")
+            return False
+
+        if os.path.exists(key_yaml_path):
+            with open(key_yaml_path, 'r', encoding='utf-8') as f:
+                loaded = yaml.safe_load(f)
+            if not isinstance(loaded, dict):
+                log.error("Invalid key YAML format: expected a mapping at the top level")
+                return False
+            key_data = loaded
+        else:
+            log.error(f"Key YAML file not found: {key_yaml_path}")
+            return False

         try:
-            metadata["bpm"] = int(round(gaia_data["rhythm"]["bpm"]))
+            metadata["bpm"] = int(round(rhythm_data["bpm"]))
+            metadata["key"] = "o" if key_data["tonal"]["key_scale"] == "off" else f"{key_data['tonal']['key']}{'m' if key_data['tonal']['key_scale'] == 'minor' else ''}"

             if config.setting["acousticbrainz_ng_save_raw"]:
-                metadata["ab:lo:tonal:chords_changes_rate"] = gaia_data["tonal"]["chords_changes_rate"]
-                metadata["ab:lo:tonal:chords_key"] = gaia_data["tonal"]["chords_key"]
-                metadata["ab:lo:tonal:chords_scale"] = gaia_data["tonal"]["chords_scale"]
-
-            highestStrength = -1
-            selectedAlgorithm = None
-
-            for algorithm in GAIA_KEY_ALGORITHMS:
-                key_data = gaia_data["tonal"][f"key_{algorithm}"]
-                if key_data["strength"] > highestStrength:
-                    highestStrength = key_data["strength"]
-                    selectedAlgorithm = algorithm
-
-            if selectedAlgorithm:
-                selected_key_data = gaia_data["tonal"][f"key_{selectedAlgorithm}"]
-                metadata["key"] = "o" if selected_key_data["scale"] == "off" else f"{selected_key_data['key']}{'m' if selected_key_data['scale'] == 'minor' else ''}"
-
-                if config.setting["acousticbrainz_ng_save_raw"]:
-                    metadata["ab:lo:tonal:key_scale"] = selected_key_data["scale"]
-                    metadata["ab:lo:tonal:key_key"] = selected_key_data["key"]
+                metadata["ab:lo:tonal:key_scale"] = key_data["tonal"]["key_scale"]
+                metadata["ab:lo:tonal:key_key"] = key_data["tonal"]["key"]

             return True
         except Exception as e:
-            log.error(f"Error processing gaia data: {e}")
+            log.error(f"Error processing feature data: {e}")
             return False

     def parse_optional(self, metadata: Dict, file: str) -> bool:
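
Note on the tagging hunk above: the key tag format itself is unchanged ("o" when the detector reports no key, a trailing "m" for minor); only the source of the data moves from the Gaia JSON to key.yaml. A tiny self-contained sketch of that mapping, with a hypothetical helper name:

def key_tag(key: str, scale: str) -> str:
    # "off" means no usable key was detected; minor keys get an "m" suffix.
    if scale == "off":
        return "o"
    return f"{key}m" if scale == "minor" else key

assert key_tag("A", "minor") == "Am"
assert key_tag("F#", "major") == "F#"
assert key_tag("C", "off") == "o"
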
@@ -671,7 +676,7 @@ class AcousticBrainzNG:
         if not ffmpeg_path:
             raise ValueError("FFmpeg path not configured")

-        replaygain_lufs_result = subprocess.run(
+        replaygain_proc = subprocess.run(
             [ffmpeg_path, "-hide_banner", "-i", file_path, "-af", f"loudnorm=I={config.setting['acousticbrainz_ng_replaygain_reference_loudness']}:print_format=json", "-f", "null", "-"],
             capture_output=True,
             text=True,
@@ -679,40 +684,66 @@ class AcousticBrainzNG:
             creationflags=subprocess.CREATE_NO_WINDOW if os.name == 'nt' else 0
         )

-        if replaygain_lufs_result.returncode != 0:
-            log.error(f"FFmpeg failed for ReplayGain LUFS calculation on file {file_path} with exit code {replaygain_lufs_result.returncode}")
-            if replaygain_lufs_result.stdout:
-                log.error(f"FFmpeg stdout: {replaygain_lufs_result.stdout}")
-            if replaygain_lufs_result.stderr:
-                log.error(f"FFmpeg stderr: {replaygain_lufs_result.stderr}")
+        if replaygain_proc.returncode != 0:
+            log.error(f"FFmpeg failed for ReplayGain LUFS calculation on file {file_path} with exit code {replaygain_proc.returncode}")
+            if replaygain_proc.stdout:
+                log.error(f"FFmpeg stdout: {replaygain_proc.stdout}")
+            if replaygain_proc.stderr:
+                log.error(f"FFmpeg stderr: {replaygain_proc.stderr}")
             return {}

+        replaygain_log = replaygain_proc.stderr or replaygain_proc.stdout
+        replaygain_match = re.search(r'\{.*?\}', replaygain_log, re.S)
+        replaygain_matches = re.findall(r'\{.*?\}', replaygain_log, re.S) if not replaygain_match else None
+        replaygain_json_text = replaygain_match.group(0) if replaygain_match else (replaygain_matches[0] if replaygain_matches else None)
+
         replaygain_gain = None
         replaygain_peak = None
         replaygain_range = None
+        replaygain_lufs_result: dict | None = None

-        try:
-            json_start = replaygain_lufs_result.stderr.find('{')
-            if json_start != -1:
-                json_str = replaygain_lufs_result.stderr[json_start:]
-                json_end = json_str.find('}') + 1
-                if json_end > 0:
-                    loudnorm_data = json.loads(json_str[:json_end])
-                    input_i = loudnorm_data.get('input_i')
-                    input_tp = loudnorm_data.get('input_tp')
-                    input_lra = loudnorm_data.get('input_lra')
-
-                    if input_i and input_i != "-inf":
-                        replaygain_gain = f"{(config.setting['acousticbrainz_ng_replaygain_reference_loudness'] or -18) - float(input_i):.2f}"
-
-                    if input_tp and input_tp != "-inf":
-                        replaygain_peak = f"{10 ** (float(input_tp) / 20):.6f}"
-
-                    if input_lra and input_lra != "-inf":
-                        replaygain_range = f"{float(input_lra):.2f}"
-        except (json.JSONDecodeError, ValueError, TypeError):
-            pass
+        if replaygain_json_text:
+            try:
+                replaygain_lufs_result = json.loads(replaygain_json_text)
+            except json.JSONDecodeError:
+                if replaygain_matches:
+                    try:
+                        replaygain_lufs_result = json.loads(replaygain_matches[-1])
+                    except Exception:
+                        replaygain_lufs_result = None
+
+        input_i = replaygain_lufs_result.get('input_i') if replaygain_lufs_result else None
+        input_tp = replaygain_lufs_result.get('input_tp') if replaygain_lufs_result else None
+        input_lra = replaygain_lufs_result.get('input_lra') if replaygain_lufs_result else None
+
+        try:
+            if input_i:
+                input_i_val = float(input_i)
+        except (TypeError, ValueError):
+            input_i_val = None
+
+        try:
+            if input_tp:
+                input_tp_val = float(input_tp)
+        except (TypeError, ValueError):
+            input_tp_val = None
+
+        try:
+            if input_lra:
+                input_lra_val = float(input_lra)
+        except (TypeError, ValueError):
+            input_lra_val = None
+
+        if input_i_val is not None and math.isfinite(input_i_val):
+            replaygain_gain = f"{(config.setting['acousticbrainz_ng_replaygain_reference_loudness'] or -18) - input_i_val:.2f}"
+
+        if input_tp_val is not None and math.isfinite(input_tp_val):
+            replaygain_peak = f"{10 ** (input_tp_val / 20):.6f}"
+
+        if input_lra_val is not None and math.isfinite(input_lra_val):
+            replaygain_range = f"{input_lra_val:.2f}"

         result: Dict = {
             "replaygain_track_gain": replaygain_gain,
@@ -721,7 +752,7 @@ class AcousticBrainzNG:
             "replaygain_reference_loudness": f"{(config.setting['acousticbrainz_ng_replaygain_reference_loudness'] or -18):.2f}"
         }

-        r128_result = subprocess.run(
+        r128_proc = subprocess.run(
             [ffmpeg_path, "-hide_banner", "-i", file_path, "-af", "loudnorm=I=-23:print_format=json", "-f", "null", "-"],
             capture_output=True,
             text=True,
@@ -729,27 +760,44 @@ class AcousticBrainzNG:
             creationflags=subprocess.CREATE_NO_WINDOW if os.name == 'nt' else 0
         )

-        if r128_result.returncode != 0:
-            log.error(f"FFmpeg failed for R128 calculation on file {file_path} with exit code {r128_result.returncode}")
-            if r128_result.stdout:
-                log.error(f"FFmpeg stdout: {r128_result.stdout}")
-            if r128_result.stderr:
-                log.error(f"FFmpeg stderr: {r128_result.stderr}")
+        if r128_proc.returncode != 0:
+            log.error(f"FFmpeg failed for R128 calculation on file {file_path} with exit code {r128_proc.returncode}")
+            if r128_proc.stdout:
+                log.error(f"FFmpeg stdout: {r128_proc.stdout}")
+            if r128_proc.stderr:
+                log.error(f"FFmpeg stderr: {r128_proc.stderr}")
             return result

+        r128_log = r128_proc.stderr or r128_proc.stdout
+        r128_match = re.search(r'\{.*?\}', r128_log, re.S)
+        r128_matches = re.findall(r'\{.*?\}', r128_log, re.S) if not r128_match else None
+        r128_json_text = r128_match.group(0) if r128_match else (r128_matches[0] if r128_matches else None)
+
         r128_track_gain = None
+        r128_data: dict | None = None

-        try:
-            json_start = r128_result.stderr.find('{')
-            if json_start != -1:
-                json_str = r128_result.stderr[json_start:]
-                json_end = json_str.find('}') + 1
-                if json_end > 0:
-                    r128_data = json.loads(json_str[:json_end])
-                    r128_input_i = r128_data.get('input_i')
-
-                    if r128_input_i and r128_input_i != "-inf":
-                        r128_gain_db = -23 - float(r128_input_i)
-                        r128_track_gain = int(round(r128_gain_db * 256))
-
-                        if r128_track_gain < -32768:
+        if r128_json_text:
+            try:
+                r128_data = json.loads(r128_json_text)
+            except json.JSONDecodeError:
+                if r128_matches:
+                    try:
+                        r128_data = json.loads(r128_matches[-1])
+                    except Exception:
+                        r128_data = None
+
+        r128_input_i = r128_data.get('input_i') if r128_data else None
+
+        try:
+            if r128_input_i:
+                r128_input_i_val = int(r128_input_i)
+        except (TypeError, ValueError):
+            r128_input_i_val = None
+
+        if r128_input_i_val is not None and math.isfinite(r128_input_i_val):
+            r128_gain_db = -23 - r128_input_i_val
+            r128_track_gain = int(round(r128_gain_db * 256))
+
+            if r128_track_gain < -32768:
@@ -757,9 +805,6 @@
-                        elif r128_track_gain > 32767:
-                            r128_track_gain = 32767
-        except (json.JSONDecodeError, ValueError, TypeError):
-            pass
+            elif r128_track_gain > 32767:
+                r128_track_gain = 32767

         result["r128_track_gain"] = r128_track_gain
         return result
@@ -787,7 +832,7 @@ class AcousticBrainzNG:
             concat_file_path = concat_file.name

         try:
-            album_replaygain_result = subprocess.run(
+            album_replaygain_proc = subprocess.run(
                 [ffmpeg_path, "-hide_banner", "-f", "concat", "-safe", "0", "-i", concat_file_path,
                  "-vn", "-af", f"loudnorm=I={config.setting['acousticbrainz_ng_replaygain_reference_loudness']}:print_format=json", "-f", "null", "-"],
                 capture_output=True,
@@ -796,41 +841,67 @@ class AcousticBrainzNG:
                 creationflags=subprocess.CREATE_NO_WINDOW if os.name == 'nt' else 0
             )

-            if album_replaygain_result.returncode != 0:
-                log.error(f"FFmpeg failed for album ReplayGain calculation on {len(album_track_files)} files with exit code {album_replaygain_result.returncode}")
+            if album_replaygain_proc.returncode != 0:
+                log.error(f"FFmpeg failed for album ReplayGain calculation on {len(album_track_files)} files with exit code {album_replaygain_proc.returncode}")
                 log.error(f"Album files: {', '.join(album_track_files)}")
-                if album_replaygain_result.stdout:
-                    log.error(f"FFmpeg stdout: {album_replaygain_result.stdout}")
-                if album_replaygain_result.stderr:
-                    log.error(f"FFmpeg stderr: {album_replaygain_result.stderr}")
+                if album_replaygain_proc.stdout:
+                    log.error(f"FFmpeg stdout: {album_replaygain_proc.stdout}")
+                if album_replaygain_proc.stderr:
+                    log.error(f"FFmpeg stderr: {album_replaygain_proc.stderr}")
                 return {}

+            album_replaygain_log = album_replaygain_proc.stderr or album_replaygain_proc.stdout
+            album_replaygain_match = re.search(r'\{.*?\}', album_replaygain_log, re.S)
+            album_replaygain_matches = re.findall(r'\{.*?\}', album_replaygain_log, re.S) if not album_replaygain_match else None
+            album_replaygain_json_text = album_replaygain_match.group(0) if album_replaygain_match else (album_replaygain_matches[0] if album_replaygain_matches else None)
+
             album_gain = None
             album_peak = None
             album_range = None
+            loudnorm_data: dict | None = None

-            try:
-                json_start = album_replaygain_result.stderr.find('{')
-                if json_start != -1:
-                    json_str = album_replaygain_result.stderr[json_start:]
-                    json_end = json_str.find('}') + 1
-                    if json_end > 0:
-                        loudnorm_data = json.loads(json_str[:json_end])
-                        input_i = loudnorm_data.get('input_i')
-                        input_tp = loudnorm_data.get('input_tp')
-                        input_lra = loudnorm_data.get('input_lra')
-
-                        if input_i and input_i != "-inf":
-                            album_gain = f"{(config.setting['acousticbrainz_ng_replaygain_reference_loudness'] or -18) - float(input_i):.2f}"
-
-                        if input_tp and input_tp != "-inf":
-                            album_peak = f"{10 ** (float(input_tp) / 20):.6f}"
-
-                        if input_lra and input_lra != "-inf":
-                            album_range = f"{float(input_lra):.2f}"
-            except (json.JSONDecodeError, ValueError, TypeError):
-                pass
+            if album_replaygain_json_text:
+                try:
+                    loudnorm_data = json.loads(album_replaygain_json_text)
+                except json.JSONDecodeError:
+                    if album_replaygain_matches:
+                        try:
+                            loudnorm_data = json.loads(album_replaygain_matches[-1])
+                        except Exception:
+                            loudnorm_data = None
+
+            input_i = loudnorm_data.get('input_i') if loudnorm_data else None
+            input_tp = loudnorm_data.get('input_tp') if loudnorm_data else None
+            input_lra = loudnorm_data.get('input_lra') if loudnorm_data else None
+
+            try:
+                if input_i:
+                    input_i_val = float(input_i)
+            except (TypeError, ValueError):
+                input_i_val = None
+
+            try:
+                if input_tp:
+                    input_tp_val = float(input_tp)
+            except (TypeError, ValueError):
+                input_tp_val = None
+
+            try:
+                if input_lra:
+                    input_lra_val = float(input_lra)
+            except (TypeError, ValueError):
+                input_lra_val = None
+
+            if input_i_val is not None and math.isfinite(input_i_val):
+                album_gain = f"{(config.setting['acousticbrainz_ng_replaygain_reference_loudness'] or -18) - input_i_val:.2f}"
+
+            if input_tp_val is not None and math.isfinite(input_tp_val):
+                album_peak = f"{10 ** (input_tp_val / 20):.6f}"
+
+            if input_lra_val is not None and math.isfinite(input_lra_val):
+                album_range = f"{input_lra_val:.2f}"

             result: Dict = {
                 "replaygain_album_gain": album_gain,
@@ -838,7 +909,7 @@ class AcousticBrainzNG:
                 "replaygain_album_range": album_range
             }

-            album_r128_result = subprocess.run(
+            album_r128_proc = subprocess.run(
                 [ffmpeg_path, "-hide_banner", "-f", "concat", "-safe", "0", "-i", concat_file_path,
                  "-vn", "-af", "loudnorm=I=-23:print_format=json", "-f", "null", "-"],
                 capture_output=True,
@@ -847,28 +918,45 @@ class AcousticBrainzNG:
                 creationflags=subprocess.CREATE_NO_WINDOW if os.name == 'nt' else 0
             )

-            if album_r128_result.returncode != 0:
-                log.error(f"FFmpeg failed for album R128 calculation on {len(album_track_files)} files with exit code {album_r128_result.returncode}")
+            if album_r128_proc.returncode != 0:
+                log.error(f"FFmpeg failed for album R128 calculation on {len(album_track_files)} files with exit code {album_r128_proc.returncode}")
                 log.error(f"Album files: {', '.join(album_track_files)}")
-                if album_r128_result.stdout:
-                    log.error(f"FFmpeg stdout: {album_r128_result.stdout}")
-                if album_r128_result.stderr:
-                    log.error(f"FFmpeg stderr: {album_r128_result.stderr}")
+                if album_r128_proc.stdout:
+                    log.error(f"FFmpeg stdout: {album_r128_proc.stdout}")
+                if album_r128_proc.stderr:
+                    log.error(f"FFmpeg stderr: {album_r128_proc.stderr}")
                 return result

+            album_r128_log = album_r128_proc.stderr or album_r128_proc.stdout
+            album_r128_match = re.search(r'\{.*?\}', album_r128_log, re.S)
+            album_r128_matches = re.findall(r'\{.*?\}', album_r128_log, re.S) if not album_r128_match else None
+            album_r128_json_text = album_r128_match.group(0) if album_r128_match else (album_r128_matches[0] if album_r128_matches else None)
+
             r128_album_gain = None
+            r128_data: dict | None = None

-            try:
-                json_start = album_r128_result.stderr.find('{')
-                if json_start != -1:
-                    json_str = album_r128_result.stderr[json_start:]
-                    json_end = json_str.find('}') + 1
-                    if json_end > 0:
-                        r128_data = json.loads(json_str[:json_end])
-                        r128_input_i = r128_data.get('input_i')
-
-                        if r128_input_i and r128_input_i != "-inf":
-                            r128_gain_db = -23 - float(r128_input_i)
-                            r128_album_gain = int(round(r128_gain_db * 256))
-
-                            if r128_album_gain < -32768:
+            if album_r128_json_text:
+                try:
+                    r128_data = json.loads(album_r128_json_text)
+                except json.JSONDecodeError:
+                    if album_r128_matches:
+                        try:
+                            r128_data = json.loads(album_r128_matches[-1])
+                        except Exception:
+                            r128_data = None
+
+            r128_input_i = r128_data.get('input_i') if r128_data else None
+
+            try:
+                if r128_input_i:
+                    r128_input_i_val = int(r128_input_i)
+            except (TypeError, ValueError):
+                r128_input_i_val = None
+
+            if r128_input_i_val is not None and math.isfinite(r128_input_i_val):
+                r128_gain_db = -23 - r128_input_i_val
+                r128_album_gain = int(round(r128_gain_db * 256))
+
+                if r128_album_gain < -32768:
@@ -876,9 +964,6 @@
-                            elif r128_album_gain > 32767:
-                                r128_album_gain = 32767
-            except (json.JSONDecodeError, ValueError, TypeError):
-                pass
+                elif r128_album_gain > 32767:
+                    r128_album_gain = 32767

             result["r128_album_gain"] = r128_album_gain
             return result
@@ -1362,8 +1447,8 @@ class AcousticBrainzNGOptionsPage(OptionsPage):
         def update_concurrent_processes():
             concurrent_analyses = self.concurrent_analyses_input.value()
             musicnn_workers = self.musicnn_workers_input.value()
-            max_processes = concurrent_analyses + (concurrent_analyses * musicnn_workers)
-            breakdown = f"[{concurrent_analyses} gaia processes + ({concurrent_analyses} x {musicnn_workers}) MusicNN processes]"
+            max_processes = (2 * concurrent_analyses) + (concurrent_analyses * musicnn_workers)
+            breakdown = f"[(2 x {concurrent_analyses}) feature processes + ({concurrent_analyses} x {musicnn_workers}) MusicNN processes]"
             self.concurrent_processes_display.setText(f"{breakdown} = <span style='font-weight: bold;'>{max_processes}</span>")

         self.concurrent_analyses_input.valueChanged.connect(update_concurrent_processes)
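
Note on the options-page hunk above: the estimate now counts two feature-extraction processes per analysis (rhythm and key) instead of one Gaia process, so the upper bound is (2 x analyses) + (analyses x MusicNN workers). A quick worked example; the analysis count is illustrative, the worker count is the default from the config options:

concurrent_analyses = 2   # illustrative value
musicnn_workers = 4       # default of acousticbrainz_ng_max_musicnn_workers
max_processes = (2 * concurrent_analyses) + (concurrent_analyses * musicnn_workers)
print(max_processes)  # (2 x 2) + (2 x 4) = 12
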
@@ -1397,15 +1482,6 @@ class AcousticBrainzNGOptionsPage(OptionsPage):
             lambda: (self._check_binaries(show_success=True), None)[1]
         )

-        # jq path
-        self.jq_path_input = QtWidgets.QLineEdit(self)
-        self.jq_path_input.setPlaceholderText("Path to jq")
-        jq_layout = self._create_path_input_layout(
-            self.jq_path_input,
-            lambda: self._browse_file(self.jq_path_input),
-            lambda: (self._check_binaries(show_success=True), None)[1]
-        )
-
         # Models path
         self.models_path_input = QtWidgets.QLineEdit(self)
         self.models_path_input.setPlaceholderText("Path to MusicNN models")
@@ -1425,8 +1501,6 @@ class AcousticBrainzNGOptionsPage(OptionsPage):
         paths_layout.addWidget(QtWidgets.QLabel("FFmpeg", self))
         paths_layout.addLayout(ffmpeg_layout)
-        paths_layout.addWidget(QtWidgets.QLabel("jq", self))
-        paths_layout.addLayout(jq_layout)
         paths_layout.addWidget(QtWidgets.QLabel("Binaries", self))
         paths_layout.addLayout(binaries_layout)
         paths_layout.addWidget(QtWidgets.QLabel("Models", self))
@@ -1454,11 +1528,6 @@ class AcousticBrainzNGOptionsPage(OptionsPage):
             QtWidgets.QMessageBox.warning(self, "Binaries", "Invalid or empty FFmpeg path.")
             return False

-        jq_path = self.jq_path_input.text()
-        if not jq_path or not os.path.exists(jq_path):
-            QtWidgets.QMessageBox.warning(self, "Binaries", "Invalid or empty jq path.")
-            return False
-
         missing_binaries = []
         for binary in REQUIRED_BINARIES:
             binary_path = os.path.join(binaries_path, binary)
@@ -1481,20 +1550,6 @@ class AcousticBrainzNGOptionsPage(OptionsPage):
             missing_binaries.append("FFmpeg (unable to execute)")
             log.error(f"Exception running FFmpeg version check: {e}")

-        try:
-            result = subprocess.run([jq_path, "--version"], capture_output=True, text=True, creationflags=subprocess.CREATE_NO_WINDOW if os.name == 'nt' else 0)
-            if result.returncode != 0 or not result.stdout.startswith("jq-"):
-                missing_binaries.append("jq (invalid executable)")
-                if result.returncode != 0:
-                    log.error(f"jq version check failed with exit code {result.returncode}")
-                    if result.stdout:
-                        log.error(f"jq stdout: {result.stdout}")
-                    if result.stderr:
-                        log.error(f"jq stderr: {result.stderr}")
-        except Exception as e:
-            missing_binaries.append("jq (unable to execute)")
-            log.error(f"Exception running jq version check: {e}")
-
         if missing_binaries:
             message = f"Missing binaries:\n" + "\n".join(f"{binary}" for binary in missing_binaries)
             QtWidgets.QMessageBox.warning(self, "Binaries", message)
@@ -1586,7 +1641,6 @@ class AcousticBrainzNGOptionsPage(OptionsPage):
         self.binaries_path_input.setText(config.setting["acousticbrainz_ng_binaries_path"])
         self.ffmpeg_path_input.setText(config.setting["acousticbrainz_ng_ffmpeg_path"])
-        self.jq_path_input.setText(config.setting["acousticbrainz_ng_jq_path"])
         self.models_path_input.setText(config.setting["acousticbrainz_ng_models_path"])
         self.cache_path_input.setText(config.setting["acousticbrainz_ng_cache_path"])
@@ -1614,7 +1668,6 @@ class AcousticBrainzNGOptionsPage(OptionsPage):
         config.setting["acousticbrainz_ng_binaries_path"] = self.binaries_path_input.text()
         config.setting["acousticbrainz_ng_ffmpeg_path"] = self.ffmpeg_path_input.text()
-        config.setting["acousticbrainz_ng_jq_path"] = self.jq_path_input.text()
         config.setting["acousticbrainz_ng_models_path"] = self.models_path_input.text()
         config.setting["acousticbrainz_ng_cache_path"] = self.cache_path_input.text()

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -18,7 +18,7 @@ External dependencies:
 </ul>
 <strong>This plugin is CPU heavy!</strong>
 """
-PLUGIN_VERSION = "1.1.1"
+PLUGIN_VERSION = "1.1.2"
 PLUGIN_API_VERSIONS = ["2.7", "2.8", "2.9", "2.10", "2.11", "2.12", "2.13"]
 PLUGIN_LICENSE = "GPL-2.0-or-later"
 PLUGIN_LICENSE_URL = "https://www.gnu.org/licenses/gpl-2.0.html"
@@ -46,8 +46,9 @@ OPTIONAL_MODELS: List[Tuple[str, str]] = [
 ]

 REQUIRED_BINARIES: List[str] = [
-    "streaming_extractor_music",
+    "streaming_rhythmextractor_multifeature",
     "streaming_musicnn_predict",
+    "streaming_key",
     "streaming_md5",
 ]
@@ -57,7 +58,6 @@ ENV['TF_ENABLE_ONEDNN_OPTS'] = "0"
 CONFIG_OPTIONS = [
     TextOption("setting", "acousticbrainz_ng_binaries_path", os.path.join(os.path.dirname(__file__), "bin")),
     TextOption("setting", "acousticbrainz_ng_ffmpeg_path", os.path.join(os.path.dirname(sys.executable), "ffmpeg" + (".exe" if os.name == "nt" else ""))),
-    TextOption("setting", "acousticbrainz_ng_jq_path", os.path.join(os.path.dirname(sys.executable), "jq" + (".exe" if os.name == "nt" else ""))),
     TextOption("setting", "acousticbrainz_ng_models_path", os.path.join(os.path.dirname(__file__), "models")),
     TextOption("setting", "acousticbrainz_ng_cache_path", os.path.join(os.path.dirname(__file__), "cache")),
     IntOption("setting", "acousticbrainz_ng_max_musicnn_workers", 4),
@@ -68,5 +68,3 @@ CONFIG_OPTIONS = [
     BoolOption("setting", "acousticbrainz_ng_calculate_replaygain", True),
     BoolOption("setting", "acousticbrainz_ng_save_fingerprint", True)
 ]
-
-GAIA_KEY_ALGORITHMS = ["edma", "krumhansl", "temperley"]