Compare commits
3 Commits: c62f7f9e31 ... 1d6d3405c3

| Author | SHA1 | Date |
|---|---|---|
| | 1d6d3405c3 | |
| | f43eeaafe0 | |
| | a8794fa239 | |
__init__.py (559 changed lines)
@@ -1,4 +1,5 @@
import os
import re
import json
import shutil
import subprocess
@@ -8,10 +9,10 @@ import struct
import threading
import concurrent.futures
import tempfile

import math
import yaml
from functools import partial
from typing import List, Tuple, Dict, Optional

from picard import config, log
from picard.ui.itemviews import (
    BaseAction,
@@ -22,10 +23,7 @@ from picard.track import Track
from picard.album import Album
from picard.ui.options import OptionsPage, register_options_page
from picard.util import thread
from picard.coverart.image import (
    CoverArtImage,
    CoverArtImageError,
)
from picard.coverart.image import CoverArtImage
from PyQt5 import QtWidgets, QtCore

_analysis_semaphore = None
@@ -56,20 +54,23 @@ class AcousticBrainzNG:
            binary_path += '.exe'
        return binary_path

    def _get_binary_paths(self) -> Tuple[str, str]:
    def _get_binary_paths(self) -> Tuple[str, str, str]:
        binaries_path = config.setting["acousticbrainz_ng_binaries_path"]
        if not binaries_path:
            raise ValueError("Binaries path not configured")

        musicnn_binary_path = self._get_binary_path("streaming_musicnn_predict", binaries_path)
        gaia_binary_path = self._get_binary_path("streaming_extractor_music", binaries_path)

        rhythm_binary_path = self._get_binary_path("streaming_rhythmextractor_multifeature", binaries_path)
        key_binary_path = self._get_binary_path("streaming_key", binaries_path)

        if not os.path.exists(musicnn_binary_path):
            raise FileNotFoundError(f"Binary {musicnn_binary_path} not found")
        if not os.path.exists(gaia_binary_path):
            raise FileNotFoundError(f"Binary {gaia_binary_path} not found")

        return musicnn_binary_path, gaia_binary_path
        if not os.path.exists(rhythm_binary_path):
            raise FileNotFoundError(f"Binary {rhythm_binary_path} not found")
        if not os.path.exists(key_binary_path):
            raise FileNotFoundError(f"Binary {key_binary_path} not found")

        return musicnn_binary_path, rhythm_binary_path, key_binary_path

    def _run_musicnn_models(self, models: List[Tuple[str, str]], musicnn_binary_path: str, file: str, output_path: str) -> bool:
        models_path = config.setting["acousticbrainz_ng_models_path"]
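For orientation, `self._get_binary_path(name, binaries_path)` (only its tail is visible in the hunk above) resolves an extractor name inside the configured binaries directory and appends `.exe` on Windows. A minimal standalone sketch of that resolution, with the helper name and signature assumed rather than copied from the plugin:

```python
import os

def resolve_binary_path(name: str, binaries_path: str) -> str:
    """Hypothetical helper mirroring the path-resolution pattern visible in the diff."""
    binary_path = os.path.join(binaries_path, name)
    if os.name == 'nt':          # Windows builds ship .exe binaries
        binary_path += '.exe'
    if not os.path.exists(binary_path):
        raise FileNotFoundError(f"Binary {binary_path} not found")
    return binary_path
```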
@@ -133,7 +134,7 @@ class AcousticBrainzNG:
            return False

        try:
            musicnn_binary_path, gaia_binary_path = self._get_binary_paths()
            musicnn_binary_path, rhythm_binary_path, key_binary_path = self._get_binary_paths()
        except (ValueError, FileNotFoundError) as e:
            log.error(str(e))
            return False
@@ -147,80 +148,86 @@ class AcousticBrainzNG:
            log.error(f"Error generating cache folder: {e}")
            return False

        gaia_success = True
        def run_gaia():
            nonlocal gaia_success
            if os.path.exists(os.path.join(output_path, "gaia.json")):
                return

            jq_path = config.setting["acousticbrainz_ng_jq_path"]
            if not jq_path or not os.path.exists(jq_path):
                log.error("jq binary path not configured or invalid")
                gaia_success = False
        rhythm_success = True
        def run_rhythm():
            nonlocal rhythm_success
            if os.path.exists(os.path.join(output_path, "rhythm.yaml")):
                return

            gaia_proc = subprocess.run(
                [gaia_binary_path, file, "-"],
            rhythm_proc = subprocess.run(
                [rhythm_binary_path, file],
                capture_output=True,
                text=True,
                env=ENV,
                creationflags=subprocess.CREATE_NO_WINDOW if os.name == 'nt' else 0
            )

            if gaia_proc.returncode != 0:
                gaia_success = False
                log.error(f"Gaia binary {gaia_binary_path} failed on file {file} with exit code {gaia_proc.returncode}")
                if gaia_proc.stdout:
                    log.error(f"Gaia stdout: {gaia_proc.stdout}")
                if gaia_proc.stderr:
                    log.error(f"Gaia stderr: {gaia_proc.stderr}")
                return

            jq_filter = (
                "{ rhythm: { bpm: .rhythm.bpm }, "
                "tonal: { "
                "chords_changes_rate: .tonal.chords_changes_rate, "
                "chords_key: .tonal.chords_key, "
                "chords_scale: .tonal.chords_scale, "
                "key_temperley: { key: .tonal.key_temperley.key, scale: .tonal.key_temperley.scale, strength: .tonal.key_temperley.strength }, "
                "key_krumhansl: { key: .tonal.key_krumhansl.key, scale: .tonal.key_krumhansl.scale, strength: .tonal.key_krumhansl.strength }, "
                "key_edma: { key: .tonal.key_edma.key, scale: .tonal.key_edma.scale, strength: .tonal.key_edma.strength } "
                "} }"
            )

            jq_proc = subprocess.run(
                [jq_path, jq_filter],
                input=gaia_proc.stdout,
                capture_output=True,
                text=True,
                env=ENV,
                creationflags=subprocess.CREATE_NO_WINDOW if os.name == 'nt' else 0
            )

            if jq_proc.returncode != 0:
                gaia_success = False
                log.error(f"jq failed to post-process Gaia JSON with exit code {jq_proc.returncode}")
                if jq_proc.stdout:
                    log.error(f"jq stdout: {jq_proc.stdout}")
                if jq_proc.stderr:
                    log.error(f"jq stderr: {jq_proc.stderr}")
            if rhythm_proc.returncode != 0:
                rhythm_success = False
                log.error(f"Rhythm binary {rhythm_binary_path} failed on file {file} with exit code {rhythm_proc.returncode}")
                if rhythm_proc.stdout:
                    log.error(f"Rhythm stdout: {rhythm_proc.stdout}")
                if rhythm_proc.stderr:
                    log.error(f"Rhythm stderr: {rhythm_proc.stderr}")
                return

            try:
                os.makedirs(output_path, exist_ok=True)
                with open(os.path.join(output_path, "gaia.json"), "w", encoding="utf-8") as f:
                    f.write(jq_proc.stdout)
                stdout = rhythm_proc.stdout or ""
                lines = stdout.splitlines(keepends=True)
                if not lines:
                    raise ValueError("Rhythm binary produced no stdout")

                yaml_lines = lines[-5:] if len(lines) >= 5 else lines
                yaml_str = "".join(yaml_lines)
                if not yaml_str.strip():
                    raise ValueError("Empty YAML section extracted from rhythm binary output")

                out_file = os.path.join(output_path, "rhythm.yaml")
                with open(out_file, "w", encoding="utf-8") as f:
                    f.write(yaml_str)
            except Exception as e:
                gaia_success = False
                log.error(f"Failed to write processed Gaia JSON: {e}")
                rhythm_success = False
                log.error(f"Failed to extract/save rhythm.yaml from rhythm binary stdout: {e}")
                if rhythm_proc.stdout:
                    log.error(f"Rhythm stdout: {rhythm_proc.stdout}")
                if rhythm_proc.stderr:
                    log.error(f"Rhythm stderr: {rhythm_proc.stderr}")
                return

        gaia_thread = threading.Thread(target=run_gaia)
        gaia_thread.start()
        key_success = True
        def run_key():
            nonlocal key_success
            if os.path.exists(os.path.join(output_path, "key.yaml")):
                return

            key_proc = subprocess.run(
                [key_binary_path, file, os.path.join(output_path, "key.yaml")],
                capture_output=True,
                text=True,
                env=ENV,
                creationflags=subprocess.CREATE_NO_WINDOW if os.name == 'nt' else 0
            )

            if key_proc.returncode != 0:
                key_success = False
                log.error(f"Key binary {key_binary_path} failed on file {file} with exit code {key_proc.returncode}")
                if key_proc.stdout:
                    log.error(f"Key stdout: {key_proc.stdout}")
                if key_proc.stderr:
                    log.error(f"Key stderr: {key_proc.stderr}")
                return

        rhythm_thread = threading.Thread(target=run_rhythm)
        rhythm_thread.start()

        key_thread = threading.Thread(target=run_key)
        key_thread.start()

        musicnn_success = self._run_musicnn_models(REQUIRED_MODELS, musicnn_binary_path, file, output_path)
        gaia_thread.join()

        return gaia_success and musicnn_success
        rhythm_thread.join()
        key_thread.join()

        return rhythm_success and key_success and musicnn_success

    def analyze_optional(self, metadata: Dict, file: str) -> bool:
        if not self._check_binaries():
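A note on the hunk above: the new run_rhythm() does not get a YAML file from the extractor directly; it treats the last few lines of the binary's stdout as the YAML summary and writes them to rhythm.yaml. A minimal sketch of that extraction in isolation (the 5-line tail and PyYAML parsing mirror the diff; the function name is illustrative):

```python
import yaml  # PyYAML

def extract_trailing_yaml(stdout: str, tail_lines: int = 5) -> dict:
    """Keep only the trailing YAML summary printed after the extractor's log output."""
    lines = stdout.splitlines(keepends=True)
    if not lines:
        raise ValueError("no stdout to parse")
    yaml_str = "".join(lines[-tail_lines:])
    data = yaml.safe_load(yaml_str)
    if not isinstance(data, dict):
        raise ValueError("expected a YAML mapping in the trailing output")
    return data
```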
@@ -232,7 +239,7 @@ class AcousticBrainzNG:
            return False

        try:
            musicnn_binary_path, _ = self._get_binary_paths()
            musicnn_binary_path = self._get_binary_paths()[0]
        except (ValueError, FileNotFoundError) as e:
            log.error(str(e))
            return False
@@ -329,50 +336,48 @@ class AcousticBrainzNG:
        metadata['mood'] = moods
        metadata['tags'] = tags

        gaia_data = {}
        gaia_json_path = os.path.join(output_path, "gaia.json")
        rhythm_data = {}
        rhythm_yaml_path = os.path.join(output_path, "rhythm.yaml")

        if os.path.exists(gaia_json_path):
            try:
                with open(gaia_json_path, 'r', encoding='utf-8') as f:
                    gaia_data = json.load(f)
            except (FileNotFoundError, json.JSONDecodeError) as e:
                log.error(f"Error reading Gaia JSON file: {e}")
        key_data = {}
        key_yaml_path = os.path.join(output_path, "key.yaml")

        if os.path.exists(rhythm_yaml_path):
            with open(rhythm_yaml_path, 'r', encoding='utf-8') as f:
                loaded = yaml.safe_load(f)

            if not isinstance(loaded, dict):
                log.error("Invalid rhythm YAML format: expected a mapping at the top level")
                return False

            rhythm_data = loaded
        else:
            log.error(f"Gaia JSON file not found: {gaia_json_path}")
            log.error(f"Rhythm YAML file not found: {rhythm_yaml_path}")
            return False

        if os.path.exists(key_yaml_path):
            with open(key_yaml_path, 'r', encoding='utf-8') as f:
                loaded = yaml.safe_load(f)
            if not isinstance(loaded, dict):
                log.error("Invalid key YAML format: expected a mapping at the top level")
                return False

            key_data = loaded
        else:
            log.error(f"Key YAML file not found: {key_yaml_path}")
            return False

        try:
            metadata["bpm"] = int(round(gaia_data["rhythm"]["bpm"]))

            metadata["bpm"] = int(round(rhythm_data["bpm"]))
            metadata["key"] = "o" if key_data["tonal"]["key_scale"] == "off" else f"{key_data['tonal']['key']}{'m' if key_data['tonal']['key_scale'] == 'minor' else ''}"

            if config.setting["acousticbrainz_ng_save_raw"]:
                metadata["ab:lo:tonal:chords_changes_rate"] = gaia_data["tonal"]["chords_changes_rate"]
                metadata["ab:lo:tonal:chords_key"] = gaia_data["tonal"]["chords_key"]
                metadata["ab:lo:tonal:chords_scale"] = gaia_data["tonal"]["chords_scale"]

            highestStrength = -1
            selectedAlgorithm = None

            for algorithm in GAIA_KEY_ALGORITHMS:
                key_data = gaia_data["tonal"][f"key_{algorithm}"]

                if key_data["strength"] > highestStrength:
                    highestStrength = key_data["strength"]
                    selectedAlgorithm = algorithm

            if selectedAlgorithm:
                selected_key_data = gaia_data["tonal"][f"key_{selectedAlgorithm}"]

                metadata["key"] = "o" if selected_key_data["scale"] == "off" else f"{selected_key_data['key']}{'m' if selected_key_data['scale'] == 'minor' else ''}"

                if config.setting["acousticbrainz_ng_save_raw"]:
                    metadata["ab:lo:tonal:key_scale"] = selected_key_data["scale"]
                    metadata["ab:lo:tonal:key_key"] = selected_key_data["key"]
                metadata["ab:lo:tonal:key_scale"] = key_data["tonal"]["key_scale"]
                metadata["ab:lo:tonal:key_key"] = key_data["tonal"]["key"]

            return True
        except Exception as e:
            log.error(f"Error processing gaia data: {e}")
            log.error(f"Error processing feature data: {e}")
            return False

    def parse_optional(self, metadata: Dict, file: str) -> bool:
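The metadata["key"] expression above packs the detected key into a short tag form: "o" when the scale is reported as off, a trailing "m" for minor, the bare key otherwise. As a standalone sketch (the function name is illustrative, not part of the plugin):

```python
def key_to_tag(key: str, scale: str) -> str:
    """Render a key/scale pair the way the diff writes metadata["key"]."""
    if scale == "off":
        return "o"  # no stable key detected
    return f"{key}m" if scale == "minor" else key

# e.g. key_to_tag("C#", "minor") -> "C#m"; key_to_tag("F", "major") -> "F"
```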
@@ -671,7 +676,7 @@ class AcousticBrainzNG:
        if not ffmpeg_path:
            raise ValueError("FFmpeg path not configured")

        replaygain_lufs_result = subprocess.run(
        replaygain_proc = subprocess.run(
            [ffmpeg_path, "-hide_banner", "-i", file_path, "-af", f"loudnorm=I={config.setting['acousticbrainz_ng_replaygain_reference_loudness']}:print_format=json", "-f", "null", "-"],
            capture_output=True,
            text=True,
@@ -679,40 +684,66 @@ class AcousticBrainzNG:
            creationflags=subprocess.CREATE_NO_WINDOW if os.name == 'nt' else 0
        )

        if replaygain_lufs_result.returncode != 0:
            log.error(f"FFmpeg failed for ReplayGain LUFS calculation on file {file_path} with exit code {replaygain_lufs_result.returncode}")
            if replaygain_lufs_result.stdout:
                log.error(f"FFmpeg stdout: {replaygain_lufs_result.stdout}")
            if replaygain_lufs_result.stderr:
                log.error(f"FFmpeg stderr: {replaygain_lufs_result.stderr}")
        if replaygain_proc.returncode != 0:
            log.error(f"FFmpeg failed for ReplayGain LUFS calculation on file {file_path} with exit code {replaygain_proc.returncode}")
            if replaygain_proc.stdout:
                log.error(f"FFmpeg stdout: {replaygain_proc.stdout}")
            if replaygain_proc.stderr:
                log.error(f"FFmpeg stderr: {replaygain_proc.stderr}")
            return {}

        replaygain_log = replaygain_proc.stderr or replaygain_proc.stdout

        replaygain_match = re.search(r'\{.*?\}', replaygain_log, re.S)
        replaygain_matches = re.findall(r'\{.*?\}', replaygain_log, re.S) if not replaygain_match else None
        replaygain_json_text = replaygain_match.group(0) if replaygain_match else (replaygain_matches[0] if replaygain_matches else None)

        replaygain_gain = None
        replaygain_peak = None
        replaygain_range = None

        try:
            json_start = replaygain_lufs_result.stderr.find('{')
            if json_start != -1:
                json_str = replaygain_lufs_result.stderr[json_start:]
                json_end = json_str.find('}') + 1
                if json_end > 0:
                    loudnorm_data = json.loads(json_str[:json_end])
                    input_i = loudnorm_data.get('input_i')
                    input_tp = loudnorm_data.get('input_tp')
                    input_lra = loudnorm_data.get('input_lra')

                    if input_i and input_i != "-inf":
                        replaygain_gain = f"{(config.setting['acousticbrainz_ng_replaygain_reference_loudness'] or -18) - float(input_i):.2f}"
        replaygain_lufs_result: dict | None = None

        if replaygain_json_text:
            try:
                replaygain_lufs_result = json.loads(replaygain_json_text)
            except json.JSONDecodeError:
                if replaygain_matches:
                    try:
                        replaygain_lufs_result = json.loads(replaygain_matches[-1])
                    except Exception:
                        replaygain_lufs_result = None

                    if input_tp and input_tp != "-inf":
                        replaygain_peak = f"{10 ** (float(input_tp) / 20):.6f}"

                    if input_lra and input_lra != "-inf":
                        replaygain_range = f"{float(input_lra):.2f}"

        except (json.JSONDecodeError, ValueError, TypeError):
            pass
        input_i = replaygain_lufs_result.get('input_i') if replaygain_lufs_result else None
        input_tp = replaygain_lufs_result.get('input_tp') if replaygain_lufs_result else None
        input_lra = replaygain_lufs_result.get('input_lra') if replaygain_lufs_result else None

        try:
            if input_i:
                input_i_val = float(input_i)
        except (TypeError, ValueError):
            input_i_val = None

        try:
            if input_tp:
                input_tp_val = float(input_tp)
        except (TypeError, ValueError):
            input_tp_val = None

        try:
            if input_lra:
                input_lra_val = float(input_lra)
        except (TypeError, ValueError):
            input_lra_val = None

        if input_i_val is not None and math.isfinite(input_i_val):
            replaygain_gain = f"{(config.setting['acousticbrainz_ng_replaygain_reference_loudness'] or -18) - input_i_val:.2f}"

        if input_tp_val is not None and math.isfinite(input_tp_val):
            replaygain_peak = f"{10 ** (input_tp_val / 20):.6f}"

        if input_lra_val is not None and math.isfinite(input_lra_val):
            replaygain_range = f"{input_lra_val:.2f}"

        result: Dict = {
            "replaygain_track_gain": replaygain_gain,
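Context for the hunk above: ffmpeg's loudnorm filter prints its measurement JSON to stderr after the normal log output, which is why the new code greps the combined log for a `{...}` block instead of slicing stderr by index. A minimal sketch of that extraction and of turning the measurements into ReplayGain-style values (a -18 LUFS reference is assumed here, and the returned key names are illustrative; the plugin reads the reference and tag names from its own settings and result dict):

```python
import json
import math
import re
import subprocess

def measure_loudnorm(ffmpeg_path: str, file_path: str, reference: float = -18.0) -> dict:
    """Run ffmpeg's loudnorm analysis pass and derive gain/peak strings from it."""
    proc = subprocess.run(
        [ffmpeg_path, "-hide_banner", "-i", file_path,
         "-af", f"loudnorm=I={reference}:print_format=json", "-f", "null", "-"],
        capture_output=True, text=True,
    )
    log_text = proc.stderr or proc.stdout
    match = re.search(r'\{.*?\}', log_text, re.S)  # loudnorm prints a small JSON block at the end
    if proc.returncode != 0 or not match:
        return {}
    stats = json.loads(match.group(0))
    values = {}
    input_i = float(stats.get("input_i", "nan"))    # integrated loudness, LUFS
    input_tp = float(stats.get("input_tp", "nan"))  # true peak, dBTP
    if math.isfinite(input_i):
        values["track_gain"] = f"{reference - input_i:.2f}"
    if math.isfinite(input_tp):
        values["track_peak"] = f"{10 ** (input_tp / 20):.6f}"  # dBTP -> linear amplitude
    return values
```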
@@ -721,7 +752,7 @@ class AcousticBrainzNG:
            "replaygain_reference_loudness": f"{(config.setting['acousticbrainz_ng_replaygain_reference_loudness'] or -18):.2f}"
        }

        r128_result = subprocess.run(
        r128_proc = subprocess.run(
            [ffmpeg_path, "-hide_banner", "-i", file_path, "-af", "loudnorm=I=-23:print_format=json", "-f", "null", "-"],
            capture_output=True,
            text=True,
@@ -729,36 +760,50 @@ class AcousticBrainzNG:
            creationflags=subprocess.CREATE_NO_WINDOW if os.name == 'nt' else 0
        )

        if r128_result.returncode != 0:
            log.error(f"FFmpeg failed for R128 calculation on file {file_path} with exit code {r128_result.returncode}")
            if r128_result.stdout:
                log.error(f"FFmpeg stdout: {r128_result.stdout}")
            if r128_result.stderr:
                log.error(f"FFmpeg stderr: {r128_result.stderr}")
        if r128_proc.returncode != 0:
            log.error(f"FFmpeg failed for R128 calculation on file {file_path} with exit code {r128_proc.returncode}")
            if r128_proc.stdout:
                log.error(f"FFmpeg stdout: {r128_proc.stdout}")
            if r128_proc.stderr:
                log.error(f"FFmpeg stderr: {r128_proc.stderr}")
            return result


        r128_log = r128_proc.stderr or r128_proc.stdout

        r128_match = re.search(r'\{.*?\}', r128_log, re.S)
        r128_matches = re.findall(r'\{.*?\}', r128_log, re.S) if not r128_match else None
        r128_json_text = r128_match.group(0) if r128_match else (r128_matches[0] if r128_matches else None)

        r128_track_gain = None

        r128_data: dict | None = None

        if r128_json_text:
            try:
                r128_data = json.loads(r128_json_text)
            except json.JSONDecodeError:
                if r128_matches:
                    try:
                        r128_data = json.loads(r128_matches[-1])
                    except Exception:
                        r128_data = None

        r128_input_i = r128_data.get('input_i') if r128_data else None

        try:
            json_start = r128_result.stderr.find('{')
            if json_start != -1:
                json_str = r128_result.stderr[json_start:]
                json_end = json_str.find('}') + 1
                if json_end > 0:
                    r128_data = json.loads(json_str[:json_end])
                    r128_input_i = r128_data.get('input_i')

                    if r128_input_i and r128_input_i != "-inf":
                        r128_gain_db = -23 - float(r128_input_i)
                        r128_track_gain = int(round(r128_gain_db * 256))

                        if r128_track_gain < -32768:
                            r128_track_gain = -32768
                        elif r128_track_gain > 32767:
                            r128_track_gain = 32767

        except (json.JSONDecodeError, ValueError, TypeError):
            pass
            if r128_input_i:
                r128_input_i_val = int(r128_input_i)
        except (TypeError, ValueError):
            r128_input_i_val = None

        if r128_input_i_val is not None and math.isfinite(r128_input_i_val):
            r128_gain_db = -23 - r128_input_i_val
            r128_track_gain = int(round(r128_gain_db * 256))

            if r128_track_gain < -32768:
                r128_track_gain = -32768
            elif r128_track_gain > 32767:
                r128_track_gain = 32767

        result["r128_track_gain"] = r128_track_gain

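The r128_track_gain value above follows the EBU R128 / Opus convention: the offset from the -23 LUFS reference, stored as a Q7.8 fixed-point integer (gain in dB times 256) and clamped to the signed 16-bit range. A small sketch of that conversion (the function name is illustrative):

```python
def lufs_to_r128_gain(input_i: float, reference: float = -23.0) -> int:
    """Convert a measured integrated loudness to an R128 Q7.8 gain value."""
    gain_q78 = int(round((reference - input_i) * 256))
    return max(-32768, min(32767, gain_q78))  # clamp to signed 16-bit

# e.g. a track measured at -9.8 LUFS -> round((-23 - -9.8) * 256) = -3379
```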
@@ -787,7 +832,7 @@ class AcousticBrainzNG:
            concat_file_path = concat_file.name

        try:
            album_replaygain_result = subprocess.run(
            album_replaygain_proc = subprocess.run(
                [ffmpeg_path, "-hide_banner", "-f", "concat", "-safe", "0", "-i", concat_file_path,
                 "-vn", "-af", f"loudnorm=I={config.setting['acousticbrainz_ng_replaygain_reference_loudness']}:print_format=json", "-f", "null", "-"],
                capture_output=True,
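The album-level pass feeds all tracks through ffmpeg's concat demuxer, so the temporary concat file referenced above simply lists one `file '...'` line per track. A sketch of building such a list (temp-file handling is simplified here relative to the plugin):

```python
import tempfile

def write_concat_list(track_paths: list[str]) -> str:
    """Write an ffmpeg concat-demuxer list file and return its path."""
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False, encoding="utf-8") as concat_file:
        for path in track_paths:
            escaped = path.replace("'", "'\\''")  # escape single quotes for the demuxer
            concat_file.write(f"file '{escaped}'\n")
        return concat_file.name

# The list is then consumed with: ffmpeg -f concat -safe 0 -i <list> ... as in the diff.
```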
@@ -796,49 +841,75 @@ class AcousticBrainzNG:
                creationflags=subprocess.CREATE_NO_WINDOW if os.name == 'nt' else 0
            )

            if album_replaygain_result.returncode != 0:
                log.error(f"FFmpeg failed for album ReplayGain calculation on {len(album_track_files)} files with exit code {album_replaygain_result.returncode}")
            if album_replaygain_proc.returncode != 0:
                log.error(f"FFmpeg failed for album ReplayGain calculation on {len(album_track_files)} files with exit code {album_replaygain_proc.returncode}")
                log.error(f"Album files: {', '.join(album_track_files)}")
                if album_replaygain_result.stdout:
                    log.error(f"FFmpeg stdout: {album_replaygain_result.stdout}")
                if album_replaygain_result.stderr:
                    log.error(f"FFmpeg stderr: {album_replaygain_result.stderr}")
                if album_replaygain_proc.stdout:
                    log.error(f"FFmpeg stdout: {album_replaygain_proc.stdout}")
                if album_replaygain_proc.stderr:
                    log.error(f"FFmpeg stderr: {album_replaygain_proc.stderr}")
                return {}


            album_replaygain_log = album_replaygain_proc.stderr or album_replaygain_proc.stdout

            album_replaygain_match = re.search(r'\{.*?\}', album_replaygain_log, re.S)
            album_replaygain_matches = re.findall(r'\{.*?\}', album_replaygain_log, re.S) if not album_replaygain_match else None
            album_replaygain_json_text = album_replaygain_match.group(0) if album_replaygain_match else (album_replaygain_matches[0] if album_replaygain_matches else None)

            album_gain = None
            album_peak = None
            album_range = None

            try:
                json_start = album_replaygain_result.stderr.find('{')
                if json_start != -1:
                    json_str = album_replaygain_result.stderr[json_start:]
                    json_end = json_str.find('}') + 1
                    if json_end > 0:
                        loudnorm_data = json.loads(json_str[:json_end])
                        input_i = loudnorm_data.get('input_i')
                        input_tp = loudnorm_data.get('input_tp')
                        input_lra = loudnorm_data.get('input_lra')

                        if input_i and input_i != "-inf":
                            album_gain = f"{(config.setting['acousticbrainz_ng_replaygain_reference_loudness'] or -18) - float(input_i):.2f}"

                        if input_tp and input_tp != "-inf":
                            album_peak = f"{10 ** (float(input_tp) / 20):.6f}"

                        if input_lra and input_lra != "-inf":
                            album_range = f"{float(input_lra):.2f}"

            except (json.JSONDecodeError, ValueError, TypeError):
                pass
            loudnorm_data: dict | None = None

            if album_replaygain_json_text:
                try:
                    loudnorm_data = json.loads(album_replaygain_json_text)
                except json.JSONDecodeError:
                    if album_replaygain_matches:
                        try:
                            loudnorm_data = json.loads(album_replaygain_matches[-1])
                        except Exception:
                            loudnorm_data = None

            input_i = loudnorm_data.get('input_i') if loudnorm_data else None
            input_tp = loudnorm_data.get('input_tp') if loudnorm_data else None
            input_lra = loudnorm_data.get('input_lra') if loudnorm_data else None

            try:
                if input_i:
                    input_i_val = float(input_i)
            except (TypeError, ValueError):
                input_i_val = None

            try:
                if input_tp:
                    input_tp_val = float(input_tp)
            except (TypeError, ValueError):
                input_tp_val = None

            try:
                if input_lra:
                    input_lra_val = float(input_lra)
            except (TypeError, ValueError):
                input_lra_val = None

            if input_i_val is not None and math.isfinite(input_i_val):
                album_gain = f"{(config.setting['acousticbrainz_ng_replaygain_reference_loudness'] or -18) - input_i_val:.2f}"

            if input_tp_val is not None and math.isfinite(input_tp_val):
                album_peak = f"{10 ** (input_tp_val / 20):.6f}"

            if input_lra_val is not None and math.isfinite(input_lra_val):
                album_range = f"{input_lra_val:.2f}"

            result: Dict = {
                "replaygain_album_gain": album_gain,
                "replaygain_album_peak": album_peak,
                "replaygain_album_range": album_range
            }

            album_r128_result = subprocess.run(
            album_r128_proc = subprocess.run(
                [ffmpeg_path, "-hide_banner", "-f", "concat", "-safe", "0", "-i", concat_file_path,
                 "-vn", "-af", "loudnorm=I=-23:print_format=json", "-f", "null", "-"],
                capture_output=True,
@@ -847,37 +918,51 @@ class AcousticBrainzNG:
                creationflags=subprocess.CREATE_NO_WINDOW if os.name == 'nt' else 0
            )

            if album_r128_result.returncode != 0:
                log.error(f"FFmpeg failed for album R128 calculation on {len(album_track_files)} files with exit code {album_r128_result.returncode}")
            if album_r128_proc.returncode != 0:
                log.error(f"FFmpeg failed for album R128 calculation on {len(album_track_files)} files with exit code {album_r128_proc.returncode}")
                log.error(f"Album files: {', '.join(album_track_files)}")
                if album_r128_result.stdout:
                    log.error(f"FFmpeg stdout: {album_r128_result.stdout}")
                if album_r128_result.stderr:
                    log.error(f"FFmpeg stderr: {album_r128_result.stderr}")
                if album_r128_proc.stdout:
                    log.error(f"FFmpeg stdout: {album_r128_proc.stdout}")
                if album_r128_proc.stderr:
                    log.error(f"FFmpeg stderr: {album_r128_proc.stderr}")
                return result

            album_r128_log = album_r128_proc.stderr or album_r128_proc.stdout

            album_r128_match = re.search(r'\{.*?\}', album_r128_log, re.S)
            album_r128_matches = re.findall(r'\{.*?\}', album_r128_log, re.S) if not album_r128_match else None
            album_r128_json_text = album_r128_match.group(0) if album_r128_match else (album_r128_matches[0] if album_r128_matches else None)

            r128_album_gain = None

            r128_data: dict | None = None

            if album_r128_json_text:
                try:
                    r128_data = json.loads(album_r128_json_text)
                except json.JSONDecodeError:
                    if album_r128_matches:
                        try:
                            r128_data = json.loads(album_r128_matches[-1])
                        except Exception:
                            r128_data = None

            r128_input_i = r128_data.get('input_i') if r128_data else None

            try:
                json_start = album_r128_result.stderr.find('{')
                if json_start != -1:
                    json_str = album_r128_result.stderr[json_start:]
                    json_end = json_str.find('}') + 1
                    if json_end > 0:
                        r128_data = json.loads(json_str[:json_end])
                        r128_input_i = r128_data.get('input_i')

                        if r128_input_i and r128_input_i != "-inf":
                            r128_gain_db = -23 - float(r128_input_i)
                            r128_album_gain = int(round(r128_gain_db * 256))

                            if r128_album_gain < -32768:
                                r128_album_gain = -32768
                            elif r128_album_gain > 32767:
                                r128_album_gain = 32767

            except (json.JSONDecodeError, ValueError, TypeError):
                pass
                if r128_input_i:
                    r128_input_i_val = int(r128_input_i)
            except (TypeError, ValueError):
                r128_input_i_val = None

            if r128_input_i_val is not None and math.isfinite(r128_input_i_val):
                r128_gain_db = -23 - r128_input_i_val
                r128_album_gain = int(round(r128_gain_db * 256))

                if r128_album_gain < -32768:
                    r128_album_gain = -32768
                elif r128_album_gain > 32767:
                    r128_album_gain = 32767

            result["r128_album_gain"] = r128_album_gain

@@ -1362,8 +1447,8 @@ class AcousticBrainzNGOptionsPage(OptionsPage):
        def update_concurrent_processes():
            concurrent_analyses = self.concurrent_analyses_input.value()
            musicnn_workers = self.musicnn_workers_input.value()
            max_processes = concurrent_analyses + (concurrent_analyses * musicnn_workers)
            breakdown = f"[{concurrent_analyses} gaia processes + ({concurrent_analyses} x {musicnn_workers}) MusicNN processes]"
            max_processes = (2 * concurrent_analyses) + (concurrent_analyses * musicnn_workers)
            breakdown = f"[(2 x {concurrent_analyses}) feature processes + ({concurrent_analyses} x {musicnn_workers}) MusicNN processes]"
            self.concurrent_processes_display.setText(f"{breakdown} = <span style='font-weight: bold;'>{max_processes}</span>")

        self.concurrent_analyses_input.valueChanged.connect(update_concurrent_processes)
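The displayed worst-case process count changes because each concurrent analysis now spawns two feature extractors (rhythm and key) instead of a single Gaia process, on top of its MusicNN workers. In formula form, using the same variable names as the diff:

```python
def max_processes(concurrent_analyses: int, musicnn_workers: int) -> int:
    # two feature-extractor processes per analysis, plus that analysis's MusicNN workers
    return (2 * concurrent_analyses) + (concurrent_analyses * musicnn_workers)

# e.g. 2 analyses with 4 MusicNN workers each -> (2 * 2) + (2 * 4) = 12 processes
```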
@@ -1396,15 +1481,6 @@ class AcousticBrainzNGOptionsPage(OptionsPage):
            lambda: self._browse_file(self.ffmpeg_path_input),
            lambda: (self._check_binaries(show_success=True), None)[1]
        )

        # jq path
        self.jq_path_input = QtWidgets.QLineEdit(self)
        self.jq_path_input.setPlaceholderText("Path to jq")
        jq_layout = self._create_path_input_layout(
            self.jq_path_input,
            lambda: self._browse_file(self.jq_path_input),
            lambda: (self._check_binaries(show_success=True), None)[1]
        )

        # Models path
        self.models_path_input = QtWidgets.QLineEdit(self)
@@ -1425,8 +1501,6 @@ class AcousticBrainzNGOptionsPage(OptionsPage):

        paths_layout.addWidget(QtWidgets.QLabel("FFmpeg", self))
        paths_layout.addLayout(ffmpeg_layout)
        paths_layout.addWidget(QtWidgets.QLabel("jq", self))
        paths_layout.addLayout(jq_layout)
        paths_layout.addWidget(QtWidgets.QLabel("Binaries", self))
        paths_layout.addLayout(binaries_layout)
        paths_layout.addWidget(QtWidgets.QLabel("Models", self))
@@ -1453,11 +1527,6 @@ class AcousticBrainzNGOptionsPage(OptionsPage):
        if not ffmpeg_path or not os.path.exists(ffmpeg_path):
            QtWidgets.QMessageBox.warning(self, "Binaries", "Invalid or empty FFmpeg path.")
            return False

        jq_path = self.jq_path_input.text()
        if not jq_path or not os.path.exists(jq_path):
            QtWidgets.QMessageBox.warning(self, "Binaries", "Invalid or empty jq path.")
            return False

        missing_binaries = []
        for binary in REQUIRED_BINARIES:
@@ -1481,20 +1550,6 @@ class AcousticBrainzNGOptionsPage(OptionsPage):
            missing_binaries.append("FFmpeg (unable to execute)")
            log.error(f"Exception running FFmpeg version check: {e}")

        try:
            result = subprocess.run([jq_path, "--version"], capture_output=True, text=True, creationflags=subprocess.CREATE_NO_WINDOW if os.name == 'nt' else 0)
            if result.returncode != 0 or not result.stdout.startswith("jq-"):
                missing_binaries.append("jq (invalid executable)")
                if result.returncode != 0:
                    log.error(f"jq version check failed with exit code {result.returncode}")
                if result.stdout:
                    log.error(f"jq stdout: {result.stdout}")
                if result.stderr:
                    log.error(f"jq stderr: {result.stderr}")
        except Exception as e:
            missing_binaries.append("jq (unable to execute)")
            log.error(f"Exception running jq version check: {e}")

        if missing_binaries:
            message = f"Missing binaries:\n" + "\n".join(f"• {binary}" for binary in missing_binaries)
            QtWidgets.QMessageBox.warning(self, "Binaries", message)
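With the jq probe removed above, FFmpeg is the only external tool still checked by running it and inspecting its output. A generic sketch of that kind of probe, mirroring the removed jq block (the flag and expected output prefix are placeholders, not the plugin's exact check):

```python
import subprocess

def probe_binary(path: str, version_flag: str, expected_prefix: str) -> bool:
    """Return True if the binary runs and identifies itself as expected."""
    try:
        result = subprocess.run([path, version_flag], capture_output=True, text=True)
    except OSError:
        return False
    return result.returncode == 0 and result.stdout.startswith(expected_prefix)

# e.g. the removed check was roughly probe_binary(jq_path, "--version", "jq-")
```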
@@ -1586,7 +1641,6 @@ class AcousticBrainzNGOptionsPage(OptionsPage):

        self.binaries_path_input.setText(config.setting["acousticbrainz_ng_binaries_path"])
        self.ffmpeg_path_input.setText(config.setting["acousticbrainz_ng_ffmpeg_path"])
        self.jq_path_input.setText(config.setting["acousticbrainz_ng_jq_path"])
        self.models_path_input.setText(config.setting["acousticbrainz_ng_models_path"])
        self.cache_path_input.setText(config.setting["acousticbrainz_ng_cache_path"])
@@ -1614,7 +1668,6 @@ class AcousticBrainzNGOptionsPage(OptionsPage):

        config.setting["acousticbrainz_ng_binaries_path"] = self.binaries_path_input.text()
        config.setting["acousticbrainz_ng_ffmpeg_path"] = self.ffmpeg_path_input.text()
        config.setting["acousticbrainz_ng_jq_path"] = self.jq_path_input.text()
        config.setting["acousticbrainz_ng_models_path"] = self.models_path_input.text()
        config.setting["acousticbrainz_ng_cache_path"] = self.cache_path_input.text()
BIN bin/Qt5Core.dll
Binary file not shown.
constants.py (10 changed lines)
@@ -18,7 +18,7 @@ External dependencies:
</ul>
<strong>This plugin is CPU heavy!</strong>
"""
PLUGIN_VERSION = "1.1.1"
PLUGIN_VERSION = "1.1.2"
PLUGIN_API_VERSIONS = ["2.7", "2.8", "2.9", "2.10", "2.11", "2.12", "2.13"]
PLUGIN_LICENSE = "GPL-2.0-or-later"
PLUGIN_LICENSE_URL = "https://www.gnu.org/licenses/gpl-2.0.html"
@@ -46,8 +46,9 @@ OPTIONAL_MODELS: List[Tuple[str, str]] = [
]

REQUIRED_BINARIES: List[str] = [
    "streaming_extractor_music",
    "streaming_rhythmextractor_multifeature",
    "streaming_musicnn_predict",
    "streaming_key",
    "streaming_md5",
]

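REQUIRED_BINARIES now names the rhythm and key extractors instead of the Gaia music extractor. A sketch of how such a list can be checked against the configured binaries directory (an illustrative helper, not the plugin's own _check_binaries):

```python
import os

REQUIRED_BINARIES = [
    "streaming_rhythmextractor_multifeature",
    "streaming_musicnn_predict",
    "streaming_key",
    "streaming_md5",
]

def missing_binaries(binaries_path: str) -> list[str]:
    """List required extractor binaries that are not present on disk."""
    suffix = ".exe" if os.name == "nt" else ""
    return [name for name in REQUIRED_BINARIES
            if not os.path.exists(os.path.join(binaries_path, name + suffix))]
```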
@@ -57,7 +58,6 @@ ENV['TF_ENABLE_ONEDNN_OPTS'] = "0"
CONFIG_OPTIONS = [
    TextOption("setting", "acousticbrainz_ng_binaries_path", os.path.join(os.path.dirname(__file__), "bin")),
    TextOption("setting", "acousticbrainz_ng_ffmpeg_path", os.path.join(os.path.dirname(sys.executable), "ffmpeg" + (".exe" if os.name == "nt" else ""))),
    TextOption("setting", "acousticbrainz_ng_jq_path", os.path.join(os.path.dirname(sys.executable), "jq" + (".exe" if os.name == "nt" else ""))),
    TextOption("setting", "acousticbrainz_ng_models_path", os.path.join(os.path.dirname(__file__), "models")),
    TextOption("setting", "acousticbrainz_ng_cache_path", os.path.join(os.path.dirname(__file__), "cache")),
    IntOption("setting", "acousticbrainz_ng_max_musicnn_workers", 4),
@@ -67,6 +67,4 @@ CONFIG_OPTIONS = [
    BoolOption("setting", "acousticbrainz_ng_save_raw", False),
    BoolOption("setting", "acousticbrainz_ng_calculate_replaygain", True),
    BoolOption("setting", "acousticbrainz_ng_save_fingerprint", True)
]

GAIA_KEY_ALGORITHMS = ["edma", "krumhansl", "temperley"]
]