Parse results

2025-08-06 21:07:03 -04:00
parent 9c0b8f9ab5
commit efce6e5acc
2 changed files with 460 additions and 277 deletions


@@ -1,276 +1,22 @@
import os
import json
import subprocess
import hashlib
from picard import config, log
from picard.ui.options import (
OptionsPage,
register_options_page,
)
from picard.ui.itemviews import (
BaseAction,
register_track_action,
# register_album_action,
)
from picard.track import Track
from PyQt5 import QtWidgets
import threading
import concurrent.futures
PLUGIN_NAME = "AcousticBrainz-ng"
PLUGIN_AUTHOR = "cy1der"
PLUGIN_DESCRIPTION = """
Analyze track acoustic characteristics using Essentia
<br/>
This plugin is not affiliated with the <a href='https://acousticbrainz.org'>AcousticBrainz</a> project<br/>
This is not a 1:1 recreation of the AcousticBrainz schema, but will provide most of the meaningful data<br/>
External dependencies:
<ul>
<li><a href='https://essentia.upf.edu'>Essentia</a> binaries compiled with TensorFlow and gaia2 support</li>
<li>A few MusicNN models (see user guide for details)</li>
</ul>
<strong>This plugin is CPU heavy!</strong>
"""
PLUGIN_VERSION = "1.0.0"
PLUGIN_API_VERSIONS = ["2.7", "2.8", "2.9", "2.10", "2.11"]
PLUGIN_LICENSE = "GPL-2.0-or-later"
PLUGIN_LICENSE_URL = "https://www.gnu.org/licenses/gpl-2.0.html"
PLUGIN_USER_GUIDE_URL = "https://example.com" # TODO: Update with actual user guide URL
from picard import config, log
from picard.ui.itemviews import (
BaseAction,
register_track_action,
register_album_action,
)
from picard.track import Track
from picard.album import Album
from picard.ui.options import OptionsPage, register_options_page
from PyQt5 import QtWidgets
REQUIRED_MODELS: list[tuple[str, str]] = [
("msd-musicnn-1", "msd.json"),
("mood_acoustic-musicnn-mtt-2", "mood_acoustic.json"),
("mood_aggressive-musicnn-mtt-2", "mood_aggressive.json"),
("mood_electronic-musicnn-msd-2", "mood_electronic.json"),
("mood_happy-musicnn-msd-2", "mood_happy.json"),
("mood_party-musicnn-mtt-2", "mood_party.json"),
("mood_relaxed-musicnn-msd-2", "mood_relaxed.json"),
("mood_sad-musicnn-msd-2", "mood_sad.json"),
("danceability-musicnn-msd-2", "danceability.json"),
("gender-musicnn-msd-2", "gender.json"),
("tonal_atonal-musicnn-mtt-2", "tonality.json"),
("voice_instrumental-musicnn-msd-2", "voice_instrumental.json")
]
OPTIONAL_MODELS: list[tuple[str, str]] = [
("genre_electronic-musicnn-msd-2", "genre_electronic.json"),
("genre_rosamerica-musicnn-msd-2", "genre_rosamerica.json"),
("genre_tzanetakis-musicnn-msd-2", "genre_tzanetakis.json")
]
REQUIRED_BINARIES: list[str] = [
"streaming_extractor_music",
"streaming_musicnn_predict",
"streaming_md5",
]
# Avoid memory hogging
TF_ENABLE_ONEDNN_OPTS: int = 0
ENV = os.environ.copy()
ENV['TF_ENABLE_ONEDNN_OPTS'] = str(TF_ENABLE_ONEDNN_OPTS)
config.TextOption("setting", "acousticbrainz_ng_binaries_path", os.path.join(os.path.dirname(__file__), "bin"))
config.TextOption("setting", "acousticbrainz_ng_models_path", os.path.join(os.path.dirname(__file__), "models"))
config.TextOption("setting", "acousticbrainz_ng_cache_path", os.path.join(os.path.dirname(__file__), "cache"))
config.IntOption("setting", "acousticbrainz_ng_max_musicnn_workers", 4)
config.BoolOption("setting", "acousticbrainz_ng_autorun", False)
config.BoolOption("setting", "acousticbrainz_ng_analyze_optional", False)
config.BoolOption("setting", "acousticbrainz_ng_save_raw", False)
class AcousticBrainzNGOptionsPage(OptionsPage):
NAME = "acousticbrainz_ng"
TITLE = "AcousticBrainz-ng"
PARENT = "plugins"
def __init__(self, parent=None) -> None:
super().__init__(parent)
self.setup_ui()
def _create_path_input_layout(self, line_edit: QtWidgets.QLineEdit, browse_callback, check_callback=None) -> QtWidgets.QHBoxLayout:
layout = QtWidgets.QHBoxLayout()
browse_button = QtWidgets.QPushButton("Browse", self)
browse_button.clicked.connect(browse_callback)
layout.addWidget(line_edit)
layout.addWidget(browse_button)
if check_callback:
check_button = QtWidgets.QPushButton("Check", self)
check_button.clicked.connect(check_callback)
layout.addWidget(check_button)
return layout
def setup_ui(self) -> None:
layout = QtWidgets.QVBoxLayout(self)
options_group = QtWidgets.QGroupBox("Options", self)
options_group.setSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
options_layout = QtWidgets.QVBoxLayout(options_group)
self.autorun_checkbox = QtWidgets.QCheckBox("Autorun analysis", self)
self.autorun_checkbox.setToolTip("Automatically run analysis on new tracks")
self.analyze_optional_checkbox = QtWidgets.QCheckBox("Analyze optional models", self)
self.analyze_optional_checkbox.setToolTip("Include optional models in the analysis")
self.save_raw_checkbox = QtWidgets.QCheckBox("Save raw values", self)
self.save_raw_checkbox.setToolTip("Save raw MusicNN numbers in the metadata")
musicnn_workers_layout = QtWidgets.QHBoxLayout()
musicnn_workers_label = QtWidgets.QLabel("Max MusicNN workers:", self)
musicnn_workers_label.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
self.musicnn_workers_input = QtWidgets.QSpinBox(self)
self.musicnn_workers_input.setToolTip("Maximum number of concurrent MusicNN workers")
self.musicnn_workers_input.setRange(1, max(len(REQUIRED_MODELS), len(OPTIONAL_MODELS)))
self.musicnn_workers_input.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
musicnn_workers_layout.addWidget(musicnn_workers_label)
musicnn_workers_layout.addStretch()
musicnn_workers_layout.addWidget(self.musicnn_workers_input)
options_layout.addWidget(self.autorun_checkbox)
options_layout.addWidget(self.analyze_optional_checkbox)
options_layout.addWidget(self.save_raw_checkbox)
options_layout.addLayout(musicnn_workers_layout)
layout.addWidget(options_group)
paths_group = QtWidgets.QGroupBox("Paths", self)
paths_group.setSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
paths_layout = QtWidgets.QVBoxLayout(paths_group)
# Binaries path
self.binaries_path_input = QtWidgets.QLineEdit(self)
self.binaries_path_input.setPlaceholderText("Path to Essentia binaries")
binaries_layout = self._create_path_input_layout(
self.binaries_path_input,
lambda: self._browse_folder(self.binaries_path_input),
lambda: (self._check_binaries(show_success=True), None)[1]
)
# Models path
self.models_path_input = QtWidgets.QLineEdit(self)
self.models_path_input.setPlaceholderText("Path to MusicNN models")
models_layout = self._create_path_input_layout(
self.models_path_input,
lambda: self._browse_folder(self.models_path_input),
lambda: (self._check_models(show_success=True, check_optional=True), None)[1]
)
# Cache path
self.cache_path_input = QtWidgets.QLineEdit(self)
self.cache_path_input.setPlaceholderText("Path to cache directory")
cache_layout = self._create_path_input_layout(
self.cache_path_input,
lambda: self._browse_folder(self.cache_path_input)
)
paths_layout.addWidget(QtWidgets.QLabel("Binaries", self))
paths_layout.addLayout(binaries_layout)
paths_layout.addWidget(QtWidgets.QLabel("Models", self))
paths_layout.addLayout(models_layout)
paths_layout.addWidget(QtWidgets.QLabel("Cache", self))
paths_layout.addLayout(cache_layout)
layout.addWidget(paths_group)
layout.addStretch()
def _check_binaries(self, show_success=False) -> bool:
path = self.binaries_path_input.text()
if not path or not os.path.exists(path):
QtWidgets.QMessageBox.warning(self, "Binaries", "Invalid or empty path.")
return False
missing_binaries = []
for binary in REQUIRED_BINARIES:
binary_path = AcousticBrainzNG._get_binary_path(binary, path)
if not os.path.exists(binary_path):
missing_binaries.append(binary)
if missing_binaries:
message = f"Missing binaries:\n" + "\n".join(f"{binary}" for binary in missing_binaries)
QtWidgets.QMessageBox.warning(self, "Binaries", message)
return False
else:
if show_success:
QtWidgets.QMessageBox.information(self, "Binaries", "All binaries found!")
return True
def _check_models(self, show_success=False, check_optional=True) -> bool:
path = self.models_path_input.text()
if not path or not os.path.exists(path):
QtWidgets.QMessageBox.warning(self, "Models", "Invalid or empty path.")
return False
missing_required = []
for model in REQUIRED_MODELS:
model_path = os.path.join(path, f"{model[0]}.pb")
if not os.path.exists(model_path):
missing_required.append(model[0])
missing_optional = []
if check_optional:
for model in OPTIONAL_MODELS:
model_path = os.path.join(path, f"{model[0]}.pb")
if not os.path.exists(model_path):
missing_optional.append(model[0])
if missing_required:
message = f"Missing required models:\n" + "\n".join(f"{model}.pb" for model in missing_required)
QtWidgets.QMessageBox.warning(self, "Models", message)
return False
elif missing_optional and check_optional:
message = f"Missing optional models:\n" + "\n".join(f"{model}.pb" for model in missing_optional)
QtWidgets.QMessageBox.information(self, "Models", message)
if show_success:
if missing_optional and check_optional:
QtWidgets.QMessageBox.information(self, "Models", "All required models found! Some optional models are missing.")
else:
QtWidgets.QMessageBox.information(self, "Models", "All models found!")
return True
def _browse_folder(self, line_edit: QtWidgets.QLineEdit) -> None:
folder = QtWidgets.QFileDialog.getExistingDirectory(
self, "Select Folder",
line_edit.text() or os.path.expanduser("~")
)
if folder:
line_edit.setText(folder)
def load(self):
self.autorun_checkbox.setChecked(config.setting["acousticbrainz_ng_autorun"] or False)
self.analyze_optional_checkbox.setChecked(config.setting["acousticbrainz_ng_analyze_optional"] or False)
self.save_raw_checkbox.setChecked(config.setting["acousticbrainz_ng_save_raw"] or False)
self.musicnn_workers_input.setValue(config.setting["acousticbrainz_ng_max_musicnn_workers"] or 4)
self.binaries_path_input.setText(config.setting["acousticbrainz_ng_binaries_path"])
self.models_path_input.setText(config.setting["acousticbrainz_ng_models_path"])
self.cache_path_input.setText(config.setting["acousticbrainz_ng_cache_path"])
def save(self):
self._check_binaries()
self._check_models(show_success=False, check_optional=False)
config.setting["acousticbrainz_ng_autorun"] = self.autorun_checkbox.isChecked()
config.setting["acousticbrainz_ng_analyze_optional"] = self.analyze_optional_checkbox.isChecked()
config.setting["acousticbrainz_ng_save_raw"] = self.save_raw_checkbox.isChecked()
max_workers = max(1, min(self.musicnn_workers_input.value(), max(len(REQUIRED_MODELS), len(OPTIONAL_MODELS))))
config.setting["acousticbrainz_ng_max_musicnn_workers"] = max_workers
config.setting["acousticbrainz_ng_binaries_path"] = self.binaries_path_input.text()
config.setting["acousticbrainz_ng_models_path"] = self.models_path_input.text()
config.setting["acousticbrainz_ng_cache_path"] = self.cache_path_input.text()
from .constants import *
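# constants.py provides the PLUGIN_* metadata, the model and binary lists, the subprocess ENV, CONFIG_OPTIONS and GAIA_KEY_ALGORITHMS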
class AcousticBrainzNG:
def __init__(self):
@@ -311,10 +57,9 @@ class AcousticBrainzNG:
if not os.path.exists(model_path):
raise FileNotFoundError(f"Model {model_name} not found at {model_path}")
output_file_path = os.path.join(output_path, output_file)
output_file_path = os.path.join(output_path, f"{output_file}.json")
if os.path.exists(output_file_path):
log.debug(f"{output_file_path} already exists, skipping {model_name}")
return
subprocess.run(
@@ -354,7 +99,6 @@ class AcousticBrainzNG:
def run_gaia():
if os.path.exists(os.path.join(output_path, "gaia.json")):
log.debug(f"Gaia output already exists at {os.path.join(output_path, 'gaia.json')}, skipping")
return
subprocess.run(
@@ -391,6 +135,172 @@ class AcousticBrainzNG:
self._run_musicnn_models(OPTIONAL_MODELS, musicnn_binary_path, file, output_path)
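# Translate the cached model outputs into Picard metadata tags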
def parse_required(self, metadata: dict, file: str) -> None:
if not self._check_required_models():
raise ValueError("Required models not found")
models_path = config.setting["acousticbrainz_ng_models_path"]
if not models_path:
raise ValueError("Models path not configured")
output_path = self._generate_cache_folder(metadata, file)
if not output_path:
raise ValueError("Failed to generate cache folder path")
moods = []
tags = []
for model, output in REQUIRED_MODELS:
model_json_path = os.path.join(models_path, f"{model}.json")
if not os.path.exists(model_json_path):
log.error(f"Model JSON metadata not found: {model_json_path}")
return
output_file_path = os.path.join(output_path, f"{output}.json")
if not os.path.exists(output_file_path):
log.error(f"Output file not found: {output_file_path}")
return
output_data = {}
model_metadata = {}
try:
with open(model_json_path, 'r', encoding='utf-8') as f:
model_metadata = json.load(f)
with open(output_file_path, 'r', encoding='utf-8') as f:
output_data = json.load(f)
except (FileNotFoundError, json.JSONDecodeError) as e:
log.error(f"Error reading model or output file: {e}")
return
if not output_data["predictions"] or not output_data["predictions"]["mean"]:
log.error(f"No predictions found in output data for {model}")
return
if not model_metadata["classes"] or len(model_metadata["classes"]) != len(output_data["predictions"]["mean"]):
log.error(f"No or invalid classes defined in model metadata for {model}")
return
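# Two-class models (mood_*, danceability, gender, tonal_atonal, voice_instrumental): keep the stronger class as a mood label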
if len(model_metadata["classes"]) == 2:
values = output_data["predictions"]["mean"]
max_index = values.index(max(values))
mood_class = model_metadata["classes"][max_index]
mood_formatted = self._format_class(mood_class)
moods.append(mood_formatted)
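# msd-musicnn-1 is multi-class: keep its five highest-scoring classes as tags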
elif model == REQUIRED_MODELS[0][0]:
values = output_data["predictions"]["mean"]
class_value_pairs = [
{"class": class_name, "value": value}
for class_name, value in zip(model_metadata["classes"], values)
]
top5 = sorted(class_value_pairs, key=lambda x: x["value"], reverse=True)[:5]
for item in top5:
formatted_tag = item["class"][0].upper() + item["class"][1:] if item["class"] else ""
tags.append(formatted_tag)
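# With "Save raw values" enabled, store every prediction under ab:hi:<output>:<class>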
if config.setting["acousticbrainz_ng_save_raw"]:
for i in range(len(output_data["predictions"]["mean"])):
metadata[f"ab:hi:{output}:{model_metadata['classes'][i].replace('non', 'not').replace('_', ' ').lower()}"] = output_data["predictions"]["mean"][i]
metadata['mood'] = moods
metadata['tags'] = tags
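# Low-level features (BPM, chords, key) come from the music extractor output cached as gaia.json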
gaia_data = {}
gaia_json_path = os.path.join(output_path, "gaia.json")
if os.path.exists(gaia_json_path):
try:
with open(gaia_json_path, 'r', encoding='utf-8') as f:
gaia_data = json.load(f)
except (FileNotFoundError, json.JSONDecodeError) as e:
log.error(f"Error reading Gaia JSON file: {e}")
return
else:
log.error(f"Gaia JSON file not found: {gaia_json_path}")
return
metadata["bpm"] = int(round(gaia_data["rhythm"]["bpm"]))
if config.setting["acousticbrainz_ng_save_raw"]:
metadata["ab:lo:tonal:chords_changes_rate"] = gaia_data["tonal"]["chords_changes_rate"]
metadata["ab:lo:tonal:chords_key"] = gaia_data["tonal"]["chords_key"]
metadata["ab:lo:tonal:chords_scale"] = gaia_data["tonal"]["chords_scale"]
highest_strength = -1
selected_algorithm = None
for algorithm in GAIA_KEY_ALGORITHMS:
key_data = gaia_data["tonal"][f"key_{algorithm}"]
if key_data["strength"] > highest_strength:
highest_strength = key_data["strength"]
selected_algorithm = algorithm
if selected_algorithm:
selected_key_data = gaia_data["tonal"][f"key_{selected_algorithm}"]
metadata["key"] = "o" if selected_key_data["scale"] == "off" else f"{selected_key_data["key"]}{"m" if selected_key_data["scale"] == "minor" else ""}"
if config.setting["acousticbrainz_ng_save_raw"]:
metadata["ab:lo:tonal:key_scale"] = selected_key_data["scale"]
metadata["ab:lo:tonal:key_key"] = selected_key_data["key"]
def parse_optional(self, metadata: dict, file: str) -> None:
if not self._check_optional_models():
raise ValueError("Optional models not found")
models_path = config.setting["acousticbrainz_ng_models_path"]
if not models_path:
raise ValueError("Models path not configured")
output_path = self._generate_cache_folder(metadata, file)
if not output_path:
raise ValueError("Failed to generate cache folder path")
for model, output in OPTIONAL_MODELS:
model_json_path = os.path.join(models_path, f"{model}.json")
if not os.path.exists(model_json_path):
log.error(f"Model JSON metadata not found: {model_json_path}")
return
output_file_path = os.path.join(output_path, f"{output}.json")
if not os.path.exists(output_file_path):
log.error(f"Output file not found: {output_file_path}")
return
output_data = {}
model_metadata = {}
try:
with open(model_json_path, 'r', encoding='utf-8') as f:
model_metadata = json.load(f)
with open(output_file_path, 'r', encoding='utf-8') as f:
output_data = json.load(f)
except (FileNotFoundError, json.JSONDecodeError) as e:
log.error(f"Error reading model or output file: {e}")
return
if not output_data["predictions"] or not output_data["predictions"]["mean"]:
log.error(f"No predictions found in output data for {model}")
return
if not model_metadata["classes"] or len(model_metadata["classes"]) != len(output_data["predictions"]["mean"]):
log.error(f"No or invalid classes defined in model metadata for {model}")
return
if config.setting["acousticbrainz_ng_save_raw"]:
for i in range(len(output_data["predictions"]["mean"])):
metadata[f"ab:hi:{output}:{model_metadata['classes'][i].replace('non', 'not').replace('_', ' ').lower()}"] = output_data["predictions"]["mean"][i]
@staticmethod
def _format_class(class_name: str) -> str:
return class_name.replace("non", "not").replace("_", " ").capitalize()
def _generate_cache_folder(self, metadata: dict, file_path: str) -> str:
cache_base = config.setting["acousticbrainz_ng_cache_path"]
if not cache_base:
@@ -477,17 +387,226 @@ class AcousticBrainzNG:
acousticbrainz_ng = AcousticBrainzNG()
class AcousticBrainzNGTrackAction(BaseAction):
class AcousticBrainzNGAction(BaseAction):
NAME = f"Analyze with {PLUGIN_NAME}"
def callback(self, objs):
tracks = list(filter(lambda o: isinstance(o, Track), objs))
for track in tracks:
acousticbrainz_ng.analyze_required(track.metadata, track.files[0].filename)
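# Analyze every file attached to the track, then parse the results into its metadata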
def _process_track(self, track: Track) -> None:
for file in track.files:
acousticbrainz_ng.analyze_required(file.metadata, file.filename)
acousticbrainz_ng.parse_required(file.metadata, file.filename)
# TODO: Implement track replaygain
if config.setting["acousticbrainz_ng_analyze_optional"]:
acousticbrainz_ng.analyze_optional(track.metadata, track.files[0].filename)
acousticbrainz_ng.analyze_optional(file.metadata, file.filename)
acousticbrainz_ng.parse_optional(file.metadata, file.filename)
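# The context-menu action accepts both tracks and whole albums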
def callback(self, objs):
for item in (t for t in objs if isinstance(t, Track) or isinstance(t, Album)):
if isinstance(item, Track):
self._process_track(item)
elif isinstance(item, Album):
for track in item.tracks:
self._process_track(track)
# TODO: Implement album replaygain
class AcousticBrainzNGOptionsPage(OptionsPage):
NAME = "acousticbrainz_ng"
TITLE = "AcousticBrainz-ng"
PARENT = "plugins"
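# Option definitions shared from constants.py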
options = CONFIG_OPTIONS
def __init__(self, parent=None) -> None:
super().__init__(parent)
self.setup_ui()
def _create_path_input_layout(self, line_edit: QtWidgets.QLineEdit, browse_callback, check_callback=None) -> QtWidgets.QHBoxLayout:
layout = QtWidgets.QHBoxLayout()
browse_button = QtWidgets.QPushButton("Browse", self)
browse_button.clicked.connect(browse_callback)
layout.addWidget(line_edit)
layout.addWidget(browse_button)
if check_callback:
check_button = QtWidgets.QPushButton("Check", self)
check_button.clicked.connect(check_callback)
layout.addWidget(check_button)
return layout
def setup_ui(self) -> None:
layout = QtWidgets.QVBoxLayout(self)
options_group = QtWidgets.QGroupBox("Options", self)
options_group.setSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
options_layout = QtWidgets.QVBoxLayout(options_group)
self.autorun_checkbox = QtWidgets.QCheckBox("Autorun analysis", self)
self.autorun_checkbox.setToolTip("Automatically run analysis on new tracks")
self.analyze_optional_checkbox = QtWidgets.QCheckBox("Analyze optional MusicNN models", self)
self.analyze_optional_checkbox.setToolTip("Include optional MusicNN models in the analysis; their results are currently unused unless 'Save raw values' is enabled")
self.save_raw_checkbox = QtWidgets.QCheckBox("Save raw values", self)
self.save_raw_checkbox.setToolTip("Save raw MusicNN numbers in the metadata")
musicnn_workers_layout = QtWidgets.QHBoxLayout()
musicnn_workers_label = QtWidgets.QLabel("Max MusicNN workers:", self)
musicnn_workers_label.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
self.musicnn_workers_input = QtWidgets.QSpinBox(self)
self.musicnn_workers_input.setToolTip("Maximum number of concurrent MusicNN workers")
self.musicnn_workers_input.setRange(1, max(len(REQUIRED_MODELS), len(OPTIONAL_MODELS)))
self.musicnn_workers_input.setSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
musicnn_workers_layout.addWidget(musicnn_workers_label)
musicnn_workers_layout.addStretch()
musicnn_workers_layout.addWidget(self.musicnn_workers_input)
options_layout.addWidget(self.autorun_checkbox)
options_layout.addWidget(self.analyze_optional_checkbox)
options_layout.addWidget(self.save_raw_checkbox)
options_layout.addLayout(musicnn_workers_layout)
layout.addWidget(options_group)
paths_group = QtWidgets.QGroupBox("Paths", self)
paths_group.setSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
paths_layout = QtWidgets.QVBoxLayout(paths_group)
# Binaries path
self.binaries_path_input = QtWidgets.QLineEdit(self)
self.binaries_path_input.setPlaceholderText("Path to Essentia binaries")
binaries_layout = self._create_path_input_layout(
self.binaries_path_input,
lambda: self._browse_folder(self.binaries_path_input),
lambda: (self._check_binaries(show_success=True), None)[1]
)
# Models path
self.models_path_input = QtWidgets.QLineEdit(self)
self.models_path_input.setPlaceholderText("Path to MusicNN models")
models_layout = self._create_path_input_layout(
self.models_path_input,
lambda: self._browse_folder(self.models_path_input),
lambda: (self._check_models(show_success=True, check_optional=True), None)[1]
)
# Cache path
self.cache_path_input = QtWidgets.QLineEdit(self)
self.cache_path_input.setPlaceholderText("Path to cache directory")
cache_layout = self._create_path_input_layout(
self.cache_path_input,
lambda: self._browse_folder(self.cache_path_input)
)
paths_layout.addWidget(QtWidgets.QLabel("Binaries", self))
paths_layout.addLayout(binaries_layout)
paths_layout.addWidget(QtWidgets.QLabel("Models", self))
paths_layout.addLayout(models_layout)
paths_layout.addWidget(QtWidgets.QLabel("Cache", self))
paths_layout.addLayout(cache_layout)
layout.addWidget(paths_group)
layout.addStretch()
def _check_binaries(self, show_success=False) -> bool:
path = self.binaries_path_input.text()
if not path or not os.path.exists(path):
QtWidgets.QMessageBox.warning(self, "Binaries", "Invalid or empty path.")
return False
missing_binaries = []
for binary in REQUIRED_BINARIES:
binary_path = os.path.join(path, binary)
if os.name == 'nt': # Windows
binary_path += '.exe'
if not os.path.exists(binary_path):
missing_binaries.append(binary)
if missing_binaries:
message = f"Missing binaries:\n" + "\n".join(f"{binary}" for binary in missing_binaries)
QtWidgets.QMessageBox.warning(self, "Binaries", message)
return False
else:
if show_success:
QtWidgets.QMessageBox.information(self, "Binaries", "All binaries found!")
return True
def _check_models(self, show_success=False, check_optional=True) -> bool:
path = self.models_path_input.text()
if not path or not os.path.exists(path):
QtWidgets.QMessageBox.warning(self, "Models", "Invalid or empty path.")
return False
missing_required = []
for model in REQUIRED_MODELS:
model_path = os.path.join(path, f"{model[0]}.pb")
metadata_path = os.path.join(path, f"{model[0]}.json")
if not os.path.exists(model_path) or not os.path.exists(metadata_path):
missing_required.append(model[0])
missing_optional = []
if check_optional:
for model in OPTIONAL_MODELS:
model_path = os.path.join(path, f"{model[0]}.pb")
metadata_path = os.path.join(path, f"{model[0]}.json")
if not os.path.exists(model_path) or not os.path.exists(metadata_path):
missing_optional.append(model[0])
if missing_required:
message = f"Note: Model JSON metadata required as well\n\nMissing required models:\n" + "\n".join(f"{model}" for model in missing_required)
QtWidgets.QMessageBox.warning(self, "Models", message)
return False
elif missing_optional and check_optional:
message = f"Note: Model JSON metadata required as well\n\nMissing optional models:\n" + "\n".join(f"{model}" for model in missing_optional)
QtWidgets.QMessageBox.information(self, "Models", message)
if show_success:
if missing_optional and check_optional:
QtWidgets.QMessageBox.information(self, "Models", "All required models found! Some optional models are missing.")
else:
QtWidgets.QMessageBox.information(self, "Models", "All models found!")
return True
def _browse_folder(self, line_edit: QtWidgets.QLineEdit) -> None:
folder = QtWidgets.QFileDialog.getExistingDirectory(
self, "Select Folder",
line_edit.text() or os.path.expanduser("~")
)
if folder:
line_edit.setText(folder)
def load(self):
self.autorun_checkbox.setChecked(config.setting["acousticbrainz_ng_autorun"] or False)
self.analyze_optional_checkbox.setChecked(config.setting["acousticbrainz_ng_analyze_optional"] or False)
self.save_raw_checkbox.setChecked(config.setting["acousticbrainz_ng_save_raw"] or False)
self.musicnn_workers_input.setValue(config.setting["acousticbrainz_ng_max_musicnn_workers"] or 4)
self.binaries_path_input.setText(config.setting["acousticbrainz_ng_binaries_path"])
self.models_path_input.setText(config.setting["acousticbrainz_ng_models_path"])
self.cache_path_input.setText(config.setting["acousticbrainz_ng_cache_path"])
def save(self):
self._check_binaries()
self._check_models(show_success=False, check_optional=False)
config.setting["acousticbrainz_ng_autorun"] = self.autorun_checkbox.isChecked()
config.setting["acousticbrainz_ng_analyze_optional"] = self.analyze_optional_checkbox.isChecked()
config.setting["acousticbrainz_ng_save_raw"] = self.save_raw_checkbox.isChecked()
max_workers = max(1, min(self.musicnn_workers_input.value(), max(len(REQUIRED_MODELS), len(OPTIONAL_MODELS))))
config.setting["acousticbrainz_ng_max_musicnn_workers"] = max_workers
config.setting["acousticbrainz_ng_binaries_path"] = self.binaries_path_input.text()
config.setting["acousticbrainz_ng_models_path"] = self.models_path_input.text()
config.setting["acousticbrainz_ng_cache_path"] = self.cache_path_input.text()
register_options_page(AcousticBrainzNGOptionsPage)
register_track_action(AcousticBrainzNGTrackAction())
register_track_action(AcousticBrainzNGAction())
register_album_action(AcousticBrainzNGAction())

constants.py Normal file

@@ -0,0 +1,64 @@
import os
from picard.config import BoolOption, TextOption, IntOption
PLUGIN_NAME = "AcousticBrainz-ng"
PLUGIN_AUTHOR = "cy1der"
PLUGIN_DESCRIPTION = """
Analyze track acoustic characteristics using Essentia
<br/>
This plugin is not affiliated with the <a href='https://acousticbrainz.org'>AcousticBrainz</a> project<br/>
This is not a 1:1 recreation of the AcousticBrainz schema, but will provide most of the meaningful data<br/>
External dependencies:
<ul>
<li><a href='https://essentia.upf.edu'>Essentia</a> binaries compiled with TensorFlow and gaia2 support</li>
<li>A few MusicNN models (see user guide for details)</li>
</ul>
<strong>This plugin is CPU heavy!</strong>
"""
PLUGIN_VERSION = "1.0.0"
PLUGIN_API_VERSIONS = ["2.7", "2.8", "2.9", "2.10", "2.11"]
PLUGIN_LICENSE = "GPL-2.0-or-later"
PLUGIN_LICENSE_URL = "https://www.gnu.org/licenses/gpl-2.0.html"
PLUGIN_USER_GUIDE_URL = "https://example.com" # TODO: Update with actual user guide URL
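# (model file basename, output/tag name); each entry needs <basename>.pb and <basename>.json in the models folder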
REQUIRED_MODELS: list[tuple[str, str]] = [
("msd-musicnn-1", "msd"),
("mood_acoustic-musicnn-mtt-2", "mood_acoustic"),
("mood_aggressive-musicnn-mtt-2", "mood_aggressive"),
("mood_electronic-musicnn-msd-2", "mood_electronic"),
("mood_happy-musicnn-msd-2", "mood_happy"),
("mood_party-musicnn-mtt-2", "mood_party"),
("mood_relaxed-musicnn-msd-2", "mood_relaxed"),
("mood_sad-musicnn-msd-2", "mood_sad"),
("danceability-musicnn-msd-2", "danceability"),
("gender-musicnn-msd-2", "gender"),
("tonal_atonal-musicnn-mtt-2", "tonal_atonal"),
("voice_instrumental-musicnn-msd-2", "voice_instrumental")
]
OPTIONAL_MODELS: list[tuple[str, str]] = [
("genre_electronic-musicnn-msd-2", "genre_electronic"),
("genre_rosamerica-musicnn-msd-2", "genre_rosamerica"),
("genre_tzanetakis-musicnn-msd-2", "genre_tzanetakis")
]
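# Essentia command-line tools the plugin shells out to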
REQUIRED_BINARIES: list[str] = [
"streaming_extractor_music",
"streaming_musicnn_predict",
"streaming_md5",
]
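# Environment passed to the Essentia subprocesses; oneDNN optimizations are disabled to avoid memory hogging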
ENV = os.environ.copy()
ENV['TF_ENABLE_ONEDNN_OPTS'] = "0"
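# Plugin settings and their defaults; paths default to folders next to the plugin file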
CONFIG_OPTIONS = [
TextOption("setting", "acousticbrainz_ng_binaries_path", os.path.join(os.path.dirname(__file__), "bin")),
TextOption("setting", "acousticbrainz_ng_models_path", os.path.join(os.path.dirname(__file__), "models")),
TextOption("setting", "acousticbrainz_ng_cache_path", os.path.join(os.path.dirname(__file__), "cache")),
IntOption("setting", "acousticbrainz_ng_max_musicnn_workers", 4),
BoolOption("setting", "acousticbrainz_ng_autorun", False),
BoolOption("setting", "acousticbrainz_ng_analyze_optional", False),
BoolOption("setting", "acousticbrainz_ng_save_raw", False)
]
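# Key estimation profiles in the extractor output (key_edma, key_krumhansl, key_temperley); parse_required keeps the strongest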
GAIA_KEY_ALGORITHMS = ["edma", "krumhansl", "temperley"]