Merge pull request #1 from DBraun/feature/pickle

add pickle support
This commit is contained in:
David Braun 2025-02-13 14:44:12 -05:00 committed by GitHub
commit b48be9ae87
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
9 changed files with 227 additions and 7 deletions

1
.gitignore vendored
View file

@ -39,3 +39,4 @@ dataset
*.egg-info* *.egg-info*
vita/LICENSE vita/LICENSE
dist dist
headless/builds/VisualStudio2022/x64

View file

@ -1,6 +1,6 @@
# Vita # Vita
Vita is a Python module for interacting with the [Vital Synthesizer](https://github.com/mtytel/vital). **It is not an official product related to Vital**. Vita is a Python module for interacting with the [Vital Synthesizer](https://github.com/mtytel/vital). **It is not an official product related to Vital**. Vita uses [Effort-based versioning](https://jacobtomlinson.dev/effver/).
## Installation ## Installation
@ -35,8 +35,8 @@ synth.set_bpm(bpm)
print("potential sources:", vita.get_modulation_sources()) print("potential sources:", vita.get_modulation_sources())
print("potential destinations:", vita.get_modulation_destinations()) print("potential destinations:", vita.get_modulation_destinations())
# "lfo_1" and "filter_1_cutoff" are potential sources and destinations. # "lfo_1" is a potential source,
# Let's use "lfo_1" as a source and "filter_1_cutoff" as a destination. # and "filter_1_cutoff" is a potential destination.
assert synth.connect_modulation("lfo_1", "filter_1_cutoff") assert synth.connect_modulation("lfo_1", "filter_1_cutoff")
controls = synth.get_controls() controls = synth.get_controls()
@ -53,12 +53,15 @@ wavfile.write("generated_preset.wav", SAMPLE_RATE, audio.T)
preset_path = "generated_preset.vital" preset_path = "generated_preset.vital"
json_text = synth.to_json() json_text = synth.to_json()
with open(preset_path, "w") as f: with open(preset_path, "w") as f:
f.write(json_text) f.write(json_text)
# Load JSON text # Load JSON text
with open(preset_path, "r") as f: with open(preset_path, "r") as f:
json_text = f.read() json_text = f.read()
assert synth.load_json(json_text) assert synth.load_json(json_text)
# Or load directly from file # Or load directly from file

View file

@ -1,3 +1,7 @@
# export LIBDIR=/usr/lib/python3.10
# export PYTHONLIBPATH=/usr/lib/python3.10
# export PYTHONINCLUDEPATH=/usr/include/python3.10
echo "PYTHONLIBPATH: $PYTHONLIBPATH" echo "PYTHONLIBPATH: $PYTHONLIBPATH"
echo "PYTHONINCLUDEPATH: $PYTHONINCLUDEPATH" echo "PYTHONINCLUDEPATH: $PYTHONINCLUDEPATH"
echo "LIBDIR: $LIBDIR" echo "LIBDIR: $LIBDIR"
@ -16,3 +20,6 @@ cd ../..
make headless_server make headless_server
echo "build_linux.sh is done!" echo "build_linux.sh is done!"
# to build a wheel:
# python3 -m build --wheel

View file

@ -0,0 +1 @@
output

View file

@ -0,0 +1,14 @@
# Vita - Multiprocessing
This script demonstrates how to use [multiprocessing](https://docs.python.org/3/library/multiprocessing.html) to efficiently generate one-shots. The number of workers is by default `multiprocessing.cpu_count()`. Each worker has a persistent synthesizer instance. Each worker consumes paths of presets from a multiprocessing [Queue](https://docs.python.org/3/library/multiprocessing.html#pipes-and-queues). For each preset, the worker renders out audio for a configurable MIDI pitch range. The output audio path includes the pitch and preset name.
Example usage:
```bash
python main.py --preset-dir "path/to/vital_presets"
```
To see all available parameters:
```bash
python main.py --help
```

View file

@ -0,0 +1,183 @@
# This file is part of the Vita distribution (https://github.com/DBraun/Vita).
# Copyright (c) 2025 David Braun.
import argparse
from collections import namedtuple
import logging
import multiprocessing
import os
from pathlib import Path
import time
import traceback
# extra libraries to install with pip
import vita
import numpy as np
from scipy.io import wavfile
from tqdm import tqdm
# Unit of work consumed by a Worker: the filesystem path of one Vital preset.
Item = namedtuple("Item", ["preset_path"])
class Worker:
    """A pool worker that owns one persistent ``vita.Synth`` instance.

    Each worker drains a shared queue of ``Item(preset_path)`` entries and,
    for every preset, renders one WAV file per MIDI pitch in the inclusive
    range [pitch_low, pitch_high].
    """

    def __init__(
        self,
        queue: multiprocessing.Queue,
        bpm: float = 120.0,
        note_duration: float = 2.0,
        render_duration: float = 5.0,
        pitch_low: int = 60,
        pitch_high: int = 72,
        velocity: int = 100,
        output_dir="output",
    ):
        # Shared work queue of Item entries (filled by main() before the pool starts).
        self.queue = queue
        self.bpm = bpm
        # note_duration: how long the note is held; render_duration: total audio length.
        self.note_duration = note_duration
        self.render_duration = render_duration
        self.pitch_low, self.pitch_high = pitch_low, pitch_high
        self.velocity = velocity
        self.output_dir = Path(output_dir)

    def startup(self):
        """Create the per-process synthesizer once, before processing items."""
        synth = vita.Synth()
        synth.set_bpm(self.bpm)
        self.synth = synth

    def process_item(self, item: "Item"):
        """Render one preset at every pitch in the configured range."""
        preset_path = item.preset_path
        self.synth.load_preset(preset_path)
        # NOTE: basename keeps the ".vital" suffix, so outputs are named like
        # "60_preset.vital.wav" — matches the original naming scheme.
        basename = os.path.basename(preset_path)
        for pitch in range(self.pitch_low, self.pitch_high + 1):
            # BUG FIX: the original passed self.note_duration twice, so
            # render_duration (and the --render-duration flag) had no effect.
            audio = self.synth.render(
                pitch, self.velocity, self.note_duration, self.render_duration
            )
            output_path = self.output_dir / f"{pitch}_{basename}.wav"
            # audio is (channels, samples); wavfile.write expects (samples, channels).
            wavfile.write(str(output_path), 44_100, audio.transpose())

    def run(self):
        """Drain the queue until empty.

        Returns None on success, or a formatted traceback string on failure
        (main() logs any non-None return value).
        """
        try:
            self.startup()
            while True:
                try:
                    item = self.queue.get_nowait()
                    self.process_item(item)
                # multiprocessing.queues re-exports queue.Empty, which
                # get_nowait() raises when the queue is drained.
                except multiprocessing.queues.Empty:
                    break
        except Exception:
            return traceback.format_exc()
def main(
    preset_dir,
    bpm: float = 120.0,
    note_duration: float = 2.0,
    render_duration: float = 4.0,
    pitch_low: int = 60,
    pitch_high: int = 60,
    num_workers=None,
    output_dir="output",
    logging_level="INFO",
):
    """Render every .vital preset found under preset_dir using a process pool."""
    # Configure the root handler, then a named logger for this tool.
    logging.basicConfig()
    logger = logging.getLogger("vita")
    logger.setLevel(logging_level.upper())

    # Recursively collect all preset files.
    preset_paths = list(Path(preset_dir).rglob("*.vital"))
    total_items = len(preset_paths)  # sizes the progress bar

    # A managed queue is shareable across worker processes; fill it up front.
    work_queue = multiprocessing.Manager().Queue()
    for path in preset_paths:
        work_queue.put(Item(str(path)))

    # Holds one AsyncResult per spawned worker.
    async_results = []
    worker_count = num_workers or multiprocessing.cpu_count()

    logger.info(f"Note duration: {note_duration}")
    logger.info(f"Render duration: {render_duration}")
    logger.info(f"Using num workers: {worker_count}")
    logger.info(f"Pitch low: {pitch_low}")
    logger.info(f"Pitch high: {pitch_high}")
    logger.info(f"Output directory: {output_dir}")

    os.makedirs(output_dir, exist_ok=True)

    with multiprocessing.Pool(processes=worker_count) as pool:
        # Launch one long-lived Worker per process slot; each drains the queue.
        for _ in range(worker_count):
            worker = Worker(
                work_queue,
                bpm=bpm,
                note_duration=note_duration,
                render_duration=render_duration,
                pitch_low=pitch_low,
                pitch_high=pitch_high,
                output_dir=output_dir,
            )
            async_results.append(pool.apply_async(worker.run))

        # Poll until every worker's run() has returned, updating tqdm from the
        # queue size (an approximation of how many items have been consumed).
        pbar = tqdm(total=total_items)
        while True:
            pending = sum(1 for result in async_results if not result.ready())
            pbar.update(total_items - work_queue.qsize() - pbar.n)
            if pending == 0:
                break
            time.sleep(0.1)
        pbar.close()

        # Surface any traceback string a worker returned.
        for i, result in enumerate(async_results):
            exception = result.get()
            if exception is not None:
                logger.error(f"Exception in worker {i}:\n{exception}")

    logger.info("All done!")
if __name__ == "__main__":
    # multiprocessing.Pool requires the spawning code to live under __main__.
    # See https://docs.python.org/3/library/multiprocessing.html
    # fmt: off
    cli = argparse.ArgumentParser()
    cli.add_argument("--preset-dir", required=True, help="Directory path of Vital presets.")
    cli.add_argument("--bpm", default=120.0, type=float, help="Beats per minute for the Render Engine.")
    cli.add_argument("--note-duration", default=1, type=float, help="Note duration in seconds.")
    cli.add_argument("--pitch-low", default=60, type=int, help="Lowest MIDI pitch to be used (inclusive).")
    cli.add_argument("--pitch-high", default=60, type=int, help="Highest MIDI pitch to be used (inclusive).")
    cli.add_argument("--render-duration", default=1, type=float, help="Render duration in seconds.")
    cli.add_argument("--num-workers", default=None, type=int, help="Number of workers to use.")
    cli.add_argument("--output-dir", default=os.path.join(os.path.dirname(__file__), "output"), help="Output directory.")
    cli.add_argument("--log-level", default="INFO", choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL", "NOTSET"], help="Logger level.")
    # fmt: on
    args = cli.parse_args()

    # Forward every CLI value to main() by keyword for readability.
    main(
        args.preset_dir,
        bpm=args.bpm,
        note_duration=args.note_duration,
        render_duration=args.render_duration,
        pitch_low=args.pitch_low,
        pitch_high=args.pitch_high,
        num_workers=args.num_workers,
        output_dir=args.output_dir,
        logging_level=args.log_level,
    )

View file

@ -379,6 +379,15 @@ NB_MODULE(vita, m) {
.def(nb::init<>()) // Ensure there's a default constructor or adjust .def(nb::init<>()) // Ensure there's a default constructor or adjust
// accordingly // accordingly
// Bind the first overload of connectModulation // Bind the first overload of connectModulation
.def("__getstate__", [](HeadlessSynth &synth) {
return const_cast<HeadlessSynth &>(synth).pyToJson(); // Removes const safely
})
.def("__setstate__", [](HeadlessSynth &synth, const std::string &json) {
new (&synth) HeadlessSynth();
synth.loadFromString(json);
})
.def("connect_modulation", .def("connect_modulation",
nb::overload_cast<const std::string &, const std::string &>( nb::overload_cast<const std::string &, const std::string &>(
&HeadlessSynth::pyConnectModulation), &HeadlessSynth::pyConnectModulation),

View file

@ -53,8 +53,10 @@ def test_render(bpm=120.0, note_dur=1.0, render_dur=3.0, pitch=36, velocity=0.7)
# Load JSON text # Load JSON text
with open(preset_path, "r") as f: with open(preset_path, "r") as f:
json_text = f.read() json_text1 = f.read()
assert synth.load_json(json_text) assert synth.load_json(json_text1)
assert json_text == json_text1
# Or load directly from file: # Or load directly from file:
assert synth.load_preset(preset_path) assert synth.load_preset(preset_path)

View file

@ -1 +1 @@
__version__ = "0.0.3" __version__ = "0.0.4"