Modal synthesis

Tools for computing the vibration modes of an object using the finite element method, plus utilities for rendering those modes to audio.


source

frequency_to_eigenvalue

 frequency_to_eigenvalue (frequencies)

source

eigenvalue_to_frequency

 eigenvalue_to_frequency (eigenvalues)

source

render_modes_coeffs

 render_modes_coeffs (eigenvalues:Union[numpy.ndarray,torch.Tensor],
                      eigenvectors:Union[numpy.ndarray,torch.Tensor],
                      alpha:float, beta:float,
                      length_in_samples:int=44100, sample_rate:int=44100)

Renders a batch of modes given damping coefficients.


source

render_modes

 render_modes (frequency:Union[torch.Tensor,numpy.ndarray],
               decay:Union[torch.Tensor,numpy.ndarray],
               amplitude:Union[torch.Tensor,numpy.ndarray], initial_phase:
               Union[torch.Tensor,numpy.ndarray,NoneType]=None,
               length_in_samples:int=44100, sample_rate:float=44100,
               return_sum:bool=True)

Renders a batch of modes given their parameters.

| Parameter | Type | Default | Details |
|---|---|---|---|
| frequency | Union[torch.Tensor, numpy.ndarray] | — | Mode frequencies |
| decay | Union[torch.Tensor, numpy.ndarray] | — | Mode decay parameters |
| amplitude | Union[torch.Tensor, numpy.ndarray] | — | Mode amplitudes |
| initial_phase | Union[torch.Tensor, numpy.ndarray, None] | None | |
| length_in_samples | int | 44100 | Length of the output signal in samples |
| sample_rate | float | 44100 | Sample rate of the output signal |
| return_sum | bool | True | Return the sum of the modes |
| **Returns** | Union[torch.Tensor, numpy.ndarray] | — | Rendered signal |
# Random eigenvalues sampled uniformly in [0, 1e9)
# (fixed: the old comment claimed "1e9 to 1e10", but torch.rand is in [0, 1)).
vals = torch.rand(1, 64) * 1e9
vecs = torch.rand(1, 3, 3, 64) * 0.1
alpha = 5      # mass-proportional (Rayleigh) damping coefficient
beta = 4e-8    # stiffness-proportional (Rayleigh) damping coefficient
sr = 16000
# Render half a second of audio for the whole batch of spatial points.
audio = render_modes_coeffs(vals, vecs, alpha, beta, int(sr * 0.5), sr)

fig, axs = plt.subplots(3, 1, figsize=(10, 10))
for i in range(3):
    axs[i].plot(audio[0][1][i].numpy())
    # Fixed: was f"Pixel [1]{i}", which rendered as "Pixel [1]0" etc.
    axs[i].set_title(f"Pixel [1][{i}]")
    ipd.display(ipd.Audio(audio[0][1][i].numpy(), rate=sr))

# Magnitude spectrum of one rendered pixel, for comparison with the
# analytically expected mode frequencies below.
target_fft = torch.fft.rfft(audio[0, 0, 0, :]).abs()

freq = torch.fft.rfftfreq(audio.shape[-1], 1/sr)
plt.plot(freq.numpy(), target_fft.numpy())
plt.show()

# Damped natural frequencies from Rayleigh damping:
# omega_d^2 = lambda - (0.5 * (alpha + beta * lambda))^2.
damping_coeffs = 0.5 * (alpha + beta * vals[0])
damped_freqs = vals[0] - damping_coeffs**2
damped_freqs[damped_freqs < 0] = 0  # overdamped modes do not oscillate
damped_freqs = eigenvalue_to_frequency(damped_freqs)

# Overlay expected mode positions; 4001 is only a visual scaling factor.
plt.stem(damped_freqs.numpy(), vecs[0, 0, 0].numpy() * 4001)

<StemContainer object of 3 artists>

Examples

Generating a single decaying mode:

# Parameters for a single 440 Hz mode with exponential decay.
frequency = torch.tensor([440.0])
decay = torch.tensor([100.0])  # decay rate: larger values die out faster
amplitude = torch.ones(1)
length_in_samples = 1024
sample_rate = 16000.0

# initial_phase=None lets render_modes use its default phase.
signal = render_modes(frequency, decay, amplitude, None, length_in_samples, sample_rate)
plt.plot(signal)

Example usage

# Build a modal system from a predefined material and visualize its
# eigenvalues, damped frequencies, per-node mode gains, and decay envelopes.
m = MATERIALS["polycarbonate"]
s = System(m)

fig, axs = plt.subplots(4, 1, figsize=(8, 8))

# plot the eigenvalues
axs[0].stem(s.eigenvalues)
axs[0].set_title("Eigenvalues")

# plot the damped frequencies
axs[1].stem(s.damped_frequencies)
axs[1].set_title("Damped Frequencies")
axs[1].set_yscale("log")

# plot node gains
# NOTE(review): 100 presumably selects a node index — confirm against System.get_mode_gains.
axs[2].stem(np.abs(s.get_mode_gains(100)))
axs[2].set_title("Mode Gains")

# plot amplitude envelopes given by damping coefficients
# exp(-c * t) over the first 2000 samples at 44.1 kHz; [..., None]
# broadcasts the time axis against all modes at once.
axs[3].plot(
    np.exp(-s.damping_coefficients * np.linspace(0, 2000.0 / 44100.0, 2000)[..., None])
)

fig.tight_layout()
fig.show()
/tmp/ipykernel_80158/1503038420.py:25: UserWarning: Matplotlib is currently using module://matplotlib_inline.backend_inline, which is a non-GUI backend, so cannot show the figure.
  fig.show()

Rendering audio from predefined materials:

# Render T seconds of audio at f_s Hz for every predefined material.
f_s = 44100      # sample rate [Hz]
T = 0.75         # duration [s]
num_modes = 64
mesh = create_mesh(n_refinements=4)

# get the dimensions of the mesh
x_min, x_max = mesh.p[0].min(), mesh.p[0].max()
y_min, y_max = mesh.p[1].min(), mesh.p[1].max()


print(f"Mesh dimensions: {x_max - x_min} x {y_max - y_min}")
print(f"Mesh bounding box: ({x_min}, {y_min}) - ({x_max}, {y_max})")

# One System (and one rendered clip) per material entry.
for m in MATERIALS:
    s = System(MATERIALS[m], k=num_modes, mesh=mesh)
    signal = s.render(T, f_s)
    print(f"{m.lower()}:")
    save_and_display_audio(signal, f"{m.lower()}.wav", f_s)
Mesh dimensions: 1.0 x 1.0
Mesh bounding box: (0.0, 0.0) - (1.0, 1.0)
ceramic:
glass:
wood:
plastic:
iron:
polycarbonate:
steel:
custom:
num_modes = 32
# Custom sampling ranges for Material.random(); each tuple is (min, max).
custom_ranges = MaterialRanges(
    rho=(500, 20000),    # density
    E=(2e10, 5e10),      # Young's modulus
    nu=(2, 2.11),        # NOTE(review): physical Poisson's ratio is <= 0.5 — confirm units/meaning
    alpha=(1, 12),       # mass-proportional damping
    beta=(1e-6, 2e-6),   # stiffness-proportional damping
)

Material.set_default_ranges(custom_ranges)
material = Material.random()
mesh = create_mesh(n_refinements=2)
# Find the mesh node exactly at the unit-square centre (0.5, 0.5).
center_idx = mesh.nodes_satisfying(lambda x: (x[0] == .5) & (x[1] == .5))
print(f"center node: {center_idx}")
s = System(material, k=num_modes, mesh=mesh)
# Strike the centre node; clip the rendered signal to the [-1, 1] audio range.
signal = np.clip(s.render(T, f_s, impulse_node_idx=int(center_idx)), -1, 1)
# Fixed: was f"custom.wav" — an f-string with no placeholders (lint F541).
save_and_display_audio(signal, "custom.wav", f_s)
center node: [6]

Rendering audio from randomly generated materials:

# Render four randomly sampled materials.
f_s = 44100  # sample rate [Hz]
T = 0.75     # duration [s]

for i in range(4):
    m = Material.random()
    s = System(m)
    signal = s.render(T, f_s)
    # NOTE(review): the captured output reads "Material 0 — Material(...)",
    # so str(m) presumably starts with " — "; confirm, otherwise add an
    # explicit separator between {i} and {m} here.
    print(f'Material {i}{m}:')
    save_and_display_audio(signal, f'random_{i}.wav', f_s)
Material 0 — Material(rho=10491.494936687797, E=23738481498.480457, nu=2.0589013931967086, alpha=11.987371289699409, beta=1.4941768366743795e-06):
Material 1 — Material(rho=18149.675239893957, E=41527152275.70115, nu=2.036596716834167, alpha=5.864244986055844, beta=1.6198757753683198e-06):
Material 2 — Material(rho=15295.51395426235, E=37207269181.088165, nu=2.0699158213124256, alpha=3.4064759996172103, beta=1.7399038626686502e-06):
Material 3 — Material(rho=10707.976358395857, E=20442477271.769733, nu=2.0415409164909177, alpha=7.575957672808222, beta=1.1207046599371003e-06):