Updating link syntax (#1236)

Yotam Mann 2024-04-29 10:48:37 -04:00 committed by GitHub
parent 080856221c
commit 57eacfa13b
55 changed files with 116 additions and 116 deletions

@@ -92,8 +92,8 @@ export class Analyser extends ToneAudioNode<AnalyserOptions> {
  }
  /**
- * Run the analysis given the current settings. If [[channels]] = 1,
- * it will return a Float32Array. If [[channels]] > 1, it will
+ * Run the analysis given the current settings. If {@link channels} = 1,
+ * it will return a Float32Array. If {@link channels} > 1, it will
  * return an array of Float32Arrays where each index in the array
  * represents the analysis done on a channel.
  */
@@ -128,7 +128,7 @@ export class Analyser extends ToneAudioNode<AnalyserOptions> {
  /**
  * The number of channels the analyser does the analysis on. Channel
- * separation is done using [[Split]]
+ * separation is done using {@link Split}
  */
  get channels(): number {
  return this._analysers.length;
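
As a usage note on the {@link channels} option above — a minimal sketch of multi-channel analysis (the 2-channel waveform setup is illustrative, not part of this commit):

const analyser = new Tone.Analyser({ type: "waveform", size: 256, channels: 2 });
const osc = new Tone.Oscillator().connect(analyser).start();
// with channels = 2, getValue() returns one Float32Array per channel
const values = analyser.getValue() as Float32Array[];
console.log(values.length); // 2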

@@ -51,7 +51,7 @@ export class FFT extends MeterBase<FFTOptions> {
  /**
  * Gets the current frequency data from the connected audio source.
- * Returns the frequency data of length [[size]] as a Float32Array of decibel values.
+ * Returns the frequency data of length {@link size} as a Float32Array of decibel values.
  */
  getValue(): Float32Array {
  const values = this._analyser.getValue() as Float32Array;
@@ -60,7 +60,7 @@ export class FFT extends MeterBase<FFTOptions> {
  /**
  * The size of analysis. This must be a power of two in the range 16 to 16384.
- * Determines the size of the array returned by [[getValue]] (i.e. the number of
+ * Determines the size of the array returned by {@link getValue} (i.e. the number of
  * frequency bins). Large FFT sizes may be costly to compute.
  */
  get size(): PowerOfTwo {
@@ -81,7 +81,7 @@ export class FFT extends MeterBase<FFTOptions> {
  }
  /**
- * Returns the frequency value in hertz of each of the indices of the FFT's [[getValue]] response.
+ * Returns the frequency value in hertz of each of the indices of the FFT's {@link getValue} response.
  * @example
  * const fft = new Tone.FFT(32);
  * console.log([0, 1, 2, 3, 4].map(index => fft.getFrequencyOfIndex(index)));

@@ -80,7 +80,7 @@ export class Meter extends MeterBase<MeterOptions> {
  }
  /**
- * Use [[getValue]] instead. For the previous getValue behavior, use DCMeter.
+ * Use {@link getValue} instead. For the previous getValue behavior, use DCMeter.
  * @deprecated
  */
  getLevel(): number | number[] {
@@ -90,9 +90,9 @@ export class Meter extends MeterBase<MeterOptions> {
  /**
  * Get the current value of the incoming signal.
- * Output is in decibels when [[normalRange]] is `false`.
- * If [[channels]] = 1, then the output is a single number
- * representing the value of the input signal. When [[channels]] > 1,
+ * Output is in decibels when {@link normalRange} is `false`.
+ * If {@link channels} = 1, then the output is a single number
+ * representing the value of the input signal. When {@link channels} > 1,
  * then each channel is returned as a value in a number array.
  */
  getValue(): number | number[] {
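
A sketch of polling {@link getValue} on a multi-channel meter (the stereo source and 100 ms poll interval are illustrative):

const meter = new Tone.Meter({ channels: 2, normalRange: true });
const player = new Tone.Player("https://tonejs.github.io/audio/berklee/chime_1.mp3").connect(meter);
// returns [left, right] in the 0-1 range since normalRange is true
setInterval(() => console.log(meter.getValue()), 100);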

@@ -46,7 +46,7 @@ export class Waveform extends MeterBase<WaveformOptions> {
  /**
  * The size of analysis. This must be a power of two in the range 16 to 16384.
- * Determines the size of the array returned by [[getValue]].
+ * Determines the size of the array returned by {@link getValue}.
  */
  get size(): PowerOfTwo {
  return this._analyser.size;

@@ -17,7 +17,7 @@ export interface ChannelOptions extends ToneAudioNodeOptions {
  /**
  * Channel provides a channel strip interface with volume, pan, solo and mute controls.
- * See [[PanVol]] and [[Solo]]
+ * See {@link PanVol} and {@link Solo}
  * @example
  * // pan the incoming signal left and drop the volume 12db
  * const channel = new Tone.Channel(-0.25, -12);
@@ -91,7 +91,7 @@ export class Channel extends ToneAudioNode<ChannelOptions> {
  }
  /**
- * Solo/unsolo the channel. Soloing is only relative to other [[Channel]]s and [[Solo]] instances
+ * Solo/unsolo the channel. Soloing is only relative to other {@link Channel}s and {@link Solo} instances
  */
  get solo(): boolean {
  return this._solo.solo;
@@ -137,9 +137,9 @@ export class Channel extends ToneAudioNode<ChannelOptions> {
  /**
  * Send audio to another channel using a string. `send` is a lot like
- * [[connect]], except it uses a string instead of an object. This can
- * be useful in large applications to decouple sections since [[send]]
- * and [[receive]] can be invoked separately in order to connect an object
+ * {@link connect}, except it uses a string instead of an object. This can
+ * be useful in large applications to decouple sections since {@link send}
+ * and {@link receive} can be invoked separately in order to connect an object
  * @param name The channel name to send the audio
  * @param volume The amount of the signal to send.
  * Defaults to 0db, i.e. send the entire signal
@@ -158,7 +158,7 @@ export class Channel extends ToneAudioNode<ChannelOptions> {
  }
  /**
- * Receive audio from a channel which was connected with [[send]].
+ * Receive audio from a channel which was connected with {@link send}.
  * @param name The channel name to receive audio from.
  */
  receive(name: string): this {
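
The decoupling described in the send/receive docs above, as a minimal sketch (the bus name "reverb" and the -12 db send level are illustrative):

// in one part of the app: build a named bus once
const bus = new Tone.Channel().receive("reverb");
bus.chain(new Tone.Reverb(), Tone.getDestination());
// elsewhere: route into the bus by name, with no shared object reference
const channel = new Tone.Channel().toDestination();
channel.send("reverb", -12);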

@@ -9,7 +9,7 @@ import { optionsFromArguments } from "../../core/util/Defaults";
  export type MidSideMergeOptions = ToneAudioNodeOptions;
  /**
- * MidSideMerge merges the mid and side signal after they've been separated by [[MidSideSplit]]
+ * MidSideMerge merges the mid and side signal after they've been separated by {@link MidSideSplit}
  * ```
  * Mid = (Left+Right)/sqrt(2); // obtain mid-signal from left and right
  * Side = (Left-Right)/sqrt(2); // obtain side-signal from left and right
@@ -21,7 +21,7 @@ export class MidSideMerge extends ToneAudioNode<MidSideMergeOptions> {
  readonly name: string = "MidSideMerge";
  /**
- * There is no input, connect sources to either [[mid]] or [[side]] inputs.
+ * There is no input, connect sources to either {@link mid} or {@link side} inputs.
  */
  readonly input: undefined;

@@ -23,7 +23,7 @@ export class MidSideSplit extends ToneAudioNode<MidSideSplitOptions> {
  readonly input: Split;
  /**
- * There is no output node, use either [[mid]] or [[side]] outputs.
+ * There is no output node, use either {@link mid} or {@link side} outputs.
  */
  readonly output: undefined;
  /**

@@ -127,7 +127,7 @@ export class Recorder extends ToneAudioNode<RecorderOptions> {
  /**
  * Stop the recorder. Returns a promise with the recorded content until this point
- * encoded as [[mimeType]]
+ * encoded as {@link mimeType}
  */
  async stop(): Promise<Blob> {
  assert(this.state !== "stopped", "Recorder is not started");
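
A sketch of the stop/Blob flow documented above (the one-second timeout is illustrative):

const recorder = new Tone.Recorder();
const synth = new Tone.Synth().connect(recorder);
recorder.start();
synth.triggerAttackRelease("C4", 0.5);
setTimeout(async () => {
    const blob = await recorder.stop(); // encoded as recorder.mimeType
    console.log(URL.createObjectURL(blob));
}, 1000);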

@@ -13,8 +13,8 @@ export interface GateOptions extends ToneAudioNodeOptions {
  /**
  * Gate only passes a signal through when the incoming
- * signal exceeds a specified threshold. It uses [[Follower]] to follow the amplitude
- * of the incoming signal and compares it to the [[threshold]] value using [[GreaterThan]].
+ * signal exceeds a specified threshold. It uses {@link Follower} to follow the amplitude
+ * of the incoming signal and compares it to the {@link threshold} value using {@link GreaterThan}.
  *
  * @example
  * const gate = new Tone.Gate(-30, 0.2).toDestination();
@@ -90,7 +90,7 @@ export class Gate extends ToneAudioNode<GateOptions> {
  }
  /**
- * The attack/decay speed of the gate. See [[Follower.smoothing]]
+ * The attack/decay speed of the gate. See {@link Follower.smoothing}
  */
  get smoothing(): Time {
  return this._follower.smoothing;

@@ -11,7 +11,7 @@ export interface LimiterOptions extends ToneAudioNodeOptions {
  /**
  * Limiter will limit the loudness of an incoming signal.
- * Under the hood it's composed of a [[Compressor]] with a fast attack
+ * Under the hood it's composed of a {@link Compressor} with a fast attack
  * and release and max compression ratio.
  *
  * @example

@@ -11,8 +11,8 @@ export interface MidSideCompressorOptions extends ToneAudioNodeOptions {
  }
  /**
- * MidSideCompressor applies two different compressors to the [[mid]]
- * and [[side]] signal components of the input. See [[MidSideSplit]] and [[MidSideMerge]].
+ * MidSideCompressor applies two different compressors to the {@link mid}
+ * and {@link side} signal components of the input. See {@link MidSideSplit} and {@link MidSideMerge}.
  * @category Component
  */
  export class MidSideCompressor extends ToneAudioNode<MidSideCompressorOptions> {

@@ -16,7 +16,7 @@ export interface MultibandCompressorOptions extends ToneAudioNodeOptions {
  }
  /**
- * A compressor with separate controls over low/mid/high dynamics. See [[Compressor]] and [[MultibandSplit]]
+ * A compressor with separate controls over low/mid/high dynamics. See {@link Compressor} and {@link MultibandSplit}
  *
  * @example
  * const multiband = new Tone.MultibandCompressor({

@@ -11,8 +11,8 @@ export interface FrequencyEnvelopeOptions extends EnvelopeOptions {
  exponent: number;
  }
  /**
- * FrequencyEnvelope is an [[Envelope]] which ramps between [[baseFrequency]]
- * and [[octaves]]. It can also have an optional [[exponent]] to adjust the curve
+ * FrequencyEnvelope is an {@link Envelope} which ramps between {@link baseFrequency}
+ * and {@link octaves}. It can also have an optional {@link exponent} to adjust the curve
  * which it ramps.
  * @example
  * const oscillator = new Tone.Oscillator().toDestination().start();

@@ -14,7 +14,7 @@ export interface BiquadFilterOptions extends ToneAudioNodeOptions {
  /**
  * Thin wrapper around the native Web Audio [BiquadFilterNode](https://webaudio.github.io/web-audio-api/#biquadfilternode).
- * BiquadFilter is similar to [[Filter]] but doesn't have the option to set the "rolloff" value.
+ * BiquadFilter is similar to {@link Filter} but doesn't have the option to set the "rolloff" value.
  * @category Component
  */
  export class BiquadFilter extends ToneAudioNode<BiquadFilterOptions> {

@@ -14,7 +14,7 @@ interface LowpassCombFilterOptions extends ToneAudioNodeOptions {
  /**
  * A lowpass feedback comb filter. It is similar to
- * [[FeedbackCombFilter]], but includes a lowpass filter.
+ * {@link FeedbackCombFilter}, but includes a lowpass filter.
  * @category Component
  */
  export class LowpassCombFilter extends ToneAudioNode<LowpassCombFilterOptions> {

@@ -18,7 +18,7 @@ const dummyContext = new DummyContext();
  let globalContext: BaseContext = dummyContext;
  /**
- * Returns the default system-wide [[Context]]
+ * Returns the default system-wide {@link Context}
  * @category Core
  */
  export function getContext(): BaseContext {

@@ -13,7 +13,7 @@ interface TickParamOptions<TypeName extends UnitName> extends ParamOptions<TypeName> {
  }
  /**
- * A Param class just for computing ticks. Similar to the [[Param]] class,
+ * A Param class just for computing ticks. Similar to the {@link Param} class,
  * but offers conversion to BPM values as well as ability to compute tick
  * duration and elapsed ticks
  */
@@ -218,7 +218,7 @@ export class TickParam<TypeName extends "hertz" | "bpm"> extends Param<TypeName>
  }
  /**
- * The inverse of [[ticksToTime]]. Convert a duration in
+ * The inverse of {@link ticksToTime}. Convert a duration in
  * seconds to the corresponding number of ticks accounting for any
  * automation curves starting at the given time.
  * @param duration The time interval to convert to ticks.

@@ -10,7 +10,7 @@ export interface TransportEventOptions {
  }
  /**
- * TransportEvent is an internal class used by [[TransportClass]]
+ * TransportEvent is an internal class used by {@link TransportClass}
  * to schedule events. Do not invoke this class directly; it is
  * handled from within Tone.Transport.
  */

@@ -1,7 +1,7 @@
  import { Time, UnitMap, UnitName } from "../type/Units";
  /**
- * Abstract base class for [[Param]] and [[Signal]]
+ * Abstract base class for {@link Param} and {@link Signal}
  */
  export abstract class AbstractParam<TypeName extends UnitName> {
@@ -37,8 +37,8 @@ export abstract class AbstractParam<TypeName extends UnitName> {
  /**
  * Creates a schedule point with the current value at the current time.
- * Automation methods like [[linearRampToValueAtTime]] and [[exponentialRampToValueAtTime]]
- * require a starting automation value usually set by [[setValueAtTime]]. This method
+ * Automation methods like {@link linearRampToValueAtTime} and {@link exponentialRampToValueAtTime}
+ * require a starting automation value usually set by {@link setValueAtTime}. This method
  * is useful since it will do a `setValueAtTime` with whatever the currently computed
  * value at the given time is.
  * @param time When to add a ramp point.
@@ -196,7 +196,7 @@ export abstract class AbstractParam<TypeName extends UnitName> {
  abstract cancelScheduledValues(time: Time): this;
  /**
- * This is similar to [[cancelScheduledValues]] except
+ * This is similar to {@link cancelScheduledValues} except
  * it holds the automated value at time until the next automated event.
  * @example
  * return Tone.Offline(() => {

@@ -349,7 +349,7 @@ export class Context extends BaseContext {
  /**
  * Create an audio worklet node from a name and options. The module
- * must first be loaded using [[addAudioWorkletModule]].
+ * must first be loaded using {@link addAudioWorkletModule}.
  */
  createAudioWorkletNode(
  name: string,
@@ -413,7 +413,7 @@
  * The amount of time into the future events are scheduled. Giving Web Audio
  * a short amount of time into the future to schedule events can reduce clicks and
  * improve performance. This value can be set to 0 to get the lowest latency.
- * Adjusting this value also affects the [[updateInterval]].
+ * Adjusting this value also affects the {@link updateInterval}.
  */
  get lookAhead(): Seconds {
  return this._lookAhead;
@@ -452,7 +452,7 @@
  }
  /**
- * The current audio context time plus a short [[lookAhead]].
+ * The current audio context time plus a short {@link lookAhead}.
  * @example
  * setInterval(() => {
  * console.log("now", Tone.now());
@@ -463,11 +463,11 @@
  }
  /**
- * The current audio context time without the [[lookAhead]].
- * In most cases it is better to use [[now]] instead of [[immediate]] since
- * with [[now]] the [[lookAhead]] is applied equally to _all_ components including internal components,
- * making sure that everything is scheduled in sync. Mixing [[now]] and [[immediate]]
- * can cause some timing issues. If no lookAhead is desired, you can set the [[lookAhead]] to `0`.
+ * The current audio context time without the {@link lookAhead}.
+ * In most cases it is better to use {@link now} instead of {@link immediate} since
+ * with {@link now} the {@link lookAhead} is applied equally to _all_ components including internal components,
+ * making sure that everything is scheduled in sync. Mixing {@link now} and {@link immediate}
+ * can cause some timing issues. If no lookAhead is desired, you can set the {@link lookAhead} to `0`.
  */
  immediate(): Seconds {
  return this._context.currentTime;
@@ -475,7 +475,7 @@
  /**
  * Starts the audio context from a suspended state. This is required
- * to initially start the AudioContext. See [[start]]
+ * to initially start the AudioContext. See {@link start}
  */
  resume(): Promise<void> {
  if (isAudioContext(this._context)) {
@@ -593,7 +593,7 @@
  }
  /**
- * Clear the function scheduled by [[setInterval]]
+ * Clear the function scheduled by {@link setInterval}
  */
  clearInterval(id: number): this {
  return this.clearTimeout(id);
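
A short sketch of the now/immediate guidance above (the note, duration, and offset values are illustrative):

const synth = new Tone.Synth().toDestination();
// schedule against now() so the lookAhead is applied consistently everywhere
synth.triggerAttackRelease("C4", "8n", Tone.now() + 0.5);
// if no lookAhead is desired, zero it on the context rather than mixing in immediate()
Tone.getContext().lookAhead = 0;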

@@ -16,7 +16,7 @@ export interface ListenerOptions extends ToneAudioNodeOptions{
  /**
  * Tone.Listener is a thin wrapper around the AudioListener. Listener combined
- * with [[Panner3D]] makes up the Web Audio API's 3D panning system. Panner3D allows you
+ * with {@link Panner3D} makes up the Web Audio API's 3D panning system. Panner3D allows you
  * to place sounds in 3D and Listener allows you to navigate the 3D sound environment from
  * a first-person perspective. There is only one listener per audio context.
  */

@@ -6,7 +6,7 @@ import { ToneAudioBuffer } from "./ToneAudioBuffer";
  /**
  * Generate a buffer by rendering all of the Tone.js code within the callback using the OfflineAudioContext.
  * The OfflineAudioContext is capable of rendering much faster than real time in many cases.
- * The callback function also passes in an offline instance of [[Context]] which can be used
+ * The callback function also passes in an offline instance of {@link Context} which can be used
  * to schedule events along the Transport.
  * @param callback All Tone.js nodes which are created and scheduled within this callback are recorded into the output Buffer.
  * @param duration the amount of time to record for.
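
A minimal sketch of the offline-rendering callback described above (the one-second duration and the scheduled note are illustrative):

Tone.Offline(({ transport }) => {
    const synth = new Tone.Synth().toDestination();
    transport.schedule((time) => synth.triggerAttackRelease("C4", 0.3, time), 0.1);
    transport.start(0);
}, 1).then((buffer) => console.log(buffer.duration)); // ~1 s ToneAudioBuffer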

@@ -212,7 +212,7 @@ export abstract class ToneAudioNode<Options extends ToneAudioNodeOptions = ToneAudioNodeOptions>
  /**
  * Connect the output to the context's destination node.
- * See [[toDestination]]
+ * See {@link toDestination}
  * @deprecated
  */
  toMaster(): this {

@@ -95,7 +95,7 @@ export abstract class ToneWithContext<Options extends ToneWithContextOptions> extends Tone {
  /**
  * Convert the incoming time to seconds.
- * This is calculated against the current [[TransportClass]] bpm
+ * This is calculated against the current {@link TransportClass} bpm
  * @example
  * const gain = new Tone.Gain();
  * setInterval(() => console.log(gain.toSeconds("4n")), 100);

@@ -137,7 +137,7 @@ export class TimeClass<Type extends Seconds | Ticks = Seconds, Unit extends string = TimeBaseUnit>
  /**
  * Create a TimeClass from a time string or number. The time is computed against the
- * global Tone.Context. To use a specific context, use [[TimeClass]]
+ * global Tone.Context. To use a specific context, use {@link TimeClass}
  * @param value A value which represents time
  * @param units The value's units if they can't be inferred by the value.
  * @category Unit

@@ -2,7 +2,7 @@ import { addToWorklet } from "./WorkletGlobalScope";
  const toneAudioWorkletProcessor = /* javascript */ `
  /**
- * The base AudioWorkletProcessor for use in Tone.js. Works with the [[ToneAudioWorklet]].
+ * The base AudioWorkletProcessor for use in Tone.js. Works with the {@link ToneAudioWorklet}.
  */
  class ToneAudioWorkletProcessor extends AudioWorkletProcessor {

@@ -8,7 +8,7 @@ export interface AutoPannerOptions extends LFOEffectOptions {
  }
  /**
- * AutoPanner is a [[Panner]] with an [[LFO]] connected to the pan amount.
+ * AutoPanner is a {@link Panner} with an {@link LFO} connected to the pan amount.
  * [Related Reading](https://www.ableton.com/en/blog/autopan-chopper-effect-and-more-liveschool/).
  *
  * @example

@@ -19,7 +19,7 @@ export interface AutoWahOptions extends EffectOptions {
  }
  /**
- * AutoWah connects a [[Follower]] to a [[Filter]].
+ * AutoWah connects a {@link Follower} to a {@link Filter}.
  * The frequency of the filter follows the input amplitude curve.
  * Inspiration from [Tuna.js](https://github.com/Dinahmoe/tuna).
  *

@@ -16,8 +16,8 @@ export interface ChorusOptions extends StereoFeedbackEffectOptions {
  }
  /**
- * Chorus is a stereo chorus effect composed of a left and right delay with an [[LFO]] applied to the delayTime of each channel.
- * When [[feedback]] is set to a value larger than 0, you also get Flanger-type effects.
+ * Chorus is a stereo chorus effect composed of a left and right delay with an {@link LFO} applied to the delayTime of each channel.
+ * When {@link feedback} is set to a value larger than 0, you also get Flanger-type effects.
  * Inspiration from [Tuna.js](https://github.com/Dinahmoe/tuna/blob/master/tuna.js).
  * Read more on the chorus effect on [Sound On Sound](http://www.soundonsound.com/sos/jun04/articles/synthsecrets.htm).
  *
@@ -196,7 +196,7 @@ export class Chorus extends StereoFeedbackEffect<ChorusOptions> {
  }
  /**
- * Sync the filter to the transport. See [[LFO.sync]]
+ * Sync the filter to the transport. See {@link LFO.sync}
  */
  sync(): this {
  this._lfoL.sync();
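
A sketch of syncing the chorus LFOs to the transport as documented above (the constructor values are illustrative):

const chorus = new Tone.Chorus(4, 2.5, 0.5).toDestination().start();
const synth = new Tone.Synth().connect(chorus);
chorus.sync(); // the LFOs now follow Transport start/stop/pause
Tone.getTransport().start();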

@@ -23,7 +23,7 @@ const allpassFilterFrequencies = [225, 556, 441, 341];
  /**
  * Freeverb is a reverb based on [Freeverb](https://ccrma.stanford.edu/~jos/pasp/Freeverb.html).
  * Read more on reverb on [Sound On Sound](https://web.archive.org/web/20160404083902/http://www.soundonsound.com:80/sos/feb01/articles/synthsecrets.asp).
- * Freeverb is now implemented with an AudioWorkletNode, which may result in performance degradation on some platforms. Consider using [[Reverb]].
+ * Freeverb is now implemented with an AudioWorkletNode, which may result in performance degradation on some platforms. Consider using {@link Reverb}.
  * @example
  * const freeverb = new Tone.Freeverb().toDestination();
  * freeverb.dampening = 1000;

@@ -28,8 +28,8 @@ const allpassFilterFreqs = [347, 113, 37];
  /**
  * JCReverb is a simple [Schroeder Reverberator](https://ccrma.stanford.edu/~jos/pasp/Schroeder_Reverberators.html)
  * tuned by John Chowning in 1970.
- * It is made up of three allpass filters and four [[FeedbackCombFilter]]s.
- * JCReverb is now implemented with an AudioWorkletNode, which may result in performance degradation on some platforms. Consider using [[Reverb]].
+ * It is made up of three allpass filters and four {@link FeedbackCombFilter}s.
+ * JCReverb is now implemented with an AudioWorkletNode, which may result in performance degradation on some platforms. Consider using {@link Reverb}.
  * @example
  * const reverb = new Tone.JCReverb(0.4).toDestination();
  * const delay = new Tone.FeedbackDelay(0.5);

@@ -76,7 +76,7 @@ export abstract class LFOEffect<Options extends LFOEffectOptions> extends Effect<Options> {
  }
  /**
- * Sync the filter to the transport. See [[LFO.sync]]
+ * Sync the filter to the transport. See {@link LFO.sync}
  */
  sync(): this {
  this._lfo.sync();
@@ -92,7 +92,7 @@ export abstract class LFOEffect<Options extends LFOEffectOptions> extends Effect<Options> {
  }
  /**
- * The type of the LFO's oscillator: See [[Oscillator.type]]
+ * The type of the LFO's oscillator: See {@link Oscillator.type}
  * @example
  * const autoFilter = new Tone.AutoFilter().start().toDestination();
  * const noise = new Tone.Noise().start().connect(autoFilter);

@@ -18,7 +18,7 @@ interface ReverbOptions extends EffectOptions {
  * Generate an Impulse Response Buffer
  * with Tone.Offline then feeds the IR into ConvolverNode.
  * The impulse response generation is async, so you have
- * to wait until [[ready]] resolves before it will make a sound.
+ * to wait until {@link ready} resolves before it will make a sound.
  *
  * Inspiration from [ReverbGen](https://github.com/adelespinasse/reverbGen).
  * Copyright (c) 2014 Alan deLespinasse Apache 2.0 License.
@@ -45,8 +45,8 @@ export class Reverb extends Effect<ReverbOptions> {
  private _preDelay: Seconds;
  /**
- * Resolves when the reverb buffer is generated. Whenever either [[decay]]
- * or [[preDelay]] is set, you have to wait until [[ready]] resolves
+ * Resolves when the reverb buffer is generated. Whenever either {@link decay}
+ * or {@link preDelay} is set, you have to wait until {@link ready} resolves
  * before the IR is generated with the latest values.
  */
  ready: Promise<void> = Promise.resolve();
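
The {@link ready} flow above as a sketch (the decay values are illustrative):

const reverb = new Tone.Reverb(2).toDestination();
reverb.decay = 4; // kicks off a new IR render in the background
reverb.ready.then(() => {
    new Tone.Synth().connect(reverb).triggerAttackRelease("C4", "8n");
});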

@@ -7,7 +7,7 @@ export interface StereoXFeedbackEffectOptions extends StereoFeedbackEffectOptions {
  }
  /**
- * Just like a [[StereoFeedbackEffect]], but the feedback is routed from left to right
+ * Just like a {@link StereoFeedbackEffect}, but the feedback is routed from left to right
  * and right to left instead of on the same channel.
  * ```
  * +--------------------------------+ feedbackL <-----------------------------------+

@@ -15,7 +15,7 @@ export interface TremoloOptions extends StereoEffectOptions {
  }
  /**
- * Tremolo modulates the amplitude of an incoming signal using an [[LFO]].
+ * Tremolo modulates the amplitude of an incoming signal using an {@link LFO}.
  * The effect is a stereo effect where the modulation phase is inverted in each channel.
  *
  * @example

@@ -8,8 +8,8 @@ import { Seconds } from "./core/type/Units";
  export { supported } from "./core/context/AudioContext";
  /**
- * The current audio context time of the global [[Context]].
- * See [[Context.now]]
+ * The current audio context time of the global {@link Context}.
+ * See {@link Context.now}
  * @category Core
  */
  export function now(): Seconds {
@@ -17,8 +17,8 @@ export function now(): Seconds {
  }
  /**
- * The current audio context time of the global [[Context]] without the [[Context.lookAhead]]
- * See [[Context.immediate]]
+ * The current audio context time of the global {@link Context} without the {@link Context.lookAhead}
+ * See {@link Context.immediate}
  * @category Core
  */
  export function immediate(): Seconds {
@@ -27,7 +27,7 @@ export function immediate(): Seconds {
  /**
  * The Transport object belonging to the global Tone.js Context.
- * See [[TransportClass]]
+ * See {@link TransportClass}
  * @category Core
  * @deprecated Use {@link getTransport} instead
  */
@@ -35,7 +35,7 @@ export const Transport = getContext().transport;
  /**
  * The Transport object belonging to the global Tone.js Context.
- * See [[TransportClass]]
+ * See {@link TransportClass}
  * @category Core
  */
  export function getTransport(): import("./core/clock/Transport").TransportClass {
@@ -44,7 +44,7 @@ export function getTransport(): import("./core/clock/Transport").TransportClass {
  /**
  * The Destination (output) belonging to the global Tone.js Context.
- * See [[DestinationClass]]
+ * See {@link DestinationClass}
  * @category Core
  * @deprecated Use {@link getDestination} instead
  */
@@ -57,7 +57,7 @@ export const Master = getContext().destination;
  /**
  * The Destination (output) belonging to the global Tone.js Context.
- * See [[DestinationClass]]
+ * See {@link DestinationClass}
  * @category Core
  */
  export function getDestination(): import("./core/context/Destination").DestinationClass {
@@ -65,14 +65,14 @@ export function getDestination(): import("./core/context/Destination").DestinationClass {
  }
  /**
- * The [[ListenerClass]] belonging to the global Tone.js Context.
+ * The {@link ListenerClass} belonging to the global Tone.js Context.
  * @category Core
  * @deprecated Use {@link getListener} instead
  */
  export const Listener = getContext().listener;
  /**
- * The [[ListenerClass]] belonging to the global Tone.js Context.
+ * The {@link ListenerClass} belonging to the global Tone.js Context.
  * @category Core
  */
  export function getListener(): import("./core/context/Listener").ListenerClass {
@@ -81,7 +81,7 @@ export function getListener(): import("./core/context/Listener").ListenerClass {
  /**
  * Draw is used to synchronize the draw frame with the Transport's callbacks.
- * See [[DrawClass]]
+ * See {@link DrawClass}
  * @category Core
  * @deprecated Use {@link getDraw} instead
  */
@@ -90,7 +90,7 @@ export const Draw = getContext().draw;
  /**
  * Get the singleton attached to the global context.
  * Draw is used to synchronize the draw frame with the Transport's callbacks.
- * See [[DrawClass]]
+ * See {@link DrawClass}
  * @category Core
  */
  export function getDraw(): import("./core/util/Draw").DrawClass {
@@ -99,14 +99,14 @@ export function getDraw(): import("./core/util/Draw").DrawClass {
  /**
  * A reference to the global context
- * See [[Context]]
+ * See {@link Context}
  * @deprecated Use {@link getContext} instead
  */
  export const context = getContext();
  /**
  * Promise which resolves when all of the loading promises are resolved.
- * Alias for static [[ToneAudioBuffer.loaded]] method.
+ * Alias for static {@link ToneAudioBuffer.loaded} method.
  * @category Core
  */
  export function loaded() {
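
A sketch of the {@link ToneAudioBuffer.loaded} alias in use (the URL is reused from elsewhere in this diff):

const player = new Tone.Player("https://tonejs.github.io/audio/berklee/chime_1.mp3").toDestination();
Tone.loaded().then(() => player.start());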

@@ -18,7 +18,7 @@ export interface DuoSynthOptions extends MonophonicOptions {
  }
  /**
- * DuoSynth is a monophonic synth composed of two [[MonoSynth]]s run in parallel with control over the
+ * DuoSynth is a monophonic synth composed of two {@link MonoSynth}s run in parallel with control over the
  * frequency ratio between the two voices and vibrato effect.
  * @example
  * const duoSynth = new Tone.DuoSynth().toDestination();

@@ -67,7 +67,7 @@ export abstract class Instrument<Options extends InstrumentOptions> extends ToneAudioNode<Options> {
  /**
  * Sync the instrument to the Transport. All subsequent calls of
- * [[triggerAttack]] and [[triggerRelease]] will be scheduled along the transport.
+ * {@link triggerAttack} and {@link triggerRelease} will be scheduled along the transport.
  * @example
  * const fmSynth = new Tone.FMSynth().toDestination();
  * fmSynth.volume.value = -6;

@@ -91,7 +91,7 @@ export class MetalSynth extends Monophonic<MetalSynthOptions> {
  /**
  * The envelope which is connected both to the
  * amplitude and a highpass filter's cutoff frequency.
- * The lower-limit of the filter is controlled by the [[resonance]]
+ * The lower-limit of the filter is controlled by the {@link resonance}
  */
  readonly envelope: Envelope;
@@ -231,7 +231,7 @@ export class MetalSynth extends Monophonic<MetalSynthOptions> {
  /**
  * The modulationIndex of the oscillators which make up the source.
- * see [[FMOscillator.modulationIndex]]
+ * see {@link FMOscillator.modulationIndex}
  * @min 1
  * @max 100
  */

@@ -17,7 +17,7 @@ export interface NoiseSynthOptions extends InstrumentOptions {
  }
  /**
- * Tone.NoiseSynth is composed of [[Noise]] through an [[AmplitudeEnvelope]].
+ * Tone.NoiseSynth is composed of {@link Noise} through an {@link AmplitudeEnvelope}.
  * ```
  * +-------+ +-------------------+
  * | Noise +>--> AmplitudeEnvelope +>--> Output

@@ -111,7 +111,7 @@ export class PluckSynth extends Instrument<PluckSynthOptions> {
  }
  /**
- * Ramp down the [[resonance]] to 0 over the duration of the release time.
+ * Ramp down the {@link resonance} to 0 over the duration of the release time.
  */
  triggerRelease(time?: Time): this {
  this._lfcf.resonance.linearRampTo(0, this.release, time);
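
A sketch of the ramped release documented above (the 2-second release and "+1" offset are illustrative):

const pluck = new Tone.PluckSynth({ release: 2 }).toDestination();
pluck.triggerAttack("C4");
pluck.triggerRelease("+1"); // resonance ramps to 0 over the release time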

@@ -33,7 +33,7 @@ export interface SamplerOptions extends InstrumentOptions {
  * were not explicitly included, which can save loading time.
  *
  * For sample or buffer playback where repitching is not necessary,
- * use [[Player]].
+ * use {@link Player}.
  * @example
  * const sampler = new Tone.Sampler({
  * urls: {

@@ -17,7 +17,7 @@ export interface SynthOptions extends MonophonicOptions {
  }
  /**
- * Synth is composed simply of an [[OmniOscillator]] routed through an [[AmplitudeEnvelope]].
+ * Synth is composed simply of an {@link OmniOscillator} routed through an {@link AmplitudeEnvelope}.
  * ```
  * +----------------+ +-------------------+
  * | OmniOscillator +>--> AmplitudeEnvelope +>--> Output

@@ -39,7 +39,7 @@ export class Add extends Signal {
  readonly addend: Param<"number"> = this._param;
  /**
- * @param value If no value is provided, will sum the input and [[addend]].
+ * @param value If no value is provided, will sum the input and {@link addend}.
  */
  constructor(value?: number);
  constructor(options?: Partial<SignalOptions<"number">>);

@@ -4,7 +4,7 @@ import { WaveShaper } from "./WaveShaper";
  /**
  * AudioToGain converts an input in AudioRange [-1,1] to NormalRange [0,1].
- * See [[GainToAudio]].
+ * See {@link GainToAudio}.
  * @category Signal
  */
  export class AudioToGain extends SignalOperator<ToneAudioNodeOptions> {

@@ -4,7 +4,7 @@ import { WaveShaper } from "./WaveShaper";
  /**
  * GainToAudio converts an input in NormalRange [0,1] to AudioRange [-1,1].
- * See [[AudioToGain]].
+ * See {@link AudioToGain}.
  * @category Signal
  */
  export class GainToAudio extends SignalOperator<ToneAudioNodeOptions> {

@@ -43,7 +43,7 @@ export class Multiply<TypeName extends "number" | "positive" = "number"> extends Signal<TypeName> {
  input: InputNode;
  /**
- * The product of the input and [[factor]]
+ * The product of the input and {@link factor}
  */
  output: OutputNode;
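
The input/{@link factor} product as a two-line sketch:

const mult = new Tone.Multiply(3); // factor = 3
const sig = new Tone.Signal(4).connect(mult); // mult outputs the product: 4 * 3 = 12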

@@ -50,8 +50,8 @@ export class ScaleExp extends Scale<ScaleExpOptions> {
  }
  /**
- * Instead of interpolating linearly between the [[min]] and
- * [[max]] values, setting the exponent will interpolate between
+ * Instead of interpolating linearly between the {@link min} and
+ * {@link max} values, setting the exponent will interpolate between
  * the two values with an exponential curve.
  */
  get exponent(): Positive {

@@ -192,7 +192,7 @@ export class Signal<TypeName extends UnitName = "number"> extends ToneAudioNode<SignalOptions<TypeName>>
  }
  /**
- * See [[Param.apply]].
+ * See {@link Param.apply}.
  */
  apply(param: Param | AudioParam): this {
  this._param.apply(param);

@@ -6,7 +6,7 @@ import { ToneConstantSource } from "./ToneConstantSource";
  import { OutputNode } from "../core/context/ToneAudioNode";
  /**
- * Adds the ability to synchronize the signal to the [[TransportClass]]
+ * Adds the ability to synchronize the signal to the {@link TransportClass}
  * @category Signal
  */
  export class SyncedSignal<TypeName extends UnitName = "number"> extends Signal<TypeName> {

@@ -432,7 +432,7 @@ export class Player extends Source<PlayerOptions> {
  }
  /**
- * If the buffer should be reversed. Note that this sets the underlying [[ToneAudioBuffer.reverse]], so
+ * If the buffer should be reversed. Note that this sets the underlying {@link ToneAudioBuffer.reverse}, so
  * if multiple players are pointing at the same ToneAudioBuffer, they will all be reversed.
  * @example
  * const player = new Tone.Player("https://tonejs.github.io/audio/berklee/chime_1.mp3").toDestination();

@@ -23,7 +23,7 @@ export interface PlayersOptions extends SourceOptions {
  }
  /**
- * Players combines multiple [[Player]] objects.
+ * Players combines multiple {@link Player} objects.
  * @category Source
  */
  export class Players extends ToneAudioNode<PlayersOptions> {
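
A sketch of grouping players by name (the "chime" key is hypothetical; the URL is reused from elsewhere in this diff):

const players = new Tone.Players({
    chime: "https://tonejs.github.io/audio/berklee/chime_1.mp3",
}).toDestination();
Tone.loaded().then(() => players.player("chime").start());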

@@ -94,7 +94,7 @@ export class LFO extends ToneAudioNode<LFOOptions> {
  private _units: UnitName = "number";
  /**
- * If the input value is converted using the [[units]]
+ * If the input value is converted using the {@link units}
  */
  convert = true;
@@ -238,7 +238,7 @@ export class LFO extends ToneAudioNode<LFOOptions> {
  }
  /**
- * The type of the oscillator: See [[Oscillator.type]]
+ * The type of the oscillator: See {@link Oscillator.type}
  */
  get type(): ToneOscillatorType {
  return this._oscillator.type;
@@ -249,7 +249,7 @@ export class LFO extends ToneAudioNode<LFOOptions> {
  }
  /**
- * The oscillator's partials array: See [[Oscillator.partials]]
+ * The oscillator's partials array: See {@link Oscillator.partials}
  */
  get partials(): number[] {
  return this._oscillator.partials;
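
A sketch of an LFO driving a parameter, using the type hook documented above (the rate and frequency range are illustrative):

const filter = new Tone.Filter().toDestination();
const lfo = new Tone.LFO("4n", 400, 4000).start();
lfo.connect(filter.frequency); // sweep the cutoff between 400 Hz and 4 kHz
lfo.type = "triangle"; // accepts the same values as Oscillator.type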

@@ -192,7 +192,7 @@ export class OmniOscillator<OscType extends AnyOscillator>
  /**
  * The value is an empty array when the type is not "custom".
  * This is not available on "pwm" and "pulse" oscillator types.
- * See [[Oscillator.partials]]
+ * See {@link Oscillator.partials}
  */
  get partials(): number[] {
  return this._oscillator.partials;
@@ -297,7 +297,7 @@ export class OmniOscillator<OscType extends AnyOscillator>
  }
  /**
- * The base type of the oscillator. See [[Oscillator.baseType]]
+ * The base type of the oscillator. See {@link Oscillator.baseType}
  * @example
  * const omniOsc = new Tone.OmniOscillator(440, "fmsquare4");
  * console.log(omniOsc.sourceType, omniOsc.baseType, omniOsc.partialCount);
@@ -315,7 +315,7 @@ export class OmniOscillator<OscType extends AnyOscillator>
  /**
  * The width of the oscillator when sourceType === "pulse".
- * See [[PWMOscillator]]
+ * See {@link PWMOscillator}
  */
  get width(): IsPulseOscillator<OscType, Signal<"audioRange">> {
  if (this._getOscType(this._oscillator, "pulse")) {
@@ -327,7 +327,7 @@ export class OmniOscillator<OscType extends AnyOscillator>
  /**
  * The number of detuned oscillators when sourceType === "fat".
- * See [[FatOscillator.count]]
+ * See {@link FatOscillator.count}
  */
  get count(): IsFatOscillator<OscType, number> {
  if (this._getOscType(this._oscillator, "fat")) {
@@ -344,7 +344,7 @@ export class OmniOscillator<OscType extends AnyOscillator>
  /**
  * The detune spread between the oscillators when sourceType === "fat".
- * See [[FatOscillator.spread]]
+ * See {@link FatOscillator.spread}
  */
  get spread(): IsFatOscillator<OscType, Cents> {
  if (this._getOscType(this._oscillator, "fat")) {
@@ -361,7 +361,7 @@ export class OmniOscillator<OscType extends AnyOscillator>
  /**
  * The type of the modulator oscillator. Only if the oscillator is set to "am" or "fm" types.
- * See [[AMOscillator]] or [[FMOscillator]]
+ * See {@link AMOscillator} or {@link FMOscillator}
  */
  get modulationType(): IsAmOrFmOscillator<OscType, ToneOscillatorType> {
  if (this._getOscType(this._oscillator, "fm") || this._getOscType(this._oscillator, "am")) {
@@ -378,7 +378,7 @@ export class OmniOscillator<OscType extends AnyOscillator>
  /**
  * The modulation index when the sourceType === "fm"
- * See [[FMOscillator]].
+ * See {@link FMOscillator}.
  */
  get modulationIndex(): IsFMOscillator<OscType, Signal<"positive">> {
  if (this._getOscType(this._oscillator, "fm")) {
@@ -390,7 +390,7 @@ export class OmniOscillator<OscType extends AnyOscillator>
  /**
  * Harmonicity is the frequency ratio between the carrier and the modulator oscillators.
- * See [[AMOscillator]] or [[FMOscillator]]
+ * See {@link AMOscillator} or {@link FMOscillator}
  */
  get harmonicity(): IsAmOrFmOscillator<OscType, Signal<"positive">> {
  if (this._getOscType(this._oscillator, "fm") || this._getOscType(this._oscillator, "am")) {
@@ -402,7 +402,7 @@ export class OmniOscillator<OscType extends AnyOscillator>
  /**
  * The modulationFrequency Signal of the oscillator when sourceType === "pwm"
- * see [[PWMOscillator]]
+ * see {@link PWMOscillator}
  * @min 0.1
  * @max 5
  */

@@ -14,7 +14,7 @@ export interface ToneOscillatorNodeOptions extends OneShotSourceOptions {
  /**
  * Wrapper around the native fire-and-forget OscillatorNode.
  * Adds the ability to reschedule the stop method.
- * ***[[Oscillator]] is better for most use-cases***
+ * ***{@link Oscillator} is better for most use-cases***
  * @category Source
  */
  export class ToneOscillatorNode extends OneShotSource<ToneOscillatorNodeOptions> {