{"version":3,"file":"simple-statistics.min.js","sources":["../src/sum.js","../src/mean.js","../src/sum_nth_power_deviations.js","../src/variance.js","../src/standard_deviation.js","../src/mode_sorted.js","../src/numeric_sort.js","../src/min.js","../src/max.js","../src/sum_simple.js","../src/quantile_sorted.js","../src/quickselect.js","../src/quantile.js","../src/quantile_rank_sorted.js","../src/interquartile_range.js","../src/median.js","../src/median_absolute_deviation.js","../src/shuffle_in_place.js","../src/shuffle.js","../src/sample.js","../src/make_matrix.js","../src/unique_count_sorted.js","../src/ckmeans.js","../src/sample_covariance.js","../src/sample_variance.js","../src/sample_standard_deviation.js","../src/sample_correlation.js","../src/combine_means.js","../src/mean_simple.js","../src/root_mean_square.js","../src/bayesian_classifier.js","../src/perceptron.js","../src/epsilon.js","../src/factorial.js","../src/gammaln.js","../src/chi_squared_distribution_table.js","../src/kernel_density_estimation.js","../src/standard_normal_table.js","../src/error_function.js","../src/inverse_error_function.js","../src/sign.js","../src/euclidean_distance.js","../src/k_means_cluster.js","../src/silhouette.js","../src/relative_error.js","../src/add_to_mean.js","../src/approx_equal.js","../src/bernoulli_distribution.js","../src/binomial_distribution.js","../src/bisect.js","../src/chi_squared_goodness_of_fit.js","../src/chunk.js","../src/coefficient_of_variation.js","../src/combinations.js","../src/combinations_replacement.js","../src/combine_variances.js","../src/cumulative_std_logistic_probability.js","../src/cumulative_std_normal_probability.js","../src/equal_interval_breaks.js","../src/extent.js","../src/extent_sorted.js","../src/gamma.js","../src/geometric_mean.js","../src/harmonic_mean.js","../src/jenks.js","../src/jenks_matrices.js","../src/jenks_breaks.js","../src/linear_regression.js","../src/linear_regression_line.js","../src/log_average.js","../src/logit.js","../src/max_sorted.js","../src/median_sorted.js","../src/min_sorted.js","../src/mode.js","../src/mode_fast.js","../src/permutation_test.js","../src/permutations_heap.js","../src/poisson_distribution.js","../src/probit.js","../src/product.js","../src/quantile_rank.js","../src/r_squared.js","../src/sample_kurtosis.js","../src/sample_rank_correlation.js","../src/sample_skewness.js","../src/sample_with_replacement.js","../src/silhouette_metric.js","../src/subtract_from_mean.js","../src/t_test.js","../src/t_test_two_sample.js","../src/wilcoxon_rank_sum.js","../src/z_score.js"],"sourcesContent":["/**\n * Our default sum is the [Kahan-Babuska algorithm](https://pdfs.semanticscholar.org/1760/7d467cda1d0277ad272deb2113533131dc09.pdf).\n * This method is an improvement over the classical\n * [Kahan summation algorithm](https://en.wikipedia.org/wiki/Kahan_summation_algorithm).\n * It aims at computing the sum of a list of numbers while correcting for\n * floating-point errors. Traditionally, sums are calculated as many\n * successive additions, each one with its own floating-point roundoff. These\n * losses in precision add up as the number of numbers increases. 
This alternative\n * algorithm is more accurate than the simple way of calculating sums by simple\n * addition.\n *\n * This runs in `O(n)`, linear time, with respect to the length of the array.\n *\n * @param {Array} x input\n * @return {number} sum of all input numbers\n * @example\n * sum([1, 2, 3]); // => 6\n */\nfunction sum(x) {\n // If the array is empty, we needn't bother computing its sum\n if (x.length === 0) {\n return 0;\n }\n\n // Initializing the sum as the first number in the array\n let sum = x[0];\n\n // Keeping track of the floating-point error correction\n let correction = 0;\n\n let transition;\n\n if (typeof sum !== \"number\") {\n return Number.NaN;\n }\n\n for (let i = 1; i < x.length; i++) {\n if (typeof x[i] !== \"number\") {\n return Number.NaN;\n }\n transition = sum + x[i];\n\n // Here we need to update the correction in a different fashion\n // if the new absolute value is greater than the absolute sum\n if (Math.abs(sum) >= Math.abs(x[i])) {\n correction += sum - transition + x[i];\n } else {\n correction += x[i] - transition + sum;\n }\n\n sum = transition;\n }\n\n // Returning the corrected sum\n return sum + correction;\n}\n\nexport default sum;\n","import sum from \"./sum.js\";\n\n/**\n * The mean, _also known as average_,\n * is the sum of all values over the number of values.\n * This is a [measure of central tendency](https://en.wikipedia.org/wiki/Central_tendency):\n * a method of finding a typical or central value of a set of numbers.\n *\n * This runs in `O(n)`, linear time, with respect to the length of the array.\n *\n * @param {Array} x sample of one or more data points\n * @throws {Error} if the length of x is less than one\n * @returns {number} mean\n * @example\n * mean([0, 10]); // => 5\n */\nfunction mean(x) {\n if (x.length === 0) {\n throw new Error(\"mean requires at least one data point\");\n }\n\n return sum(x) / x.length;\n}\n\nexport default mean;\n","import mean from \"./mean.js\";\n\n/**\n * The sum of deviations to the Nth power.\n * When n=2 it's the sum of squared deviations.\n * When n=3 it's the sum of cubed deviations.\n *\n * @param {Array} x\n * @param {number} n power\n * @returns {number} sum of nth power deviations\n *\n * @example\n * var input = [1, 2, 3];\n * // since the variance of a set is the mean squared\n * // deviations, we can calculate that with sumNthPowerDeviations:\n * sumNthPowerDeviations(input, 2) / input.length;\n */\nfunction sumNthPowerDeviations(x, n) {\n const meanValue = mean(x);\n let sum = 0;\n let tempValue;\n let i;\n\n // This is an optimization: when n is 2 (we're computing a number squared),\n // multiplying the number by itself is significantly faster than using\n // the Math.pow method.\n if (n === 2) {\n for (i = 0; i < x.length; i++) {\n tempValue = x[i] - meanValue;\n sum += tempValue * tempValue;\n }\n } else {\n for (i = 0; i < x.length; i++) {\n sum += Math.pow(x[i] - meanValue, n);\n }\n }\n\n return sum;\n}\n\nexport default sumNthPowerDeviations;\n","import sumNthPowerDeviations from \"./sum_nth_power_deviations.js\";\n\n/**\n * The [variance](http://en.wikipedia.org/wiki/Variance)\n * is the sum of squared deviations from the mean.\n *\n * This is an implementation of variance, not sample variance:\n * see the `sampleVariance` method if you want a sample measure.\n *\n * @param {Array} x a population of one or more data points\n * @returns {number} variance: a value greater than or equal to zero.\n * zero indicates that all values are identical.\n * @throws {Error} if x's length 
is 0\n * @example\n * variance([1, 2, 3, 4, 5, 6]); // => 2.9166666666666665\n */\nfunction variance(x) {\n if (x.length === 0) {\n throw new Error(\"variance requires at least one data point\");\n }\n\n // Find the mean of squared deviations between the\n // mean value and each value.\n return sumNthPowerDeviations(x, 2) / x.length;\n}\n\nexport default variance;\n","import variance from \"./variance.js\";\n\n/**\n * The [standard deviation](http://en.wikipedia.org/wiki/Standard_deviation)\n * is the square root of the variance. This is also known as the population\n * standard deviation. It's useful for measuring the amount\n * of variation or dispersion in a set of values.\n *\n * Standard deviation is only appropriate for full-population knowledge: for\n * samples of a population, {@link sampleStandardDeviation} is\n * more appropriate.\n *\n * @param {Array} x input\n * @returns {number} standard deviation\n * @example\n * variance([2, 4, 4, 4, 5, 5, 7, 9]); // => 4\n * standardDeviation([2, 4, 4, 4, 5, 5, 7, 9]); // => 2\n */\nfunction standardDeviation(x) {\n if (x.length === 1) {\n return 0;\n }\n const v = variance(x);\n return Math.sqrt(v);\n}\n\nexport default standardDeviation;\n","/**\n * The [mode](https://en.wikipedia.org/wiki/Mode_%28statistics%29) is the number\n * that appears in a list the highest number of times.\n * There can be multiple modes in a list: in the event of a tie, this\n * algorithm will return the most recently seen mode.\n *\n * This is a [measure of central tendency](https://en.wikipedia.org/wiki/Central_tendency):\n * a method of finding a typical or central value of a set of numbers.\n *\n * This runs in `O(n)` because the input is sorted.\n *\n * @param {Array} sorted a sample of one or more data points\n * @returns {number} mode\n * @throws {Error} if sorted is empty\n * @example\n * modeSorted([0, 0, 1]); // => 0\n */\nfunction modeSorted(sorted) {\n // Handle edge cases:\n // The mode of an empty list is undefined\n if (sorted.length === 0) {\n throw new Error(\"mode requires at least one data point\");\n }\n if (sorted.length === 1) {\n return sorted[0];\n }\n\n // This assumes it is dealing with an array of size > 1, since size\n // 0 and 1 are handled immediately. Hence it starts at index 1 in the\n // array.\n let last = sorted[0];\n // store the mode as we find new modes\n let value = Number.NaN;\n // store how many times we've seen the mode\n let maxSeen = 0;\n // how many times the current candidate for the mode\n // has been seen\n let seenThis = 1;\n\n // end at sorted.length + 1 to fix the case in which the mode is\n // the highest number that occurs in the sequence. 
the last iteration\n // compares sorted[i], which is undefined, to the highest number\n // in the series\n for (let i = 1; i < sorted.length + 1; i++) {\n // we're seeing a new number pass by\n if (sorted[i] !== last) {\n // the last number is the new mode since we saw it more\n // often than the old one\n if (seenThis > maxSeen) {\n maxSeen = seenThis;\n value = last;\n }\n seenThis = 1;\n last = sorted[i];\n // if this isn't a new number, it's one more occurrence of\n // the potential mode\n } else {\n seenThis++;\n }\n }\n return value;\n}\n\nexport default modeSorted;\n","/**\n * Sort an array of numbers by their numeric value, ensuring that the\n * array is not changed in place.\n *\n * This is necessary because the default behavior of .sort\n * in JavaScript is to sort arrays as string values\n *\n * [1, 10, 12, 102, 20].sort()\n * // output\n * [1, 10, 102, 12, 20]\n *\n * @param {Array} x input array\n * @return {Array} sorted array\n * @private\n * @example\n * numericSort([3, 2, 1]) // => [1, 2, 3]\n */\nfunction numericSort(x) {\n return (\n x\n // ensure the array is not changed in-place\n .slice()\n // comparator function that treats input as numeric\n .sort(function (a, b) {\n return a - b;\n })\n );\n}\n\nexport default numericSort;\n","/**\n * The min is the lowest number in the array.\n * This runs in `O(n)`, linear time, with respect to the length of the array.\n *\n * @param {Array} x sample of one or more data points\n * @throws {Error} if the length of x is less than one\n * @returns {number} minimum value\n * @example\n * min([1, 5, -10, 100, 2]); // => -10\n */\nfunction min(x) {\n if (x.length === 0) {\n throw new Error(\"min requires at least one data point\");\n }\n\n let value = x[0];\n for (let i = 1; i < x.length; i++) {\n if (x[i] < value) {\n value = x[i];\n }\n }\n return value;\n}\n\nexport default min;\n","/**\n * This computes the maximum number in an array.\n *\n * This runs in `O(n)`, linear time, with respect to the length of the array.\n *\n * @param {Array} x sample of one or more data points\n * @returns {number} maximum value\n * @throws {Error} if the length of x is less than one\n * @example\n * max([1, 2, 3, 4]);\n * // => 4\n */\nfunction max(x) {\n if (x.length === 0) {\n throw new Error(\"max requires at least one data point\");\n }\n\n let value = x[0];\n for (let i = 1; i < x.length; i++) {\n if (x[i] > value) {\n value = x[i];\n }\n }\n return value;\n}\n\nexport default max;\n","/**\n * The simple [sum](https://en.wikipedia.org/wiki/Summation) of an array\n * is the result of adding all numbers together, starting from zero.\n *\n * This runs in `O(n)`, linear time, with respect to the length of the array.\n *\n * @param {Array} x input\n * @return {number} sum of all input numbers\n * @example\n * sumSimple([1, 2, 3]); // => 6\n */\nfunction sumSimple(x) {\n let value = 0;\n for (let i = 0; i < x.length; i++) {\n if (typeof x[i] !== \"number\") {\n return Number.NaN;\n }\n value += x[i];\n }\n return value;\n}\n\nexport default sumSimple;\n","/**\n * This is the internal implementation of quantiles: when you know\n * that the order is sorted, you don't need to re-sort it, and the computations\n * are faster.\n *\n * @param {Array} x sample of one or more data points\n * @param {number} p desired quantile: a number between 0 to 1, inclusive\n * @returns {number} quantile value\n * @throws {Error} if p ix outside of the range from 0 to 1\n * @throws {Error} if x is empty\n * @example\n * quantileSorted([3, 6, 7, 8, 8, 9, 10, 13, 15, 16, 
20], 0.5); // => 9\n */\nfunction quantileSorted(x, p) {\n const idx = x.length * p;\n if (x.length === 0) {\n throw new Error(\"quantile requires at least one data point.\");\n } else if (p < 0 || p > 1) {\n throw new Error(\"quantiles must be between 0 and 1\");\n } else if (p === 1) {\n // If p is 1, directly return the last element\n return x[x.length - 1];\n } else if (p === 0) {\n // If p is 0, directly return the first element\n return x[0];\n } else if (idx % 1 !== 0) {\n // If p is not integer, return the next element in array\n return x[Math.ceil(idx) - 1];\n } else if (x.length % 2 === 0) {\n // If the list has even-length, we'll take the average of this number\n // and the next value, if there is one\n return (x[idx - 1] + x[idx]) / 2;\n } else {\n // Finally, in the simple case of an integer value\n // with an odd-length list, return the x value at the index.\n return x[idx];\n }\n}\n\nexport default quantileSorted;\n","/**\n * Rearrange items in `arr` so that all items in `[left, k]` range are the smallest.\n * The `k`-th element will have the `(k - left + 1)`-th smallest value in `[left, right]`.\n *\n * Implements Floyd-Rivest selection algorithm https://en.wikipedia.org/wiki/Floyd-Rivest_algorithm\n *\n * @param {Array} arr input array\n * @param {number} k pivot index\n * @param {number} [left] left index\n * @param {number} [right] right index\n * @returns {void} mutates input array\n * @example\n * var arr = [65, 28, 59, 33, 21, 56, 22, 95, 50, 12, 90, 53, 28, 77, 39];\n * quickselect(arr, 8);\n * // = [39, 28, 28, 33, 21, 12, 22, 50, 53, 56, 59, 65, 90, 77, 95]\n */\nfunction quickselect(arr, k, left, right) {\n left = left || 0;\n right = right || arr.length - 1;\n\n while (right > left) {\n // 600 and 0.5 are arbitrary constants chosen in the original paper to minimize execution time\n if (right - left > 600) {\n const n = right - left + 1;\n const m = k - left + 1;\n const z = Math.log(n);\n const s = 0.5 * Math.exp((2 * z) / 3);\n let sd = 0.5 * Math.sqrt((z * s * (n - s)) / n);\n if (m - n / 2 < 0) sd *= -1;\n const newLeft = Math.max(left, Math.floor(k - (m * s) / n + sd));\n const newRight = Math.min(\n right,\n Math.floor(k + ((n - m) * s) / n + sd),\n );\n quickselect(arr, k, newLeft, newRight);\n }\n\n const t = arr[k];\n let i = left;\n let j = right;\n\n swap(arr, left, k);\n if (arr[right] > t) swap(arr, left, right);\n\n while (i < j) {\n swap(arr, i, j);\n i++;\n j--;\n while (arr[i] < t) i++;\n while (arr[j] > t) j--;\n }\n\n if (arr[left] === t) swap(arr, left, j);\n else {\n j++;\n swap(arr, j, right);\n }\n\n if (j <= k) left = j + 1;\n if (k <= j) right = j - 1;\n }\n}\n\nfunction swap(arr, i, j) {\n const tmp = arr[i];\n arr[i] = arr[j];\n arr[j] = tmp;\n}\n\nexport default quickselect;\n","import quantileSorted from \"./quantile_sorted.js\";\nimport quickselect from \"./quickselect.js\";\n\n/**\n * The [quantile](https://en.wikipedia.org/wiki/Quantile):\n * this is a population quantile, since we assume to know the entire\n * dataset in this library. 
This is an implementation of the\n * [Quantiles of a Population](http://en.wikipedia.org/wiki/Quantile#Quantiles_of_a_population)\n * algorithm from wikipedia.\n *\n * Sample is a one-dimensional array of numbers,\n * and p is either a decimal number from 0 to 1 or an array of decimal\n * numbers from 0 to 1.\n * In terms of a k/q quantile, p = k/q - it's just dealing with fractions or dealing\n * with decimal values.\n * When p is an array, the result of the function is also an array containing the appropriate\n * quantiles in input order\n *\n * @param {Array} x sample of one or more numbers\n * @param {Array | number} p the desired quantile, as a number between 0 and 1\n * @returns {number} quantile\n * @example\n * quantile([3, 6, 7, 8, 8, 9, 10, 13, 15, 16, 20], 0.5); // => 9\n */\nfunction quantile(x, p) {\n const copy = x.slice();\n\n if (Array.isArray(p)) {\n // rearrange elements so that each element corresponding to a requested\n // quantile is on a place it would be if the array was fully sorted\n multiQuantileSelect(copy, p);\n // Initialize the result array\n const results = [];\n // For each requested quantile\n for (let i = 0; i < p.length; i++) {\n results[i] = quantileSorted(copy, p[i]);\n }\n return results;\n } else {\n const idx = quantileIndex(copy.length, p);\n quantileSelect(copy, idx, 0, copy.length - 1);\n return quantileSorted(copy, p);\n }\n}\n\nfunction quantileSelect(arr, k, left, right) {\n if (k % 1 === 0) {\n quickselect(arr, k, left, right);\n } else {\n k = Math.floor(k);\n quickselect(arr, k, left, right);\n quickselect(arr, k + 1, k + 1, right);\n }\n}\n\nfunction multiQuantileSelect(arr, p) {\n const indices = [0];\n for (let i = 0; i < p.length; i++) {\n indices.push(quantileIndex(arr.length, p[i]));\n }\n indices.push(arr.length - 1);\n indices.sort(compare);\n\n const stack = [0, indices.length - 1];\n\n while (stack.length) {\n const r = Math.ceil(stack.pop());\n const l = Math.floor(stack.pop());\n if (r - l <= 1) continue;\n\n const m = Math.floor((l + r) / 2);\n quantileSelect(\n arr,\n indices[m],\n Math.floor(indices[l]),\n Math.ceil(indices[r]),\n );\n\n stack.push(l, m, m, r);\n }\n}\n\nfunction compare(a, b) {\n return a - b;\n}\n\nfunction quantileIndex(len, p) {\n const idx = len * p;\n if (p === 1) {\n // If p is 1, directly return the last index\n return len - 1;\n } else if (p === 0) {\n // If p is 0, directly return the first index\n return 0;\n } else if (idx % 1 !== 0) {\n // If index is not integer, return the next index in array\n return Math.ceil(idx) - 1;\n } else if (len % 2 === 0) {\n // If the list has even-length, we'll return the middle of two indices\n // around quantile to indicate that we need an average value of the two\n return idx - 0.5;\n } else {\n // Finally, in the simple case of an integer index\n // with an odd-length list, return the index\n return idx;\n }\n}\n\nexport default quantile;\n","/* eslint no-bitwise: 0 */\n\n/**\n * This function returns the quantile in which one would find the given value in\n * the given array. 
With a sorted array, leveraging binary search, we can find\n * this information in logarithmic time.\n *\n * @param {Array} x input\n * @returns {number} value value\n * @example\n * quantileRankSorted([1, 2, 3, 4], 3); // => 0.75\n * quantileRankSorted([1, 2, 3, 3, 4], 3); // => 0.7\n * quantileRankSorted([1, 2, 3, 4], 6); // => 1\n * quantileRankSorted([1, 2, 3, 3, 5], 4); // => 0.8\n */\nfunction quantileRankSorted(x, value) {\n // Value is lesser than any value in the array\n if (value < x[0]) {\n return 0;\n }\n\n // Value is greater than any value in the array\n if (value > x[x.length - 1]) {\n return 1;\n }\n\n let l = lowerBound(x, value);\n\n // Value is not in the array\n if (x[l] !== value) {\n return l / x.length;\n }\n\n l++;\n\n const u = upperBound(x, value);\n\n // The value exists only once in the array\n if (u === l) {\n return l / x.length;\n }\n\n // Here, we are basically computing the mean of the range of indices\n // containing our searched value. But, instead, of initializing an\n // array and looping over it, there is a dedicated math formula that\n // we apply below to get the result.\n const r = u - l + 1;\n const sum = (r * (u + l)) / 2;\n const mean = sum / r;\n\n return mean / x.length;\n}\n\nfunction lowerBound(x, value) {\n let mid = 0;\n let lo = 0;\n let hi = x.length;\n\n while (lo < hi) {\n mid = (lo + hi) >>> 1;\n\n if (value <= x[mid]) {\n hi = mid;\n } else {\n lo = -~mid;\n }\n }\n\n return lo;\n}\n\nfunction upperBound(x, value) {\n let mid = 0;\n let lo = 0;\n let hi = x.length;\n\n while (lo < hi) {\n mid = (lo + hi) >>> 1;\n\n if (value >= x[mid]) {\n lo = -~mid;\n } else {\n hi = mid;\n }\n }\n\n return lo;\n}\n\nexport default quantileRankSorted;\n","import quantile from \"./quantile.js\";\n\n/**\n * The [Interquartile range](http://en.wikipedia.org/wiki/Interquartile_range) is\n * a measure of statistical dispersion, or how scattered, spread, or\n * concentrated a distribution is. It's computed as the difference between\n * the third quartile and first quartile.\n *\n * @param {Array} x sample of one or more numbers\n * @returns {number} interquartile range: the span between lower and upper quartile,\n * 0.25 and 0.75\n * @example\n * interquartileRange([0, 1, 2, 3]); // => 2\n */\nfunction interquartileRange(x) {\n // Interquartile range is the span between the upper quartile,\n // at `0.75`, and lower quartile, `0.25`\n const q1 = quantile(x, 0.75);\n const q2 = quantile(x, 0.25);\n\n if (typeof q1 === \"number\" && typeof q2 === \"number\") {\n return q1 - q2;\n }\n}\n\nexport default interquartileRange;\n","import quantile from \"./quantile.js\";\n\n/**\n * The [median](http://en.wikipedia.org/wiki/Median) is\n * the middle number of a list. 
This is often a good indicator of 'the middle'\n * when there are outliers that skew the `mean()` value.\n * This is a [measure of central tendency](https://en.wikipedia.org/wiki/Central_tendency):\n * a method of finding a typical or central value of a set of numbers.\n *\n * The median isn't necessarily one of the elements in the list: the value\n * can be the average of two elements if the list has an even length\n * and the two central values are different.\n *\n * @param {Array} x input\n * @returns {number} median value\n * @example\n * median([10, 2, 5, 100, 2, 1]); // => 3.5\n */\nfunction median(x) {\n return +quantile(x, 0.5);\n}\n\nexport default median;\n","import median from \"./median.js\";\n\n/**\n * The [Median Absolute Deviation](http://en.wikipedia.org/wiki/Median_absolute_deviation) is\n * a robust measure of statistical\n * dispersion. It is more resilient to outliers than the standard deviation.\n *\n * @param {Array} x input array\n * @returns {number} median absolute deviation\n * @example\n * medianAbsoluteDeviation([1, 1, 2, 2, 4, 6, 9]); // => 1\n */\nfunction medianAbsoluteDeviation(x) {\n const medianValue = median(x);\n const medianAbsoluteDeviations = [];\n\n // Make a list of absolute deviations from the median\n for (let i = 0; i < x.length; i++) {\n medianAbsoluteDeviations.push(Math.abs(x[i] - medianValue));\n }\n\n // Find the median value of that list\n return median(medianAbsoluteDeviations);\n}\n\nexport default medianAbsoluteDeviation;\n","/**\n * A [Fisher-Yates shuffle](http://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle)\n * in-place - which means that it **will change the order of the original\n * array by reference**.\n *\n * This is an algorithm that generates a random [permutation](https://en.wikipedia.org/wiki/Permutation)\n * of a set.\n *\n * @param {Array} x sample of one or more numbers\n * @param {Function} [randomSource=Math.random] an optional entropy source that\n * returns numbers between 0 inclusive and 1 exclusive: the range [0, 1)\n * @returns {Array} x\n * @example\n * var x = [1, 2, 3, 4];\n * shuffleInPlace(x);\n * // x is shuffled to a value like [2, 1, 4, 3]\n */\nfunction shuffleInPlace(x, randomSource) {\n // a custom random number source can be provided if you want to use\n // a fixed seed or another random number generator, like\n // [random-js](https://www.npmjs.org/package/random-js)\n randomSource = randomSource || Math.random;\n\n // store the current length of the x to determine\n // when no elements remain to shuffle.\n let length = x.length;\n\n // temporary is used to hold an item when it is being\n // swapped between indices.\n let temporary;\n\n // The index to swap at each stage.\n let index;\n\n // While there are still items to shuffle\n while (length > 0) {\n // choose a random index within the subset of the array\n // that is not yet shuffled\n index = Math.floor(randomSource() * length--);\n\n // store the value that we'll move temporarily\n temporary = x[length];\n\n // swap the value at `x[length]` with `x[index]`\n x[length] = x[index];\n x[index] = temporary;\n }\n\n return x;\n}\n\nexport default shuffleInPlace;\n","import shuffleInPlace from \"./shuffle_in_place.js\";\n\n/**\n * A [Fisher-Yates shuffle](http://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle)\n * is a fast way to create a random permutation of a finite set. 
This is\n * a function around `shuffle_in_place` that adds the guarantee that\n * it will not modify its input.\n *\n * @param {Array} x sample of 0 or more numbers\n * @param {Function} [randomSource=Math.random] an optional entropy source that\n * returns numbers between 0 inclusive and 1 exclusive: the range [0, 1)\n * @return {Array} shuffled version of input\n * @example\n * var shuffled = shuffle([1, 2, 3, 4]);\n * shuffled; // = [2, 3, 1, 4] or any other random permutation\n */\nfunction shuffle(x, randomSource) {\n // slice the original array so that it is not modified\n const sample = x.slice();\n\n // and then shuffle that shallow-copied array, in place\n return shuffleInPlace(sample, randomSource);\n}\n\nexport default shuffle;\n","import shuffle from \"./shuffle.js\";\n\n/**\n * Create a [simple random sample](http://en.wikipedia.org/wiki/Simple_random_sample)\n * from a given array of `n` elements.\n *\n * The sampled values will be in any order, not necessarily the order\n * they appear in the input.\n *\n * @param {Array} x input array. can contain any type\n * @param {number} n count of how many elements to take\n * @param {Function} [randomSource=Math.random] an optional entropy source that\n * returns numbers between 0 inclusive and 1 exclusive: the range [0, 1)\n * @return {Array} subset of n elements in original array\n *\n * @example\n * var values = [1, 2, 4, 5, 6, 7, 8, 9];\n * sample(values, 3); // returns 3 random values, like [2, 5, 8];\n */\nfunction sample(x, n, randomSource) {\n // shuffle the original array using a fisher-yates shuffle\n const shuffled = shuffle(x, randomSource);\n\n // and then return a subset of it - the first `n` elements.\n return shuffled.slice(0, n);\n}\n\nexport default sample;\n","/**\n * Create a new column x row matrix.\n *\n * @private\n * @param {number} columns\n * @param {number} rows\n * @return {Array>} matrix\n * @example\n * makeMatrix(10, 10);\n */\nfunction makeMatrix(columns, rows) {\n const matrix = [];\n for (let i = 0; i < columns; i++) {\n const column = [];\n for (let j = 0; j < rows; j++) {\n column.push(0);\n }\n matrix.push(column);\n }\n return matrix;\n}\n\nexport default makeMatrix;\n","/**\n * For a sorted input, counting the number of unique values\n * is possible in constant time and constant memory. 
This is\n * a simple implementation of the algorithm.\n *\n * Values are compared with `===`, so objects and non-primitive objects\n * are not handled in any special way.\n *\n * @param {Array<*>} x an array of any kind of value\n * @returns {number} count of unique values\n * @example\n * uniqueCountSorted([1, 2, 3]); // => 3\n * uniqueCountSorted([1, 1, 1]); // => 1\n */\nfunction uniqueCountSorted(x) {\n let uniqueValueCount = 0;\n let lastSeenValue;\n for (let i = 0; i < x.length; i++) {\n if (i === 0 || x[i] !== lastSeenValue) {\n lastSeenValue = x[i];\n uniqueValueCount++;\n }\n }\n return uniqueValueCount;\n}\n\nexport default uniqueCountSorted;\n","import makeMatrix from \"./make_matrix.js\";\nimport numericSort from \"./numeric_sort.js\";\nimport uniqueCountSorted from \"./unique_count_sorted.js\";\n\n/**\n * Generates incrementally computed values based on the sums and sums of\n * squares for the data array\n *\n * @private\n * @param {number} j\n * @param {number} i\n * @param {Array} sums\n * @param {Array} sumsOfSquares\n * @return {number}\n * @example\n * ssq(0, 1, [-1, 0, 2], [1, 1, 5]);\n */\nfunction ssq(j, i, sums, sumsOfSquares) {\n let sji; // s(j, i)\n if (j > 0) {\n const muji = (sums[i] - sums[j - 1]) / (i - j + 1); // mu(j, i)\n sji =\n sumsOfSquares[i] - sumsOfSquares[j - 1] - (i - j + 1) * muji * muji;\n } else {\n sji = sumsOfSquares[i] - (sums[i] * sums[i]) / (i + 1);\n }\n if (sji < 0) {\n return 0;\n }\n return sji;\n}\n\n/**\n * Function that recursively divides and conquers computations\n * for cluster j\n *\n * @private\n * @param {number} iMin Minimum index in cluster to be computed\n * @param {number} iMax Maximum index in cluster to be computed\n * @param {number} cluster Index of the cluster currently being computed\n * @param {Array>} matrix\n * @param {Array>} backtrackMatrix\n * @param {Array} sums\n * @param {Array} sumsOfSquares\n */\nfunction fillMatrixColumn(\n iMin,\n iMax,\n cluster,\n matrix,\n backtrackMatrix,\n sums,\n sumsOfSquares,\n) {\n if (iMin > iMax) {\n return;\n }\n\n // Start at midpoint between iMin and iMax\n const i = Math.floor((iMin + iMax) / 2);\n\n matrix[cluster][i] = matrix[cluster - 1][i - 1];\n backtrackMatrix[cluster][i] = i;\n\n let jlow = cluster; // the lower end for j\n\n if (iMin > cluster) {\n jlow = Math.max(jlow, backtrackMatrix[cluster][iMin - 1] || 0);\n }\n jlow = Math.max(jlow, backtrackMatrix[cluster - 1][i] || 0);\n\n let jhigh = i - 1; // the upper end for j\n if (iMax < matrix[0].length - 1) {\n /* c8 ignore start */\n jhigh = Math.min(jhigh, backtrackMatrix[cluster][iMax + 1] || 0);\n /* c8 ignore end */\n }\n\n let sji;\n let sjlowi;\n let ssqjlow;\n let ssqj;\n for (let j = jhigh; j >= jlow; --j) {\n sji = ssq(j, i, sums, sumsOfSquares);\n\n if (sji + matrix[cluster - 1][jlow - 1] >= matrix[cluster][i]) {\n break;\n }\n\n // Examine the lower bound of the cluster border\n sjlowi = ssq(jlow, i, sums, sumsOfSquares);\n\n ssqjlow = sjlowi + matrix[cluster - 1][jlow - 1];\n\n if (ssqjlow < matrix[cluster][i]) {\n // Shrink the lower bound\n matrix[cluster][i] = ssqjlow;\n backtrackMatrix[cluster][i] = jlow;\n }\n jlow++;\n\n ssqj = sji + matrix[cluster - 1][j - 1];\n if (ssqj < matrix[cluster][i]) {\n matrix[cluster][i] = ssqj;\n backtrackMatrix[cluster][i] = j;\n }\n }\n\n fillMatrixColumn(\n iMin,\n i - 1,\n cluster,\n matrix,\n backtrackMatrix,\n sums,\n sumsOfSquares,\n );\n fillMatrixColumn(\n i + 1,\n iMax,\n cluster,\n matrix,\n backtrackMatrix,\n sums,\n sumsOfSquares,\n );\n}\n\n/**\n * 
Initializes the main matrices used in Ckmeans and kicks\n * off the divide and conquer cluster computation strategy\n *\n * @private\n * @param {Array} data sorted array of values\n * @param {Array>} matrix\n * @param {Array>} backtrackMatrix\n */\nfunction fillMatrices(data, matrix, backtrackMatrix) {\n const nValues = matrix[0].length;\n\n // Shift values by the median to improve numeric stability\n const shift = data[Math.floor(nValues / 2)];\n\n // Cumulative sum and cumulative sum of squares for all values in data array\n const sums = [];\n const sumsOfSquares = [];\n\n // Initialize first column in matrix & backtrackMatrix\n for (let i = 0, shiftedValue; i < nValues; ++i) {\n shiftedValue = data[i] - shift;\n if (i === 0) {\n sums.push(shiftedValue);\n sumsOfSquares.push(shiftedValue * shiftedValue);\n } else {\n sums.push(sums[i - 1] + shiftedValue);\n sumsOfSquares.push(\n sumsOfSquares[i - 1] + shiftedValue * shiftedValue,\n );\n }\n\n // Initialize for cluster = 0\n matrix[0][i] = ssq(0, i, sums, sumsOfSquares);\n backtrackMatrix[0][i] = 0;\n }\n\n // Initialize the rest of the columns\n let iMin;\n for (let cluster = 1; cluster < matrix.length; ++cluster) {\n if (cluster < matrix.length - 1) {\n iMin = cluster;\n } else {\n // No need to compute matrix[K-1][0] ... matrix[K-1][N-2]\n iMin = nValues - 1;\n }\n\n fillMatrixColumn(\n iMin,\n nValues - 1,\n cluster,\n matrix,\n backtrackMatrix,\n sums,\n sumsOfSquares,\n );\n }\n}\n\n/**\n * Ckmeans clustering is an improvement on heuristic-based clustering\n * approaches like Jenks. The algorithm was developed in\n * [Haizhou Wang and Mingzhou Song](http://journal.r-project.org/archive/2011-2/RJournal_2011-2_Wang+Song.pdf)\n * as a [dynamic programming](https://en.wikipedia.org/wiki/Dynamic_programming) approach\n * to the problem of clustering numeric data into groups with the least\n * within-group sum-of-squared-deviations.\n *\n * Minimizing the difference within groups - what Wang & Song refer to as\n * `withinss`, or within sum-of-squares, means that groups are optimally\n * homogenous within and the data is split into representative groups.\n * This is very useful for visualization, where you may want to represent\n * a continuous variable in discrete color or style groups. This function\n * can provide groups that emphasize differences between data.\n *\n * Being a dynamic approach, this algorithm is based on two matrices that\n * store incrementally-computed values for squared deviations and backtracking\n * indexes.\n *\n * This implementation is based on Ckmeans 3.4.6, which introduced a new divide\n * and conquer approach that improved runtime from O(kn^2) to O(kn log(n)).\n *\n * Unlike the [original implementation](https://cran.r-project.org/web/packages/Ckmeans.1d.dp/index.html),\n * this implementation does not include any code to automatically determine\n * the optimal number of clusters: this information needs to be explicitly\n * provided.\n *\n * ### References\n * _Ckmeans.1d.dp: Optimal k-means Clustering in One Dimension by Dynamic\n * Programming_ Haizhou Wang and Mingzhou Song ISSN 2073-4859\n *\n * from The R Journal Vol. 3/2, December 2011\n * @param {Array} x input data, as an array of number values\n * @param {number} nClusters number of desired classes. 
This cannot be\n * greater than the number of values in the data array.\n * @returns {Array>} clustered input\n * @throws {Error} if the number of requested clusters is higher than the size of the data\n * @example\n * ckmeans([-1, 2, -1, 2, 4, 5, 6, -1, 2, -1], 3);\n * // The input, clustered into groups of similar numbers.\n * //= [[-1, -1, -1, -1], [2, 2, 2], [4, 5, 6]]);\n */\nfunction ckmeans(x, nClusters) {\n if (nClusters > x.length) {\n throw new Error(\n \"cannot generate more classes than there are data values\",\n );\n }\n\n const sorted = numericSort(x);\n // we'll use this as the maximum number of clusters\n const uniqueCount = uniqueCountSorted(sorted);\n\n // if all of the input values are identical, there's one cluster\n // with all of the input in it.\n if (uniqueCount === 1) {\n return [sorted];\n }\n\n // named 'S' originally\n const matrix = makeMatrix(nClusters, sorted.length);\n // named 'J' originally\n const backtrackMatrix = makeMatrix(nClusters, sorted.length);\n\n // This is a dynamic programming way to solve the problem of minimizing\n // within-cluster sum of squares. It's similar to linear regression\n // in this way, and this calculation incrementally computes the\n // sum of squares that are later read.\n fillMatrices(sorted, matrix, backtrackMatrix);\n\n // The real work of Ckmeans clustering happens in the matrix generation:\n // the generated matrices encode all possible clustering combinations, and\n // once they're generated we can solve for the best clustering groups\n // very quickly.\n const clusters = [];\n let clusterRight = backtrackMatrix[0].length - 1;\n\n // Backtrack the clusters from the dynamic programming matrix. This\n // starts at the bottom-right corner of the matrix (if the top-left is 0, 0),\n // and moves the cluster target with the loop.\n for (let cluster = backtrackMatrix.length - 1; cluster >= 0; cluster--) {\n const clusterLeft = backtrackMatrix[cluster][clusterRight];\n\n // fill the cluster from the sorted input by taking a slice of the\n // array. the backtrack matrix makes this easy - it stores the\n // indexes where the cluster should start and end.\n clusters[cluster] = sorted.slice(clusterLeft, clusterRight + 1);\n\n if (cluster > 0) {\n clusterRight = clusterLeft - 1;\n }\n }\n\n return clusters;\n}\n\nexport default ckmeans;\n","import mean from \"./mean.js\";\n\n/**\n * [Sample covariance](https://en.wikipedia.org/wiki/Sample_mean_and_covariance) of two datasets:\n * how much do the two datasets move together?\n * x and y are two datasets, represented as arrays of numbers.\n *\n * @param {Array} x a sample of two or more data points\n * @param {Array} y a sample of two or more data points\n * @throws {Error} if x and y do not have equal lengths\n * @throws {Error} if x or y have length of one or less\n * @returns {number} sample covariance\n * @example\n * sampleCovariance([1, 2, 3, 4, 5, 6], [6, 5, 4, 3, 2, 1]); // => -3.5\n */\nfunction sampleCovariance(x, y) {\n // The two datasets must have the same length which must be more than 1\n if (x.length !== y.length) {\n throw new Error(\"sampleCovariance requires samples with equal lengths\");\n }\n\n if (x.length < 2) {\n throw new Error(\n \"sampleCovariance requires at least two data points in each sample\",\n );\n }\n\n // determine the mean of each dataset so that we can judge each\n // value of the dataset fairly as the difference from the mean. 
this\n // way, if one dataset is [1, 2, 3] and [2, 3, 4], their covariance\n // does not suffer because of the difference in absolute values\n const xmean = mean(x);\n const ymean = mean(y);\n let sum = 0;\n\n // for each pair of values, the covariance increases when their\n // difference from the mean is associated - if both are well above\n // or if both are well below\n // the mean, the covariance increases significantly.\n for (let i = 0; i < x.length; i++) {\n sum += (x[i] - xmean) * (y[i] - ymean);\n }\n\n // this is Bessels' Correction: an adjustment made to sample statistics\n // that allows for the reduced degree of freedom entailed in calculating\n // values from samples rather than complete populations.\n const besselsCorrection = x.length - 1;\n\n // the covariance is weighted by the length of the datasets.\n return sum / besselsCorrection;\n}\n\nexport default sampleCovariance;\n","import sumNthPowerDeviations from \"./sum_nth_power_deviations.js\";\n\n/**\n * The [sample variance](https://en.wikipedia.org/wiki/Variance#Sample_variance)\n * is the sum of squared deviations from the mean. The sample variance\n * is distinguished from the variance by the usage of [Bessel's Correction](https://en.wikipedia.org/wiki/Bessel's_correction):\n * instead of dividing the sum of squared deviations by the length of the input,\n * it is divided by the length minus one. This corrects the bias in estimating\n * a value from a set that you don't know if full.\n *\n * References:\n * * [Wolfram MathWorld on Sample Variance](http://mathworld.wolfram.com/SampleVariance.html)\n *\n * @param {Array} x a sample of two or more data points\n * @throws {Error} if the length of x is less than 2\n * @return {number} sample variance\n * @example\n * sampleVariance([1, 2, 3, 4, 5]); // => 2.5\n */\nfunction sampleVariance(x) {\n if (x.length < 2) {\n throw new Error(\"sampleVariance requires at least two data points\");\n }\n\n const sumSquaredDeviationsValue = sumNthPowerDeviations(x, 2);\n\n // this is Bessels' Correction: an adjustment made to sample statistics\n // that allows for the reduced degree of freedom entailed in calculating\n // values from samples rather than complete populations.\n const besselsCorrection = x.length - 1;\n\n // Find the mean value of that list\n return sumSquaredDeviationsValue / besselsCorrection;\n}\n\nexport default sampleVariance;\n","import sampleVariance from \"./sample_variance.js\";\n\n/**\n * The [sample standard deviation](http://en.wikipedia.org/wiki/Standard_deviation#Sample_standard_deviation)\n * is the square root of the sample variance.\n *\n * @param {Array} x input array\n * @returns {number} sample standard deviation\n * @example\n * sampleStandardDeviation([2, 4, 4, 4, 5, 5, 7, 9]).toFixed(2);\n * // => '2.14'\n */\nfunction sampleStandardDeviation(x) {\n const sampleVarianceX = sampleVariance(x);\n return Math.sqrt(sampleVarianceX);\n}\n\nexport default sampleStandardDeviation;\n","import sampleCovariance from \"./sample_covariance.js\";\nimport sampleStandardDeviation from \"./sample_standard_deviation.js\";\n\n/**\n * The [correlation](http://en.wikipedia.org/wiki/Correlation_and_dependence) is\n * a measure of how correlated two datasets are, between -1 and 1\n *\n * @param {Array} x first input\n * @param {Array} y second input\n * @returns {number} sample correlation\n * @example\n * sampleCorrelation([1, 2, 3, 4, 5, 6], [2, 2, 3, 4, 5, 60]).toFixed(2);\n * // => '0.69'\n */\nfunction sampleCorrelation(x, y) {\n const cov = sampleCovariance(x, 
y);\n const xstd = sampleStandardDeviation(x);\n const ystd = sampleStandardDeviation(y);\n\n return cov / xstd / ystd;\n}\n\nexport default sampleCorrelation;\n","/**\n * When combining two lists of values for which one already knows the means,\n * one does not have to necessary recompute the mean of the combined lists in\n * linear time. They can instead use this function to compute the combined\n * mean by providing the mean & number of values of the first list and the mean\n * & number of values of the second list.\n *\n * @since 3.0.0\n * @param {number} mean1 mean of the first list\n * @param {number} n1 number of items in the first list\n * @param {number} mean2 mean of the second list\n * @param {number} n2 number of items in the second list\n * @returns {number} the combined mean\n *\n * @example\n * combineMeans(5, 3, 4, 3); // => 4.5\n */\nfunction combineMeans(mean1, n1, mean2, n2) {\n return (mean1 * n1 + mean2 * n2) / (n1 + n2);\n}\n\nexport default combineMeans;\n","import sumSimple from \"./sum_simple.js\";\n\n/**\n * The mean, _also known as average_,\n * is the sum of all values over the number of values.\n * This is a [measure of central tendency](https://en.wikipedia.org/wiki/Central_tendency):\n * a method of finding a typical or central value of a set of numbers.\n *\n * The simple mean uses the successive addition method internally\n * to calculate it's result. Errors in floating-point addition are\n * not accounted for, so if precision is required, the standard {@link mean}\n * method should be used instead.\n *\n * This runs in `O(n)`, linear time, with respect to the length of the array.\n *\n *\n * @param {Array} x sample of one or more data points\n * @throws {Error} if the length of x is less than one\n * @returns {number} mean\n * @example\n * mean([0, 10]); // => 5\n */\nfunction meanSimple(x) {\n if (x.length === 0) {\n throw new Error(\"meanSimple requires at least one data point\");\n }\n\n return sumSimple(x) / x.length;\n}\n\nexport default meanSimple;\n","/**\n * The Root Mean Square (RMS) is\n * a mean function used as a measure of the magnitude of a set\n * of numbers, regardless of their sign.\n * This is the square root of the mean of the squares of the\n * input numbers.\n * This runs in `O(n)`, linear time, with respect to the length of the array.\n *\n * @param {Array} x a sample of one or more data points\n * @returns {number} root mean square\n * @throws {Error} if x is empty\n * @example\n * rootMeanSquare([-1, 1, -1, 1]); // => 1\n */\nfunction rootMeanSquare(x) {\n if (x.length === 0) {\n throw new Error(\"rootMeanSquare requires at least one data point\");\n }\n\n let sumOfSquares = 0;\n for (let i = 0; i < x.length; i++) {\n sumOfSquares += Math.pow(x[i], 2);\n }\n\n return Math.sqrt(sumOfSquares / x.length);\n}\n\nexport default rootMeanSquare;\n","/**\n * [Bayesian Classifier](http://en.wikipedia.org/wiki/Naive_Bayes_classifier)\n *\n * This is a naïve bayesian classifier that takes\n * singly-nested objects.\n *\n * @class\n * @example\n * var bayes = new BayesianClassifier();\n * bayes.train({\n * species: 'Cat'\n * }, 'animal');\n * var result = bayes.score({\n * species: 'Cat'\n * })\n * // result\n * // {\n * // animal: 1\n * // }\n */\nclass BayesianClassifier {\n /*:: totalCount: number */\n /*:: data: Object */\n constructor() {\n // The number of items that are currently\n // classified in the model\n this.totalCount = 0;\n // Every item classified in the model\n this.data = {};\n }\n\n /**\n * Train the classifier with a new 
item, which has a single\n * dimension of Javascript literal keys and values.\n *\n * @param {Object} item an object with singly-deep properties\n * @param {string} category the category this item belongs to\n * @return {undefined} adds the item to the classifier\n */\n train(item, category) {\n // If the data object doesn't have any values\n // for this category, create a new object for it.\n if (!this.data[category]) {\n this.data[category] = {};\n }\n\n // Iterate through each key in the item.\n for (const k in item) {\n const v = item[k];\n // Initialize the nested object `data[category][k][item[k]]`\n // with an object of keys that equal 0.\n if (this.data[category][k] === undefined) {\n this.data[category][k] = {};\n }\n if (this.data[category][k][v] === undefined) {\n this.data[category][k][v] = 0;\n }\n\n // And increment the key for this key/value combination.\n this.data[category][k][v]++;\n }\n\n // Increment the number of items classified\n this.totalCount++;\n }\n\n /**\n * Generate a score of how well this item matches all\n * possible categories based on its attributes\n *\n * @param {Object} item an item in the same format as with train\n * @returns {Object} of probabilities that this item belongs to a\n * given category.\n */\n score(item) {\n // Initialize an empty array of odds per category.\n const odds = {};\n let category;\n // Iterate through each key in the item,\n // then iterate through each category that has been used\n // in previous calls to `.train()`\n for (const k in item) {\n const v = item[k];\n for (category in this.data) {\n // Create an empty object for storing key - value combinations\n // for this category.\n odds[category] = {};\n\n // If this item doesn't even have a property, it counts for nothing,\n // but if it does have the property that we're looking for from\n // the item to categorize, it counts based on how popular it is\n // versus the whole population.\n if (this.data[category][k]) {\n odds[category][k + \"_\" + v] =\n (this.data[category][k][v] || 0) / this.totalCount;\n } else {\n odds[category][k + \"_\" + v] = 0;\n }\n }\n }\n\n // Set up a new object that will contain sums of these odds by category\n const oddsSums = {};\n\n for (category in odds) {\n // Tally all of the odds for each category-combination pair -\n // the non-existence of a category does not add anything to the\n // score.\n oddsSums[category] = 0;\n for (const combination in odds[category]) {\n oddsSums[category] += odds[category][combination];\n }\n }\n\n return oddsSums;\n }\n}\n\nexport default BayesianClassifier;\n","/**\n * This is a single-layer [Perceptron Classifier](http://en.wikipedia.org/wiki/Perceptron) that takes\n * arrays of numbers and predicts whether they should be classified\n * as either 0 or 1 (negative or positive examples).\n * @class\n * @example\n * // Create the model\n * var p = new PerceptronModel();\n * // Train the model with input with a diagonal boundary.\n * for (var i = 0; i < 5; i++) {\n * p.train([1, 1], 1);\n * p.train([0, 1], 0);\n * p.train([1, 0], 0);\n * p.train([0, 0], 0);\n * }\n * p.predict([0, 0]); // 0\n * p.predict([0, 1]); // 0\n * p.predict([1, 0]); // 0\n * p.predict([1, 1]); // 1\n */\nclass PerceptronModel {\n /*:: bias: number */\n /*:: weights: Array */\n constructor() {\n // The weights, or coefficients of the model;\n // weights are only populated when training with data.\n this.weights = [];\n // The bias term, or intercept; it is also a weight but\n // it's stored separately for convenience as it is always\n // 
multiplied by one.\n this.bias = 0;\n }\n /**\n * **Predict**: Use an array of features with the weight array and bias\n * to predict whether an example is labeled 0 or 1.\n *\n * @param {Array} features an array of features as numbers\n * @returns {number} 1 if the score is over 0, otherwise 0\n */\n predict(features) {\n // Only predict if previously trained\n // on the same size feature array(s).\n if (features.length !== this.weights.length) {\n return null;\n }\n\n // Calculate the sum of features times weights,\n // with the bias added (implicitly times one).\n let score = 0;\n for (let i = 0; i < this.weights.length; i++) {\n score += this.weights[i] * features[i];\n }\n score += this.bias;\n\n // Classify as 1 if the score is over 0, otherwise 0.\n if (score > 0) {\n return 1;\n } else {\n return 0;\n }\n }\n\n /**\n * **Train** the classifier with a new example, which is\n * a numeric array of features and a 0 or 1 label.\n *\n * @param {Array} features an array of features as numbers\n * @param {number} label either 0 or 1\n * @returns {PerceptronModel} this\n */\n train(features, label) {\n // Require that only labels of 0 or 1 are considered.\n if (label !== 0 && label !== 1) {\n return null;\n }\n // The length of the feature array determines\n // the length of the weight array.\n // The perceptron will continue learning as long as\n // it keeps seeing feature arrays of the same length.\n // When it sees a new data shape, it initializes.\n if (features.length !== this.weights.length) {\n this.weights = features;\n this.bias = 1;\n }\n // Make a prediction based on current weights.\n const prediction = this.predict(features);\n // Update the weights if the prediction is wrong.\n if (typeof prediction === \"number\" && prediction !== label) {\n const gradient = label - prediction;\n for (let i = 0; i < this.weights.length; i++) {\n this.weights[i] += gradient * features[i];\n }\n this.bias += gradient;\n }\n return this;\n }\n}\n\nexport default PerceptronModel;\n","/**\n * We use `ε`, epsilon, as a stopping criterion when we want to iterate\n * until we're \"close enough\". Epsilon is a very small number: for\n * simple statistics, that number is **0.0001**\n *\n * This is used in calculations like the binomialDistribution, in which\n * the process of finding a value is [iterative](https://en.wikipedia.org/wiki/Iterative_method):\n * it progresses until it is close enough.\n *\n * Below is an example of using epsilon in [gradient descent](https://en.wikipedia.org/wiki/Gradient_descent),\n * where we're trying to find a local minimum of a function's derivative,\n * given by the `fDerivative` method.\n *\n * @example\n * // From calculation, we expect that the local minimum occurs at x=9/4\n * var x_old = 0;\n * // The algorithm starts at x=6\n * var x_new = 6;\n * var stepSize = 0.01;\n *\n * function fDerivative(x) {\n * return 4 * Math.pow(x, 3) - 9 * Math.pow(x, 2);\n * }\n *\n * // The loop runs until the difference between the previous\n * // value and the current value is smaller than epsilon - a rough\n * // meaure of 'close enough'\n * while (Math.abs(x_new - x_old) > ss.epsilon) {\n * x_old = x_new;\n * x_new = x_old - stepSize * fDerivative(x_old);\n * }\n *\n * console.log('Local minimum occurs at', x_new);\n */\nconst epsilon = 0.0001;\n\nexport default epsilon;\n","/**\n * A [Factorial](https://en.wikipedia.org/wiki/Factorial), usually written n!, is the product of all positive\n * integers less than or equal to n. 
Often factorial is implemented\n * recursively, but this iterative approach is significantly faster\n * and simpler.\n *\n * @param {number} n input, must be an integer number 1 or greater\n * @returns {number} factorial: n!\n * @throws {Error} if n is less than 0 or not an integer\n * @example\n * factorial(5); // => 120\n */\nfunction factorial(n) {\n // factorial is mathematically undefined for negative numbers\n if (n < 0) {\n throw new Error(\"factorial requires a non-negative value\");\n }\n\n if (Math.floor(n) !== n) {\n throw new Error(\"factorial requires an integer input\");\n }\n\n // typically you'll expand the factorial function going down, like\n // 5! = 5 * 4 * 3 * 2 * 1. This is going in the opposite direction,\n // counting from 2 up to the number in question, and since anything\n // multiplied by 1 is itself, the loop only needs to start at 2.\n let accumulator = 1;\n for (let i = 2; i <= n; i++) {\n // for each number up to and including the number `n`, multiply\n // the accumulator my that number.\n accumulator *= i;\n }\n return accumulator;\n}\n\nexport default factorial;\n","// Define series coefficients\nconst COEFFICIENTS = [\n 0.99999999999999709182, 57.156235665862923517, -59.597960355475491248,\n 14.136097974741747174, -0.49191381609762019978, 0.33994649984811888699e-4,\n 0.46523628927048575665e-4, -0.98374475304879564677e-4,\n 0.15808870322491248884e-3, -0.21026444172410488319e-3,\n 0.2174396181152126432e-3, -0.16431810653676389022e-3,\n 0.84418223983852743293e-4, -0.2619083840158140867e-4,\n 0.36899182659531622704e-5,\n];\n\nconst g = 607 / 128;\nconst LOGSQRT2PI = Math.log(Math.sqrt(2 * Math.PI));\n\n/**\n * Compute the logarithm of the [gamma function](https://en.wikipedia.org/wiki/Gamma_function) of a value using Lanczos' approximation.\n * This function takes as input any real-value n greater than 0.\n * This function is useful for values of n too large for the normal gamma function (n > 165).\n * The code is based on Lanczo's Gamma approximation, defined [here](http://my.fit.edu/~gabdo/gamma.txt).\n *\n * @param {number} n Any real number greater than zero.\n * @returns {number} The logarithm of gamma of the input value.\n *\n * @example\n * gammaln(500); // 2605.1158503617335\n * gammaln(2.4); // 0.21685932244884043\n */\nfunction gammaln(n) {\n // Return infinity if value not in domain\n if (n <= 0) {\n return Number.POSITIVE_INFINITY;\n }\n\n // Decrement n, because approximation is defined for n - 1\n n--;\n\n // Create series approximation\n let a = COEFFICIENTS[0];\n\n for (let i = 1; i < 15; i++) {\n a += COEFFICIENTS[i] / (n + i);\n }\n\n const tmp = g + 0.5 + n;\n\n // Return natural logarithm of gamma(n)\n return LOGSQRT2PI + Math.log(a) - tmp + (n + 0.5) * Math.log(tmp);\n}\n\nexport default gammaln;\n","/**\n * **Percentage Points of the χ2 (Chi-Squared) Distribution**\n *\n * The [χ2 (Chi-Squared) Distribution](http://en.wikipedia.org/wiki/Chi-squared_distribution) is used in the common\n * chi-squared tests for goodness of fit of an observed distribution to a theoretical one, the independence of two\n * criteria of classification of qualitative data, and in confidence interval estimation for a population standard\n * deviation of a normal distribution from a sample standard deviation.\n *\n * Values from Appendix 1, Table III of William W. Hines & Douglas C. 
Montgomery, \"Probability and Statistics in\n * Engineering and Management Science\", Wiley (1980).\n */\nconst chiSquaredDistributionTable = {\n 1: {\n 0.995: 0,\n 0.99: 0,\n 0.975: 0,\n 0.95: 0,\n 0.9: 0.02,\n 0.5: 0.45,\n 0.1: 2.71,\n 0.05: 3.84,\n 0.025: 5.02,\n 0.01: 6.63,\n 0.005: 7.88,\n },\n 2: {\n 0.995: 0.01,\n 0.99: 0.02,\n 0.975: 0.05,\n 0.95: 0.1,\n 0.9: 0.21,\n 0.5: 1.39,\n 0.1: 4.61,\n 0.05: 5.99,\n 0.025: 7.38,\n 0.01: 9.21,\n 0.005: 10.6,\n },\n 3: {\n 0.995: 0.07,\n 0.99: 0.11,\n 0.975: 0.22,\n 0.95: 0.35,\n 0.9: 0.58,\n 0.5: 2.37,\n 0.1: 6.25,\n 0.05: 7.81,\n 0.025: 9.35,\n 0.01: 11.34,\n 0.005: 12.84,\n },\n 4: {\n 0.995: 0.21,\n 0.99: 0.3,\n 0.975: 0.48,\n 0.95: 0.71,\n 0.9: 1.06,\n 0.5: 3.36,\n 0.1: 7.78,\n 0.05: 9.49,\n 0.025: 11.14,\n 0.01: 13.28,\n 0.005: 14.86,\n },\n 5: {\n 0.995: 0.41,\n 0.99: 0.55,\n 0.975: 0.83,\n 0.95: 1.15,\n 0.9: 1.61,\n 0.5: 4.35,\n 0.1: 9.24,\n 0.05: 11.07,\n 0.025: 12.83,\n 0.01: 15.09,\n 0.005: 16.75,\n },\n 6: {\n 0.995: 0.68,\n 0.99: 0.87,\n 0.975: 1.24,\n 0.95: 1.64,\n 0.9: 2.2,\n 0.5: 5.35,\n 0.1: 10.65,\n 0.05: 12.59,\n 0.025: 14.45,\n 0.01: 16.81,\n 0.005: 18.55,\n },\n 7: {\n 0.995: 0.99,\n 0.99: 1.25,\n 0.975: 1.69,\n 0.95: 2.17,\n 0.9: 2.83,\n 0.5: 6.35,\n 0.1: 12.02,\n 0.05: 14.07,\n 0.025: 16.01,\n 0.01: 18.48,\n 0.005: 20.28,\n },\n 8: {\n 0.995: 1.34,\n 0.99: 1.65,\n 0.975: 2.18,\n 0.95: 2.73,\n 0.9: 3.49,\n 0.5: 7.34,\n 0.1: 13.36,\n 0.05: 15.51,\n 0.025: 17.53,\n 0.01: 20.09,\n 0.005: 21.96,\n },\n 9: {\n 0.995: 1.73,\n 0.99: 2.09,\n 0.975: 2.7,\n 0.95: 3.33,\n 0.9: 4.17,\n 0.5: 8.34,\n 0.1: 14.68,\n 0.05: 16.92,\n 0.025: 19.02,\n 0.01: 21.67,\n 0.005: 23.59,\n },\n 10: {\n 0.995: 2.16,\n 0.99: 2.56,\n 0.975: 3.25,\n 0.95: 3.94,\n 0.9: 4.87,\n 0.5: 9.34,\n 0.1: 15.99,\n 0.05: 18.31,\n 0.025: 20.48,\n 0.01: 23.21,\n 0.005: 25.19,\n },\n 11: {\n 0.995: 2.6,\n 0.99: 3.05,\n 0.975: 3.82,\n 0.95: 4.57,\n 0.9: 5.58,\n 0.5: 10.34,\n 0.1: 17.28,\n 0.05: 19.68,\n 0.025: 21.92,\n 0.01: 24.72,\n 0.005: 26.76,\n },\n 12: {\n 0.995: 3.07,\n 0.99: 3.57,\n 0.975: 4.4,\n 0.95: 5.23,\n 0.9: 6.3,\n 0.5: 11.34,\n 0.1: 18.55,\n 0.05: 21.03,\n 0.025: 23.34,\n 0.01: 26.22,\n 0.005: 28.3,\n },\n 13: {\n 0.995: 3.57,\n 0.99: 4.11,\n 0.975: 5.01,\n 0.95: 5.89,\n 0.9: 7.04,\n 0.5: 12.34,\n 0.1: 19.81,\n 0.05: 22.36,\n 0.025: 24.74,\n 0.01: 27.69,\n 0.005: 29.82,\n },\n 14: {\n 0.995: 4.07,\n 0.99: 4.66,\n 0.975: 5.63,\n 0.95: 6.57,\n 0.9: 7.79,\n 0.5: 13.34,\n 0.1: 21.06,\n 0.05: 23.68,\n 0.025: 26.12,\n 0.01: 29.14,\n 0.005: 31.32,\n },\n 15: {\n 0.995: 4.6,\n 0.99: 5.23,\n 0.975: 6.27,\n 0.95: 7.26,\n 0.9: 8.55,\n 0.5: 14.34,\n 0.1: 22.31,\n 0.05: 25,\n 0.025: 27.49,\n 0.01: 30.58,\n 0.005: 32.8,\n },\n 16: {\n 0.995: 5.14,\n 0.99: 5.81,\n 0.975: 6.91,\n 0.95: 7.96,\n 0.9: 9.31,\n 0.5: 15.34,\n 0.1: 23.54,\n 0.05: 26.3,\n 0.025: 28.85,\n 0.01: 32,\n 0.005: 34.27,\n },\n 17: {\n 0.995: 5.7,\n 0.99: 6.41,\n 0.975: 7.56,\n 0.95: 8.67,\n 0.9: 10.09,\n 0.5: 16.34,\n 0.1: 24.77,\n 0.05: 27.59,\n 0.025: 30.19,\n 0.01: 33.41,\n 0.005: 35.72,\n },\n 18: {\n 0.995: 6.26,\n 0.99: 7.01,\n 0.975: 8.23,\n 0.95: 9.39,\n 0.9: 10.87,\n 0.5: 17.34,\n 0.1: 25.99,\n 0.05: 28.87,\n 0.025: 31.53,\n 0.01: 34.81,\n 0.005: 37.16,\n },\n 19: {\n 0.995: 6.84,\n 0.99: 7.63,\n 0.975: 8.91,\n 0.95: 10.12,\n 0.9: 11.65,\n 0.5: 18.34,\n 0.1: 27.2,\n 0.05: 30.14,\n 0.025: 32.85,\n 0.01: 36.19,\n 0.005: 38.58,\n },\n 20: {\n 0.995: 7.43,\n 0.99: 8.26,\n 0.975: 9.59,\n 0.95: 10.85,\n 0.9: 12.44,\n 0.5: 19.34,\n 0.1: 28.41,\n 0.05: 31.41,\n 0.025: 34.17,\n 0.01: 37.57,\n 0.005: 
40,\n },\n 21: {\n 0.995: 8.03,\n 0.99: 8.9,\n 0.975: 10.28,\n 0.95: 11.59,\n 0.9: 13.24,\n 0.5: 20.34,\n 0.1: 29.62,\n 0.05: 32.67,\n 0.025: 35.48,\n 0.01: 38.93,\n 0.005: 41.4,\n },\n 22: {\n 0.995: 8.64,\n 0.99: 9.54,\n 0.975: 10.98,\n 0.95: 12.34,\n 0.9: 14.04,\n 0.5: 21.34,\n 0.1: 30.81,\n 0.05: 33.92,\n 0.025: 36.78,\n 0.01: 40.29,\n 0.005: 42.8,\n },\n 23: {\n 0.995: 9.26,\n 0.99: 10.2,\n 0.975: 11.69,\n 0.95: 13.09,\n 0.9: 14.85,\n 0.5: 22.34,\n 0.1: 32.01,\n 0.05: 35.17,\n 0.025: 38.08,\n 0.01: 41.64,\n 0.005: 44.18,\n },\n 24: {\n 0.995: 9.89,\n 0.99: 10.86,\n 0.975: 12.4,\n 0.95: 13.85,\n 0.9: 15.66,\n 0.5: 23.34,\n 0.1: 33.2,\n 0.05: 36.42,\n 0.025: 39.36,\n 0.01: 42.98,\n 0.005: 45.56,\n },\n 25: {\n 0.995: 10.52,\n 0.99: 11.52,\n 0.975: 13.12,\n 0.95: 14.61,\n 0.9: 16.47,\n 0.5: 24.34,\n 0.1: 34.28,\n 0.05: 37.65,\n 0.025: 40.65,\n 0.01: 44.31,\n 0.005: 46.93,\n },\n 26: {\n 0.995: 11.16,\n 0.99: 12.2,\n 0.975: 13.84,\n 0.95: 15.38,\n 0.9: 17.29,\n 0.5: 25.34,\n 0.1: 35.56,\n 0.05: 38.89,\n 0.025: 41.92,\n 0.01: 45.64,\n 0.005: 48.29,\n },\n 27: {\n 0.995: 11.81,\n 0.99: 12.88,\n 0.975: 14.57,\n 0.95: 16.15,\n 0.9: 18.11,\n 0.5: 26.34,\n 0.1: 36.74,\n 0.05: 40.11,\n 0.025: 43.19,\n 0.01: 46.96,\n 0.005: 49.65,\n },\n 28: {\n 0.995: 12.46,\n 0.99: 13.57,\n 0.975: 15.31,\n 0.95: 16.93,\n 0.9: 18.94,\n 0.5: 27.34,\n 0.1: 37.92,\n 0.05: 41.34,\n 0.025: 44.46,\n 0.01: 48.28,\n 0.005: 50.99,\n },\n 29: {\n 0.995: 13.12,\n 0.99: 14.26,\n 0.975: 16.05,\n 0.95: 17.71,\n 0.9: 19.77,\n 0.5: 28.34,\n 0.1: 39.09,\n 0.05: 42.56,\n 0.025: 45.72,\n 0.01: 49.59,\n 0.005: 52.34,\n },\n 30: {\n 0.995: 13.79,\n 0.99: 14.95,\n 0.975: 16.79,\n 0.95: 18.49,\n 0.9: 20.6,\n 0.5: 29.34,\n 0.1: 40.26,\n 0.05: 43.77,\n 0.025: 46.98,\n 0.01: 50.89,\n 0.005: 53.67,\n },\n 40: {\n 0.995: 20.71,\n 0.99: 22.16,\n 0.975: 24.43,\n 0.95: 26.51,\n 0.9: 29.05,\n 0.5: 39.34,\n 0.1: 51.81,\n 0.05: 55.76,\n 0.025: 59.34,\n 0.01: 63.69,\n 0.005: 66.77,\n },\n 50: {\n 0.995: 27.99,\n 0.99: 29.71,\n 0.975: 32.36,\n 0.95: 34.76,\n 0.9: 37.69,\n 0.5: 49.33,\n 0.1: 63.17,\n 0.05: 67.5,\n 0.025: 71.42,\n 0.01: 76.15,\n 0.005: 79.49,\n },\n 60: {\n 0.995: 35.53,\n 0.99: 37.48,\n 0.975: 40.48,\n 0.95: 43.19,\n 0.9: 46.46,\n 0.5: 59.33,\n 0.1: 74.4,\n 0.05: 79.08,\n 0.025: 83.3,\n 0.01: 88.38,\n 0.005: 91.95,\n },\n 70: {\n 0.995: 43.28,\n 0.99: 45.44,\n 0.975: 48.76,\n 0.95: 51.74,\n 0.9: 55.33,\n 0.5: 69.33,\n 0.1: 85.53,\n 0.05: 90.53,\n 0.025: 95.02,\n 0.01: 100.42,\n 0.005: 104.22,\n },\n 80: {\n 0.995: 51.17,\n 0.99: 53.54,\n 0.975: 57.15,\n 0.95: 60.39,\n 0.9: 64.28,\n 0.5: 79.33,\n 0.1: 96.58,\n 0.05: 101.88,\n 0.025: 106.63,\n 0.01: 112.33,\n 0.005: 116.32,\n },\n 90: {\n 0.995: 59.2,\n 0.99: 61.75,\n 0.975: 65.65,\n 0.95: 69.13,\n 0.9: 73.29,\n 0.5: 89.33,\n 0.1: 107.57,\n 0.05: 113.14,\n 0.025: 118.14,\n 0.01: 124.12,\n 0.005: 128.3,\n },\n 100: {\n 0.995: 67.33,\n 0.99: 70.06,\n 0.975: 74.22,\n 0.95: 77.93,\n 0.9: 82.36,\n 0.5: 99.33,\n 0.1: 118.5,\n 0.05: 124.34,\n 0.025: 129.56,\n 0.01: 135.81,\n 0.005: 140.17,\n },\n};\n\nexport default chiSquaredDistributionTable;\n","import interquartileRange from \"./interquartile_range.js\";\nimport stddev from \"./sample_standard_deviation.js\";\n\nconst SQRT_2PI = Math.sqrt(2 * Math.PI);\n\n/**\n * [Well-known kernels](https://en.wikipedia.org/wiki/Kernel_(statistics)#Kernel_functions_in_common_use)\n * @private\n */\nconst kernels = {\n /**\n * The gaussian kernel.\n * @private\n */\n gaussian: function (u) {\n return Math.exp(-0.5 * u * u) / SQRT_2PI;\n },\n};\n\n/**\n 
* Well known bandwidth selection methods\n * @private\n */\nconst bandwidthMethods = {\n /**\n * The [\"normal reference distribution\"\n * rule-of-thumb](https://stat.ethz.ch/R-manual/R-devel/library/MASS/html/bandwidth.nrd.html),\n * a commonly used version of [Silverman's\n * rule-of-thumb](https://en.wikipedia.org/wiki/Kernel_density_estimation#A_rule-of-thumb_bandwidth_estimator).\n * @private\n */\n nrd: function (x) {\n let s = stddev(x);\n const iqr = interquartileRange(x);\n if (typeof iqr === \"number\") {\n s = Math.min(s, iqr / 1.34);\n }\n return 1.06 * s * Math.pow(x.length, -0.2);\n },\n};\n\n/**\n * [Kernel density estimation](https://en.wikipedia.org/wiki/Kernel_density_estimation)\n * is a useful tool for, among other things, estimating the shape of the\n * underlying probability distribution from a sample.\n *\n * @name kernelDensityEstimation\n * @param X sample values\n * @param kernel The kernel function to use. If a function is provided, it should return non-negative values and integrate to 1. Defaults to 'gaussian'.\n * @param bandwidthMethod The \"bandwidth selection\" method to use, or a fixed bandwidth value. Defaults to \"nrd\", the commonly-used [\"normal reference distribution\" rule-of-thumb](https://stat.ethz.ch/R-manual/R-devel/library/MASS/html/bandwidth.nrd.html).\n * @returns {Function} An estimated [probability density function](https://en.wikipedia.org/wiki/Probability_density_function) for the given sample. The returned function runs in `O(X.length)`.\n */\nfunction kernelDensityEstimation(X, kernel, bandwidthMethod) {\n let kernelFn;\n if (kernel === undefined) {\n kernelFn = kernels.gaussian;\n } else if (typeof kernel === \"string\") {\n if (!kernels[kernel]) {\n throw new Error('Unknown kernel \"' + kernel + '\"');\n }\n kernelFn = kernels[kernel];\n } else {\n kernelFn = kernel;\n }\n\n let bandwidth;\n if (typeof bandwidthMethod === \"undefined\") {\n bandwidth = bandwidthMethods.nrd(X);\n } else if (typeof bandwidthMethod === \"string\") {\n if (!bandwidthMethods[bandwidthMethod]) {\n throw new Error(\n 'Unknown bandwidth method \"' + bandwidthMethod + '\"',\n );\n }\n bandwidth = bandwidthMethods[bandwidthMethod](X);\n } else {\n bandwidth = bandwidthMethod;\n }\n\n return function (x) {\n let i = 0;\n let sum = 0;\n for (i = 0; i < X.length; i++) {\n sum += kernelFn((x - X[i]) / bandwidth);\n }\n return sum / bandwidth / X.length;\n };\n}\n\nexport default kernelDensityEstimation;\n","const SQRT_2PI = Math.sqrt(2 * Math.PI);\n\nfunction cumulativeDistribution(z) {\n let sum = z;\n let tmp = z;\n\n // 15 iterations are enough for 4-digit precision\n for (let i = 1; i < 15; i++) {\n tmp *= (z * z) / (2 * i + 1);\n sum += tmp;\n }\n return (\n Math.round((0.5 + (sum / SQRT_2PI) * Math.exp((-z * z) / 2)) * 1e4) /\n 1e4\n );\n}\n\n/**\n * A standard normal table, also called the unit normal table or Z table,\n * is a mathematical table for the values of Φ (phi), which are the values of\n * the [cumulative distribution function](https://en.wikipedia.org/wiki/Normal_distribution#Cumulative_distribution_function)\n * of the normal distribution. 
It is used to find the probability that a\n * statistic is observed below, above, or between values on the standard\n * normal distribution, and by extension, any normal distribution.\n */\nconst standardNormalTable = [];\n\nfor (let z = 0; z <= 3.09; z += 0.01) {\n standardNormalTable.push(cumulativeDistribution(z));\n}\n\nexport default standardNormalTable;\n","/**\n * **[Gaussian error function](http://en.wikipedia.org/wiki/Error_function)**\n *\n * The `errorFunction(x/(sd * Math.sqrt(2)))` is the probability that a value in a\n * normal distribution with standard deviation sd is within x of the mean.\n *\n * This function returns a numerical approximation to the exact value.\n * It uses Horner's method to evaluate the polynomial of τ (tau).\n *\n * @param {number} x input\n * @return {number} error estimation\n * @example\n * errorFunction(1).toFixed(2); // => '0.84'\n */\nfunction errorFunction(x) {\n const t = 1 / (1 + 0.5 * Math.abs(x));\n const tau =\n t *\n Math.exp(\n -x * x +\n ((((((((0.17087277 * t - 0.82215223) * t + 1.48851587) * t -\n 1.13520398) *\n t +\n 0.27886807) *\n t -\n 0.18628806) *\n t +\n 0.09678418) *\n t +\n 0.37409196) *\n t +\n 1.00002368) *\n t -\n 1.26551223,\n );\n if (x >= 0) {\n return 1 - tau;\n } else {\n return tau - 1;\n }\n}\n\nexport default errorFunction;\n","/**\n * The Inverse [Gaussian error function](http://en.wikipedia.org/wiki/Error_function)\n * returns a numerical approximation to the value that would have caused\n * `errorFunction()` to return x.\n *\n * @param {number} x value of error function\n * @returns {number} estimated inverted value\n */\nfunction inverseErrorFunction(x) {\n const a = (8 * (Math.PI - 3)) / (3 * Math.PI * (4 - Math.PI));\n\n const inv = Math.sqrt(\n Math.sqrt(\n Math.pow(2 / (Math.PI * a) + Math.log(1 - x * x) / 2, 2) -\n Math.log(1 - x * x) / a,\n ) -\n (2 / (Math.PI * a) + Math.log(1 - x * x) / 2),\n );\n\n if (x >= 0) {\n return inv;\n } else {\n return -inv;\n }\n}\n\nexport default inverseErrorFunction;\n","/**\n * [Sign](https://en.wikipedia.org/wiki/Sign_function) is a function\n * that extracts the sign of a real number\n *\n * @param {number} x input value\n * @returns {number} sign value either 1, 0 or -1\n * @throws {TypeError} if the input argument x is not a number\n * @private\n *\n * @example\n * sign(2); // => 1\n */\nfunction sign(x) {\n if (typeof x === \"number\") {\n if (x < 0) {\n return -1;\n } else if (x === 0) {\n return 0;\n } else {\n return 1;\n }\n } else {\n throw new TypeError(\"not a number\");\n }\n}\n\nexport default sign;\n","/**\n * Calculate Euclidean distance between two points.\n * @param {Array} left First N-dimensional point.\n * @param {Array} right Second N-dimensional point.\n * @returns {number} Distance.\n */\nfunction euclideanDistance(left, right) {\n let sum = 0;\n for (let i = 0; i < left.length; i++) {\n const diff = left[i] - right[i];\n sum += diff * diff;\n }\n return Math.sqrt(sum);\n}\n\nexport default euclideanDistance;\n","import euclideanDistance from \"./euclidean_distance.js\";\nimport makeMatrix from \"./make_matrix.js\";\nimport sample from \"./sample.js\";\n\n/**\n * @typedef {Object} kMeansReturn\n * @property {Array} labels The labels.\n * @property {Array>} centroids The cluster centroids.\n */\n\n/**\n * Perform k-means clustering.\n *\n * @param {Array>} points N-dimensional coordinates of points to be clustered.\n * @param {number} numCluster How many clusters to create.\n * @param {Function} randomSource An optional entropy source that 
generates uniform values in [0, 1).\n * @return {kMeansReturn} Labels (same length as data) and centroids (same length as numCluster).\n * @throws {Error} If any centroids wind up friendless (i.e., without associated points).\n *\n * @example\n * kMeansCluster([[0.0, 0.5], [1.0, 0.5]], 2); // => {labels: [0, 1], centroids: [[0.0, 0.5], [1.0 0.5]]}\n */\nfunction kMeansCluster(points, numCluster, randomSource = Math.random) {\n let oldCentroids = null;\n let newCentroids = sample(points, numCluster, randomSource);\n let labels = null;\n let change = Number.MAX_VALUE;\n while (change !== 0) {\n labels = labelPoints(points, newCentroids);\n oldCentroids = newCentroids;\n newCentroids = calculateCentroids(points, labels, numCluster);\n change = calculateChange(newCentroids, oldCentroids);\n }\n return {\n labels: labels,\n centroids: newCentroids,\n };\n}\n\n/**\n * Label each point according to which centroid it is closest to.\n *\n * @private\n * @param {Array>} points Array of XY coordinates.\n * @param {Array>} centroids Current centroids.\n * @return {Array} Group labels.\n */\nfunction labelPoints(points, centroids) {\n return points.map((p) => {\n let minDist = Number.MAX_VALUE;\n let label = -1;\n for (let i = 0; i < centroids.length; i++) {\n const dist = euclideanDistance(p, centroids[i]);\n if (dist < minDist) {\n minDist = dist;\n label = i;\n }\n }\n return label;\n });\n}\n\n/**\n * Calculate centroids for points given labels.\n *\n * @private\n * @param {Array>} points Array of XY coordinates.\n * @param {Array} labels Which groups points belong to.\n * @param {number} numCluster Number of clusters being created.\n * @return {Array>} Centroid for each group.\n * @throws {Error} If any centroids wind up friendless (i.e., without associated points).\n */\nfunction calculateCentroids(points, labels, numCluster) {\n // Initialize accumulators.\n const dimension = points[0].length;\n const centroids = makeMatrix(numCluster, dimension);\n const counts = Array(numCluster).fill(0);\n\n // Add points to centroids' accumulators and count points per centroid.\n const numPoints = points.length;\n for (let i = 0; i < numPoints; i++) {\n const point = points[i];\n const label = labels[i];\n const current = centroids[label];\n for (let j = 0; j < dimension; j++) {\n current[j] += point[j];\n }\n counts[label] += 1;\n }\n\n // Rescale centroids, checking for any that have no points.\n for (let i = 0; i < numCluster; i++) {\n if (counts[i] === 0) {\n throw new Error(`Centroid ${i} has no friends`);\n }\n const centroid = centroids[i];\n for (let j = 0; j < dimension; j++) {\n centroid[j] /= counts[i];\n }\n }\n\n return centroids;\n}\n\n/**\n * Calculate the difference between old centroids and new centroids.\n *\n * @private\n * @param {Array>} left One list of centroids.\n * @param {Array>} right Another list of centroids.\n * @return {number} Distance between centroids.\n */\nfunction calculateChange(left, right) {\n let total = 0;\n for (let i = 0; i < left.length; i++) {\n total += euclideanDistance(left[i], right[i]);\n }\n return total;\n}\n\nexport default kMeansCluster;\n","import euclideanDistance from \"./euclidean_distance.js\";\nimport makeMatrix from \"./make_matrix.js\";\nimport max from \"./max.js\";\n\n/**\n * Calculate the [silhouette values](https://en.wikipedia.org/wiki/Silhouette_(clustering))\n * for clustered data.\n *\n * @param {Array>} points N-dimensional coordinates of points.\n * @param {Array} labels Labels of points. 
This must be the same length as `points`,\n * and values must lie in [0..G-1], where G is the number of groups.\n * @return {Array} The silhouette value for each point.\n *\n * @example\n * silhouette([[0.25], [0.75]], [0, 0]); // => [1.0, 1.0]\n */\nfunction silhouette(points, labels) {\n if (points.length !== labels.length) {\n throw new Error(\"must have exactly as many labels as points\");\n }\n const groupings = createGroups(labels);\n const distances = calculateAllDistances(points);\n const result = [];\n for (let i = 0; i < points.length; i++) {\n let s = 0;\n if (groupings[labels[i]].length > 1) {\n const a = meanDistanceFromPointToGroup(\n i,\n groupings[labels[i]],\n distances,\n );\n const b = meanDistanceToNearestGroup(\n i,\n labels,\n groupings,\n distances,\n );\n s = (b - a) / Math.max(a, b);\n }\n result.push(s);\n }\n return result;\n}\n\n/**\n * Create a lookup table mapping group IDs to point IDs.\n *\n * @private\n * @param {Array} labels Labels of points. This must be the same length as `points`,\n * and values must lie in [0..G-1], where G is the number of groups.\n * @return {Array>} An array of length G, each of whose entries is an array\n * containing the indices of the points in that group.\n */\nfunction createGroups(labels) {\n const numGroups = 1 + max(labels);\n const result = Array(numGroups);\n for (let i = 0; i < labels.length; i++) {\n const label = labels[i];\n if (result[label] === undefined) {\n result[label] = [];\n }\n result[label].push(i);\n }\n return result;\n}\n\n/**\n * Create a lookup table of all inter-point distances.\n *\n * @private\n * @param {Array>} points N-dimensional coordinates of points.\n * @return {Array>} A symmetric square array of inter-point distances\n * (zero on the diagonal).\n */\nfunction calculateAllDistances(points) {\n const numPoints = points.length;\n const result = makeMatrix(numPoints, numPoints);\n for (let i = 0; i < numPoints; i++) {\n for (let j = 0; j < i; j++) {\n result[i][j] = euclideanDistance(points[i], points[j]);\n result[j][i] = result[i][j];\n }\n }\n return result;\n}\n\n/**\n * Calculate the mean distance between this point and all the points in the\n * nearest group (as determined by which point in another group is closest).\n *\n * @private\n * @param {number} which The index of this point.\n * @param {Array} labels Labels of points.\n * @param {Array>} groupings An array whose entries are arrays\n * containing the indices of the points in that group.\n * @param {Array>} distances A symmetric square array of inter-point\n * distances.\n * @return {number} The mean distance from this point to others in the nearest\n * group.\n */\nfunction meanDistanceToNearestGroup(which, labels, groupings, distances) {\n const label = labels[which];\n let result = Number.MAX_VALUE;\n for (let i = 0; i < groupings.length; i++) {\n if (i !== label) {\n const d = meanDistanceFromPointToGroup(\n which,\n groupings[i],\n distances,\n );\n if (d < result) {\n result = d;\n }\n }\n }\n return result;\n}\n\n/**\n * Calculate the mean distance between a point and all the points in a group\n * (possibly its own).\n *\n * @private\n * @param {number} which The index of this point.\n * @param {Array} group The indices of all the points in the group in\n * question.\n * @param {Array>} distances A symmetric square array of inter-point\n * distances.\n * @return {number} The mean distance from this point to others in the\n * specified group.\n */\nfunction meanDistanceFromPointToGroup(which, group, distances) {\n let total = 
0;\n for (let i = 0; i < group.length; i++) {\n total += distances[which][group[i]];\n }\n return total / group.length;\n}\n\nexport default silhouette;\n","/**\n * Relative error.\n *\n * This is more difficult to calculate than it first appears [1,2]. The usual\n * formula for the relative error between an actual value A and an expected\n * value E is `|(A-E)/E|`, but:\n *\n * 1. If the expected value is 0, any other value has infinite relative error,\n * which is counter-intuitive: if the expected voltage is 0, getting 1/10th\n * of a volt doesn't feel like an infinitely large error.\n *\n * 2. This formula does not satisfy the mathematical definition of a metric [3].\n * [4] solved this problem by defining the relative error as `|ln(|A/E|)|`,\n * but that formula only works if all values are positive: for example, it\n * reports the relative error of -10 and 10 as 0.\n *\n * Our implementation sticks with convention and returns:\n *\n * - 0 if the actual and expected values are both zero\n * - Infinity if the actual value is non-zero and the expected value is zero\n * - `|(A-E)/E|` in all other cases\n *\n * [1] https://math.stackexchange.com/questions/677852/how-to-calculate-relative-error-when-true-value-is-zero\n * [2] https://en.wikipedia.org/wiki/Relative_change_and_difference\n * [3] https://en.wikipedia.org/wiki/Metric_(mathematics)#Definition\n * [4] F.W.J. Olver: \"A New Approach to Error Arithmetic.\" SIAM Journal on\n * Numerical Analysis, 15(2), 1978, 10.1137/0715024.\n *\n * @param {number} actual The actual value.\n * @param {number} expected The expected value.\n * @return {number} The relative error.\n */\nfunction relativeError(actual, expected) {\n // These lines are actually covered by tests, but it seems\n // like c8 has a bug that marks them as not covered.\n /* c8 ignore start */\n if (actual === 0 && expected === 0) {\n return 0;\n }\n /* c8 ignore end */\n return Math.abs((actual - expected) / expected);\n}\n\nexport default relativeError;\n","/**\n * When adding a new value to a list, one does not have to necessary\n * recompute the mean of the list in linear time. They can instead use\n * this function to compute the new mean by providing the current mean,\n * the number of elements in the list that produced it and the new\n * value to add.\n *\n * @since 2.5.0\n * @param {number} mean current mean\n * @param {number} n number of items in the list\n * @param {number} newValue the added value\n * @returns {number} the new mean\n *\n * @example\n * addToMean(14, 5, 53); // => 20.5\n */\nfunction addToMean(mean, n, newValue) {\n return mean + (newValue - mean) / (n + 1);\n}\n\nexport default addToMean;\n","import epsilon from \"./epsilon.js\";\nimport relativeError from \"./relative_error.js\";\n\n/**\n * Approximate equality.\n *\n * @param {number} actual The value to be tested.\n * @param {number} expected The reference value.\n * @param {number} tolerance The acceptable relative difference.\n * @return {boolean} Whether numbers are within tolerance.\n */\nfunction approxEqual(actual, expected, tolerance = epsilon) {\n return relativeError(actual, expected) <= tolerance;\n}\n\nexport default approxEqual;\n","/**\n * The [Bernoulli distribution](http://en.wikipedia.org/wiki/Bernoulli_distribution)\n * is the probability discrete\n * distribution of a random variable which takes value 1 with success\n * probability `p` and value 0 with failure\n * probability `q` = 1 - `p`. 
It can be used, for example, to represent the\n * toss of a coin, where \"1\" is defined to mean \"heads\" and \"0\" is defined\n * to mean \"tails\" (or vice versa). It is\n * a special case of a Binomial Distribution\n * where `n` = 1.\n *\n * @param {number} p input value, between 0 and 1 inclusive\n * @returns {number[]} values of bernoulli distribution at this point\n * @throws {Error} if p is outside 0 and 1\n * @example\n * bernoulliDistribution(0.3); // => [0.7, 0.3]\n */\nfunction bernoulliDistribution(p) /*: number[] */ {\n // Check that `p` is a valid probability (0 ≤ p ≤ 1)\n if (p < 0 || p > 1) {\n throw new Error(\n \"bernoulliDistribution requires probability to be between 0 and 1 inclusive\",\n );\n }\n\n return [1 - p, p];\n}\n\nexport default bernoulliDistribution;\n","import epsilon from \"./epsilon.js\";\n\n/**\n * The [Binomial Distribution](http://en.wikipedia.org/wiki/Binomial_distribution) is the discrete probability\n * distribution of the number of successes in a sequence of n independent yes/no experiments, each of which yields\n * success with probability `probability`. Such a success/failure experiment is also called a Bernoulli experiment or\n * Bernoulli trial; when trials = 1, the Binomial Distribution is a Bernoulli Distribution.\n *\n * @param {number} trials number of trials to simulate\n * @param {number} probability\n * @returns {number[]} output\n */\nfunction binomialDistribution(trials, probability) /*: ?number[] */ {\n // Check that `p` is a valid probability (0 ≤ p ≤ 1),\n // that `n` is an integer, strictly positive.\n if (probability < 0 || probability > 1 || trials <= 0 || trials % 1 !== 0) {\n return undefined;\n }\n\n // We initialize `x`, the random variable, and `accumulator`, an accumulator\n // for the cumulative distribution function to 0. `distribution_functions`\n // is the object we'll return with the `probability_of_x` and the\n // `cumulativeProbability_of_x`, as well as the calculated mean &\n // variance. 
We iterate until the `cumulativeProbability_of_x` is\n // within `epsilon` of 1.0.\n let x = 0;\n let cumulativeProbability = 0;\n const cells = [];\n let binomialCoefficient = 1;\n\n // This algorithm iterates through each potential outcome,\n // until the `cumulativeProbability` is very close to 1, at\n // which point we've defined the vast majority of outcomes\n do {\n // a [probability mass function](https://en.wikipedia.org/wiki/Probability_mass_function)\n cells[x] =\n binomialCoefficient *\n Math.pow(probability, x) *\n Math.pow(1 - probability, trials - x);\n cumulativeProbability += cells[x];\n x++;\n binomialCoefficient = (binomialCoefficient * (trials - x + 1)) / x;\n // when the cumulativeProbability is nearly 1, we've calculated\n // the useful range of this distribution\n } while (cumulativeProbability < 1 - epsilon);\n\n return cells;\n}\n\nexport default binomialDistribution;\n","import sign from \"./sign.js\";\n\n/**\n * [Bisection method](https://en.wikipedia.org/wiki/Bisection_method) is a root-finding\n * method that repeatedly bisects an interval to find the root.\n *\n * This function returns a numerical approximation to the exact value.\n *\n * @param {Function} func input function\n * @param {number} start - start of interval\n * @param {number} end - end of interval\n * @param {number} maxIterations - the maximum number of iterations\n * @param {number} errorTolerance - the error tolerance\n * @returns {number} estimated root value\n * @throws {TypeError} Argument func must be a function\n *\n * @example\n * bisect(Math.cos,0,4,100,0.003); // => 1.572265625\n */\nfunction bisect(func, start, end, maxIterations, errorTolerance) {\n if (typeof func !== \"function\")\n throw new TypeError(\"func must be a function\");\n\n for (let i = 0; i < maxIterations; i++) {\n const output = (start + end) / 2;\n\n if (\n func(output) === 0 ||\n Math.abs((end - start) / 2) < errorTolerance\n ) {\n return output;\n }\n\n if (sign(func(output)) === sign(func(start))) {\n start = output;\n } else {\n end = output;\n }\n }\n\n throw new Error(\"maximum number of iterations exceeded\");\n}\n\nexport default bisect;\n","import chiSquaredDistributionTable from \"./chi_squared_distribution_table.js\";\nimport mean from \"./mean.js\";\n\n/**\n * The [χ2 (Chi-Squared) Goodness-of-Fit Test](http://en.wikipedia.org/wiki/Goodness_of_fit#Pearson.27s_chi-squared_test)\n * uses a measure of goodness of fit which is the sum of differences between observed and expected outcome frequencies\n * (that is, counts of observations), each squared and divided by the number of observations expected given the\n * hypothesized distribution. The resulting χ2 statistic, `chiSquared`, can be compared to the chi-squared distribution\n * to determine the goodness of fit. In order to determine the degrees of freedom of the chi-squared distribution, one\n * takes the total number of observed frequencies and subtracts the number of estimated parameters. The test statistic\n * follows, approximately, a chi-square distribution with (k − c) degrees of freedom where `k` is the number of non-empty\n * cells and `c` is the number of estimated parameters for the distribution.\n *\n * @param {Array} data\n * @param {Function} distributionType a function that returns a point in a distribution:\n * for instance, binomial, bernoulli, or poisson\n * @param {number} significance\n * @returns {number} chi squared goodness of fit\n * @example\n * // Data from Poisson goodness-of-fit example 10-19 in William W. Hines & Douglas C. 
Montgomery,\n * // \"Probability and Statistics in Engineering and Management Science\", Wiley (1980).\n * var data1019 = [\n * 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n * 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n * 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n * 2, 2, 2, 2, 2, 2, 2, 2, 2,\n * 3, 3, 3, 3\n * ];\n * ss.chiSquaredGoodnessOfFit(data1019, ss.poissonDistribution, 0.05); //= false\n */\nfunction chiSquaredGoodnessOfFit(data, distributionType, significance) {\n // Estimate from the sample data, a weighted mean.\n const inputMean = mean(data);\n // Calculated value of the χ2 statistic.\n let chiSquared = 0;\n // Number of hypothesized distribution parameters estimated, expected to be supplied in the distribution test.\n // Lose one degree of freedom for estimating `lambda` from the sample data.\n const c = 1;\n // The hypothesized distribution.\n // Generate the hypothesized distribution.\n const hypothesizedDistribution = distributionType(inputMean);\n const observedFrequencies = [];\n const expectedFrequencies = [];\n\n // Create an array holding a histogram from the sample data, of\n // the form `{ value: numberOfOcurrences }`\n for (let i = 0; i < data.length; i++) {\n if (observedFrequencies[data[i]] === undefined) {\n observedFrequencies[data[i]] = 0;\n }\n observedFrequencies[data[i]]++;\n }\n\n // The histogram we created might be sparse - there might be gaps\n // between values. So we iterate through the histogram, making\n // sure that instead of undefined, gaps have 0 values.\n for (let i = 0; i < observedFrequencies.length; i++) {\n if (observedFrequencies[i] === undefined) {\n observedFrequencies[i] = 0;\n }\n }\n\n // Create an array holding a histogram of expected data given the\n // sample size and hypothesized distribution.\n for (const k in hypothesizedDistribution) {\n if (k in observedFrequencies) {\n expectedFrequencies[+k] = hypothesizedDistribution[k] * data.length;\n }\n }\n\n // Working backward through the expected frequencies, collapse classes\n // if less than three observations are expected for a class.\n // This transformation is applied to the observed frequencies as well.\n for (let k = expectedFrequencies.length - 1; k >= 0; k--) {\n if (expectedFrequencies[k] < 3) {\n expectedFrequencies[k - 1] += expectedFrequencies[k];\n expectedFrequencies.pop();\n\n observedFrequencies[k - 1] += observedFrequencies[k];\n observedFrequencies.pop();\n }\n }\n\n // Iterate through the squared differences between observed & expected\n // frequencies, accumulating the `chiSquared` statistic.\n for (let k = 0; k < observedFrequencies.length; k++) {\n chiSquared +=\n Math.pow(observedFrequencies[k] - expectedFrequencies[k], 2) /\n expectedFrequencies[k];\n }\n\n // Calculate degrees of freedom for this test and look it up in the\n // `chiSquaredDistributionTable` in order to\n // accept or reject the goodness-of-fit of the hypothesized distribution.\n // Degrees of freedom, calculated as (number of class intervals -\n // number of hypothesized distribution parameters estimated - 1)\n const degreesOfFreedom = observedFrequencies.length - c - 1;\n return (\n chiSquaredDistributionTable[degreesOfFreedom][significance] < chiSquared\n );\n}\n\nexport default chiSquaredGoodnessOfFit;\n","/**\n * Split an array into chunks of a specified size. 
This function\n * has the same behavior as [PHP's array_chunk](http://php.net/manual/en/function.array-chunk.php)\n * function, and thus will insert smaller-sized chunks at the end if\n * the input size is not divisible by the chunk size.\n *\n * `x` is expected to be an array, and `chunkSize` a number.\n * The `x` array can contain any kind of data.\n *\n * @param {Array} x a sample\n * @param {number} chunkSize size of each output array. must be a positive integer\n * @returns {Array} a chunked array\n * @throws {Error} if chunk size is less than 1 or not an integer\n * @example\n * chunk([1, 2, 3, 4, 5, 6], 2);\n * // => [[1, 2], [3, 4], [5, 6]]\n */\nfunction chunk(x, chunkSize) {\n // a list of result chunks, as arrays in an array\n const output = [];\n\n // `chunkSize` must be at least 1 - otherwise the loop below,\n // in which we call `start += chunkSize`, will loop infinitely.\n // So, we'll detect and throw in that case to indicate\n // invalid input.\n if (chunkSize < 1) {\n throw new Error(\"chunk size must be a positive number\");\n }\n\n if (Math.floor(chunkSize) !== chunkSize) {\n throw new Error(\"chunk size must be an integer\");\n }\n\n // `start` is the index at which `.slice` will start selecting\n // new array elements\n for (let start = 0; start < x.length; start += chunkSize) {\n // for each chunk, slice that part of the array and add it\n // to the output. The `.slice` function does not change\n // the original array.\n output.push(x.slice(start, start + chunkSize));\n }\n return output;\n}\n\nexport default chunk;\n","import mean from \"./mean.js\";\nimport sampleStandardDeviation from \"./sample_standard_deviation.js\";\n\n/**\n * The [coefficient of variation](https://en.wikipedia.org/wiki/Coefficient_of_variation) is the ratio of the standard deviation to the mean.\n *\n * @param {Array} x input\n * @returns {number} coefficient of variation\n * @example\n * coefficientOfVariation([1, 2, 3, 4]).toFixed(3); // => 0.516\n * coefficientOfVariation([1, 2, 3, 4, 5]).toFixed(3); // => 0.527\n * coefficientOfVariation([-1, 0, 1, 2, 3, 4]).toFixed(3); // => 1.247\n */\nfunction coefficientOfVariation(x) {\n return sampleStandardDeviation(x) / mean(x);\n}\n\nexport default coefficientOfVariation;\n","/**\n * Implementation of Combinations\n * Combinations are unique subsets of a collection - in this case, k elements from a collection at a time.\n * https://en.wikipedia.org/wiki/Combination\n * @param {Array} x any type of data\n * @param {int} k the number of objects in each group (without replacement)\n * @returns {Array} array of combinations\n * @example\n * combinations([1, 2, 3], 2); // => [[1,2], [1,3], [2,3]]\n */\n\nfunction combinations(x, k) {\n let i;\n let subI;\n const combinationList = [];\n let subsetCombinations;\n let next;\n\n for (i = 0; i < x.length; i++) {\n if (k === 1) {\n combinationList.push([x[i]]);\n } else {\n subsetCombinations = combinations(x.slice(i + 1, x.length), k - 1);\n for (subI = 0; subI < subsetCombinations.length; subI++) {\n next = subsetCombinations[subI];\n next.unshift(x[i]);\n combinationList.push(next);\n }\n }\n }\n return combinationList;\n}\n\nexport default combinations;\n","/**\n * Implementation of [Combinations](https://en.wikipedia.org/wiki/Combination) with replacement\n * Combinations are unique subsets of a collection - in this case, k elements from a collection at a time.\n * 'With replacement' means that a given element can be chosen multiple times.\n * Unlike permutation, order 
doesn't matter for combinations.\n *\n * @param {Array} x any type of data\n * @param {int} k the number of objects in each group (with replacement)\n * @returns {Array} array of combinations\n * @example\n * combinationsReplacement([1, 2], 2); // => [[1, 1], [1, 2], [2, 2]]\n */\nfunction combinationsReplacement(x, k) {\n const combinationList = [];\n\n for (let i = 0; i < x.length; i++) {\n if (k === 1) {\n // If we're requested to find only one element, we don't need\n // to recurse: just push `x[i]` onto the list of combinations.\n combinationList.push([x[i]]);\n } else {\n // Otherwise, recursively find combinations, given `k - 1`. Note that\n // we request `k - 1`, so if you were looking for k=3 combinations, we're\n // requesting k=2. This -1 gets reversed in the for loop right after this\n // code, since we concatenate `x[i]` onto the selected combinations,\n // bringing `k` back up to your requested level.\n // This recursion may go many levels deep, since it only stops once\n // k=1.\n const subsetCombinations = combinationsReplacement(\n x.slice(i, x.length),\n k - 1,\n );\n\n for (let j = 0; j < subsetCombinations.length; j++) {\n combinationList.push([x[i]].concat(subsetCombinations[j]));\n }\n }\n }\n\n return combinationList;\n}\n\nexport default combinationsReplacement;\n","import combineMeans from \"./combine_means.js\";\n\n/**\n * When combining two lists of values for which one already knows the variances,\n * one does not necessarily have to recompute the variance of the combined lists\n * in linear time. One can instead use this function to compute the combined\n * variance by providing the variance, mean & number of values of the first list\n * and the variance, mean & number of values of the second list.\n *\n * @since 3.0.0\n * @param {number} variance1 variance of the first list\n * @param {number} mean1 mean of the first list\n * @param {number} n1 number of items in the first list\n * @param {number} variance2 variance of the second list\n * @param {number} mean2 mean of the second list\n * @param {number} n2 number of items in the second list\n * @returns {number} the combined variance\n *\n * @example\n * combineVariances(14 / 3, 5, 3, 8 / 3, 4, 3); // => 47 / 12\n */\nfunction combineVariances(variance1, mean1, n1, variance2, mean2, n2) {\n const newMean = combineMeans(mean1, n1, mean2, n2);\n\n return (\n (n1 * (variance1 + Math.pow(mean1 - newMean, 2)) +\n n2 * (variance2 + Math.pow(mean2 - newMean, 2))) /\n (n1 + n2)\n );\n}\n\nexport default combineVariances;\n","/**\n * **[Logistic Cumulative Distribution Function](https://en.wikipedia.org/wiki/Logistic_distribution)**\n *\n * @param {number} x\n * @returns {number} cumulative standard logistic probability\n */\nfunction cumulativeStdLogisticProbability(x) {\n return 1 / (Math.exp(-x) + 1);\n}\n\nexport default cumulativeStdLogisticProbability;\n","import standardNormalTable from \"./standard_normal_table.js\";\n\n/**\n * **[Cumulative Standard Normal Probability](http://en.wikipedia.org/wiki/Standard_normal_table)**\n *\n * Since probability tables cannot be\n * printed for every normal distribution, as there are an infinite variety\n * of normal distributions, it is common practice to convert a normal to a\n * standard normal and then use the standard normal table to find probabilities.\n *\n * You can use `.5 + .5 * errorFunction(x / Math.sqrt(2))` to calculate the probability\n * instead of looking it up in a table.\n *\n * @param {number} z\n * @returns {number} cumulative standard normal probability\n 
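* @example\n * // values come from the bundled standard normal table, which is rounded to\n * // four decimal places, so results here are approximations\n * cumulativeStdNormalProbability(0); // => 0.5\n * cumulativeStdNormalProbability(1.96); // => 0.975\n 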
*/\nfunction cumulativeStdNormalProbability(z) {\n // Calculate the position of this value.\n const absZ = Math.abs(z);\n // Each row begins with a different\n // significant digit: 0.5, 0.6, 0.7, and so on. Each value in the table\n // corresponds to a range of 0.01 in the input values, so the value is\n // multiplied by 100.\n const index = Math.min(\n Math.round(absZ * 100),\n standardNormalTable.length - 1,\n );\n\n // The index we calculate must be in the table as a positive value,\n // but we still pay attention to whether the input is positive\n // or negative, and flip the output value as a last step.\n if (z >= 0) {\n return standardNormalTable[index];\n } else {\n // due to floating-point arithmetic, values in the table with\n // 4 significant figures can nevertheless end up as repeating\n // fractions when they're computed here.\n return Math.round((1 - standardNormalTable[index]) * 1e4) / 1e4;\n }\n}\n\nexport default cumulativeStdNormalProbability;\n","import max from \"./max.js\";\nimport min from \"./min.js\";\n\n/**\n * Given an array of x, this will find the extent of the\n * x and return an array of breaks that can be used\n * to categorize the x into a number of classes. The\n * returned array will always be 1 longer than the number of\n * classes because it includes the minimum value.\n *\n * @param {Array} x an array of number values\n * @param {number} nClasses number of desired classes\n * @returns {Array} array of class break positions\n * @example\n * equalIntervalBreaks([1, 2, 3, 4, 5, 6], 4); // => [1, 2.25, 3.5, 4.75, 6]\n */\nfunction equalIntervalBreaks(x, nClasses) {\n if (x.length < 2) {\n return x;\n }\n\n const theMin = min(x);\n const theMax = max(x);\n\n // the first break will always be the minimum value\n // in the xset\n const breaks = [theMin];\n\n // The size of each break is the full range of the x\n // divided by the number of classes requested\n const breakSize = (theMax - theMin) / nClasses;\n\n // In the case of nClasses = 1, this loop won't run\n // and the returned breaks will be [min, max]\n for (let i = 1; i < nClasses; i++) {\n breaks.push(breaks[0] + breakSize * i);\n }\n\n // the last break will always be the\n // maximum.\n breaks.push(theMax);\n\n return breaks;\n}\n\nexport default equalIntervalBreaks;\n","/**\n * This computes the minimum & maximum number in an array.\n *\n * This runs in `O(n)`, linear time, with respect to the length of the array.\n *\n * @param {Array} x sample of one or more data points\n * @returns {Array} minimum & maximum value\n * @throws {Error} if the length of x is less than one\n * @example\n * extent([1, 2, 3, 4]);\n * // => [1, 4]\n */\nfunction extent(x) {\n if (x.length === 0) {\n throw new Error(\"extent requires at least one data point\");\n }\n\n let min = x[0];\n let max = x[0];\n for (let i = 1; i < x.length; i++) {\n if (x[i] > max) {\n max = x[i];\n }\n if (x[i] < min) {\n min = x[i];\n }\n }\n return [min, max];\n}\n\nexport default extent;\n","/**\n * The extent is the lowest & highest number in the array. 
With a sorted array,\n * the first element in the array is always the lowest while the last element is always the largest, so this calculation\n * can be done in one step, or constant time.\n *\n * @param {Array} x input\n * @returns {Array} minimum & maximum value\n * @example\n * extentSorted([-100, -10, 1, 2, 5]); // => [-100, 5]\n */\nfunction extentSorted(x) {\n return [x[0], x[x.length - 1]];\n}\n\nexport default extentSorted;\n","import factorial from \"./factorial.js\";\n\n/**\n * Compute the [gamma function](https://en.wikipedia.org/wiki/Gamma_function) of a value using Nemes' approximation.\n * The gamma of n is equivalent to (n-1)!, but unlike the factorial function, gamma is defined for all real n except zero\n * and negative integers (where NaN is returned). Note, the gamma function is also well-defined for complex numbers,\n * though this implementation currently does not handle complex numbers as input values.\n * Nemes' approximation is defined [here](https://arxiv.org/abs/1003.6020) as Theorem 2.2.\n * Negative values use [Euler's reflection formula](https://en.wikipedia.org/wiki/Gamma_function#Properties) for computation.\n *\n * @param {number} n Any real number except for zero and negative integers.\n * @returns {number} The gamma of the input value.\n *\n * @example\n * gamma(11.5); // 11899423.084037038\n * gamma(-11.5); // 2.29575810481609e-8\n * gamma(5); // 24\n */\nfunction gamma(n) {\n if (Number.isInteger(n)) {\n if (n <= 0) {\n // gamma not defined for zero or negative integers\n return Number.NaN;\n } else {\n // use factorial for integer inputs\n return factorial(n - 1);\n }\n }\n\n // Decrement n, because approximation is defined for n - 1\n n--;\n\n if (n < 0) {\n // Use Euler's reflection formula for negative inputs\n // see: https://en.wikipedia.org/wiki/Gamma_function#Properties\n return Math.PI / (Math.sin(Math.PI * -n) * gamma(-n));\n } else {\n // Nemes' expansion approximation\n const seriesCoefficient =\n Math.pow(n / Math.E, n) * Math.sqrt(2 * Math.PI * (n + 1 / 6));\n\n const seriesDenom = n + 1 / 4;\n\n const seriesExpansion =\n 1 +\n 1 / 144 / Math.pow(seriesDenom, 2) -\n 1 / 12960 / Math.pow(seriesDenom, 3) -\n 257 / 207360 / Math.pow(seriesDenom, 4) -\n 52 / 2612736 / Math.pow(seriesDenom, 5) +\n 5741173 / 9405849600 / Math.pow(seriesDenom, 6) +\n 37529 / 18811699200 / Math.pow(seriesDenom, 7);\n\n return seriesCoefficient * seriesExpansion;\n }\n}\n\nexport default gamma;\n","/**\n * The [Geometric Mean](https://en.wikipedia.org/wiki/Geometric_mean) is\n * a mean function that is more useful for numbers in different\n * ranges.\n *\n * This is the nth root of the input numbers multiplied by each other.\n *\n * The geometric mean is often useful for\n * **[proportional growth](https://en.wikipedia.org/wiki/Geometric_mean#Proportional_growth)**: given\n * growth rates for multiple years, like _80%, 16.66% and 42.85%_, a simple\n * mean will incorrectly estimate an average growth rate, whereas a geometric\n * mean will correctly estimate a growth rate that, over those years,\n * will yield the same end value.\n *\n * This runs in `O(n)`, linear time, with respect to the length of the array.\n *\n * @param {Array} x sample of one or more data points\n * @returns {number} geometric mean\n * @throws {Error} if x is empty\n * @throws {Error} if x contains a negative number\n * @example\n * var growthRates = [1.80, 1.166666, 1.428571];\n * var averageGrowth = ss.geometricMean(growthRates);\n * var averageGrowthRates = [averageGrowth, averageGrowth, 
averageGrowth];\n * var startingValue = 10;\n * var startingValueMean = 10;\n * growthRates.forEach(function(rate) {\n * startingValue *= rate;\n * });\n * averageGrowthRates.forEach(function(rate) {\n * startingValueMean *= rate;\n * });\n * startingValueMean === startingValue;\n */\nfunction geometricMean(x) {\n if (x.length === 0) {\n throw new Error(\"geometricMean requires at least one data point\");\n }\n\n // the starting value.\n let value = 1;\n\n for (let i = 0; i < x.length; i++) {\n // the geometric mean is only valid for positive numbers\n if (x[i] < 0) {\n throw new Error(\n \"geometricMean requires only non-negative numbers as input\",\n );\n }\n\n // repeatedly multiply the value by each number\n value *= x[i];\n }\n\n return Math.pow(value, 1 / x.length);\n}\n\nexport default geometricMean;\n","/**\n * The [Harmonic Mean](https://en.wikipedia.org/wiki/Harmonic_mean) is\n * a mean function typically used to find the average of rates.\n * This mean is calculated by taking the reciprocal of the arithmetic mean\n * of the reciprocals of the input numbers.\n *\n * This is a [measure of central tendency](https://en.wikipedia.org/wiki/Central_tendency):\n * a method of finding a typical or central value of a set of numbers.\n *\n * This runs in `O(n)`, linear time, with respect to the length of the array.\n *\n * @param {Array} x sample of one or more data points\n * @returns {number} harmonic mean\n * @throws {Error} if x is empty\n * @throws {Error} if x contains a negative number\n * @example\n * harmonicMean([2, 3]).toFixed(2) // => '2.40'\n */\nfunction harmonicMean(x) {\n if (x.length === 0) {\n throw new Error(\"harmonicMean requires at least one data point\");\n }\n\n let reciprocalSum = 0;\n\n for (let i = 0; i < x.length; i++) {\n // the harmonic mean is only valid for positive numbers\n if (x[i] <= 0) {\n throw new Error(\n \"harmonicMean requires only positive numbers as input\",\n );\n }\n\n reciprocalSum += 1 / x[i];\n }\n\n // divide n by the reciprocal sum\n return x.length / reciprocalSum;\n}\n\nexport default harmonicMean;\n","import jenksBreaks from \"./jenks_breaks.js\";\nimport jenksMatrices from \"./jenks_matrices.js\";\n\n/**\n * The **[jenks natural breaks optimization](http://en.wikipedia.org/wiki/Jenks_natural_breaks_optimization)**\n * is an algorithm commonly used in cartography and visualization to decide\n * upon groupings of data values that minimize variance within themselves\n * and maximize variation between themselves.\n *\n * For instance, cartographers often use jenks in order to choose which\n * values are assigned to which colors in a [choropleth](https://en.wikipedia.org/wiki/Choropleth_map)\n * map.\n *\n * @param {Array} data input data, as an array of number values\n * @param {number} nClasses number of desired classes\n * @returns {Array} array of class break positions\n * // split data into 3 break points\n * jenks([1, 2, 4, 5, 7, 9, 10, 20], 3) // = [1, 7, 20, 20]\n */\nfunction jenks(data, nClasses) {\n if (nClasses > data.length) {\n return null;\n }\n\n // sort data in numerical order, since this is expected\n // by the matrices function\n data = data.slice().sort(function (a, b) {\n return a - b;\n });\n\n // get our basic matrices\n const matrices = jenksMatrices(data, nClasses);\n // we only need lower class limits here\n const lowerClassLimits = matrices.lowerClassLimits;\n\n // extract nClasses out of the computed matrices\n return jenksBreaks(data, lowerClassLimits, nClasses);\n}\n\nexport default jenks;\n","/*\n * Compute 
Matrices for Jenks\n *\n * Compute the matrices required for Jenks breaks. These matrices\n * can be used for any classing of data with `classes <= nClasses`\n *\n * @private\n */\nfunction jenksMatrices(data, nClasses) {\n // in the original implementation, these matrices are referred to\n // as `LC` and `OP`\n //\n // * lowerClassLimits (LC): optimal lower class limits\n // * varianceCombinations (OP): optimal variance combinations for all classes\n const lowerClassLimits = [];\n const varianceCombinations = [];\n // loop counters\n let i;\n let j;\n // the variance, as computed at each step in the calculation\n let variance = 0;\n\n // Initialize and fill each matrix with zeroes\n for (i = 0; i < data.length + 1; i++) {\n const tmp1 = [];\n const tmp2 = [];\n // despite these arrays having the same values, we need\n // to keep them separate so that changing one does not change\n // the other\n for (j = 0; j < nClasses + 1; j++) {\n tmp1.push(0);\n tmp2.push(0);\n }\n lowerClassLimits.push(tmp1);\n varianceCombinations.push(tmp2);\n }\n\n for (i = 1; i < nClasses + 1; i++) {\n lowerClassLimits[1][i] = 1;\n varianceCombinations[1][i] = 0;\n // in the original implementation, 9999999 is used but\n // since Javascript has `Infinity`, we use that.\n for (j = 2; j < data.length + 1; j++) {\n varianceCombinations[j][i] = Number.POSITIVE_INFINITY;\n }\n }\n\n for (let l = 2; l < data.length + 1; l++) {\n // `SZ` originally. this is the sum of the values seen thus\n // far when calculating variance.\n let sum = 0;\n // `ZSQ` originally. the sum of squares of values seen\n // thus far\n let sumSquares = 0;\n // `WT` originally. This is the number of\n let w = 0;\n // `IV` originally\n let i4 = 0;\n\n // in several instances, you could say `Math.pow(x, 2)`\n // instead of `x * x`, but this is slower in some browsers\n // introduces an unnecessary concept.\n for (let m = 1; m < l + 1; m++) {\n // `III` originally\n const lowerClassLimit = l - m + 1;\n const val = data[lowerClassLimit - 1];\n\n // here we're estimating variance for each potential classing\n // of the data, for each potential number of classes. `w`\n // is the number of data points considered so far.\n w++;\n\n // increase the current sum and sum-of-squares\n sum += val;\n sumSquares += val * val;\n\n // the variance at this point in the sequence is the difference\n // between the sum of squares and the total x 2, over the number\n // of samples.\n variance = sumSquares - (sum * sum) / w;\n\n i4 = lowerClassLimit - 1;\n\n if (i4 !== 0) {\n for (j = 2; j < nClasses + 1; j++) {\n // if adding this element to an existing class\n // will increase its variance beyond the limit, break\n // the class at this point, setting the `lowerClassLimit`\n // at this point.\n if (\n varianceCombinations[l][j] >=\n variance + varianceCombinations[i4][j - 1]\n ) {\n lowerClassLimits[l][j] = lowerClassLimit;\n varianceCombinations[l][j] =\n variance + varianceCombinations[i4][j - 1];\n }\n }\n }\n }\n\n lowerClassLimits[l][1] = 1;\n varianceCombinations[l][1] = variance;\n }\n\n // return the two matrices. 
for just providing breaks, only\n // `lowerClassLimits` is needed, but variances can be useful to\n // evaluate goodness of fit.\n return {\n lowerClassLimits: lowerClassLimits,\n varianceCombinations: varianceCombinations,\n };\n}\n\nexport default jenksMatrices;\n","/*\n * Pull Breaks Values for Jenks\n *\n * the second part of the jenks recipe: take the calculated matrices\n * and derive an array of n breaks.\n *\n * @private\n */\nfunction jenksBreaks(data, lowerClassLimits, nClasses) {\n let k = data.length;\n const kclass = [];\n let countNum = nClasses;\n\n // the calculation of classes will never include the upper\n // bound, so we need to explicitly set it\n kclass[nClasses] = data[data.length - 1];\n\n // the lowerClassLimits matrix is used as indices into itself\n // here: the `k` variable is reused in each iteration.\n while (countNum > 0) {\n kclass[countNum - 1] = data[lowerClassLimits[k][countNum] - 1];\n k = lowerClassLimits[k][countNum] - 1;\n countNum--;\n }\n\n return kclass;\n}\n\nexport default jenksBreaks;\n","/**\n * [Simple linear regression](http://en.wikipedia.org/wiki/Simple_linear_regression)\n * is a simple way to find a fitted line\n * between a set of coordinates. This algorithm finds the slope and y-intercept of a regression line\n * using the least sum of squares.\n *\n * @param {Array>} data an array of two-element of arrays,\n * like `[[0, 1], [2, 3]]`\n * @returns {Object} object containing slope and intersect of regression line\n * @example\n * linearRegression([[0, 0], [1, 1]]); // => { m: 1, b: 0 }\n */\nfunction linearRegression(data) {\n let m;\n let b;\n\n // Store data length in a local variable to reduce\n // repeated object property lookups\n const dataLength = data.length;\n\n //if there's only one point, arbitrarily choose a slope of 0\n //and a y-intercept of whatever the y of the initial point is\n if (dataLength === 1) {\n m = 0;\n b = data[0][1];\n } else {\n // Initialize our sums and scope the `m` and `b`\n // variables that define the line.\n let sumX = 0;\n let sumY = 0;\n let sumXX = 0;\n let sumXY = 0;\n\n // Use local variables to grab point values\n // with minimal object property lookups\n let point;\n let x;\n let y;\n\n // Gather the sum of all x values, the sum of all\n // y values, and the sum of x^2 and (x*y) for each\n // value.\n //\n // In math notation, these would be SS_x, SS_y, SS_xx, and SS_xy\n for (let i = 0; i < dataLength; i++) {\n point = data[i];\n x = point[0];\n y = point[1];\n\n sumX += x;\n sumY += y;\n\n sumXX += x * x;\n sumXY += x * y;\n }\n\n // `m` is the slope of the regression line\n m =\n (dataLength * sumXY - sumX * sumY) /\n (dataLength * sumXX - sumX * sumX);\n\n // `b` is the y-intercept of the line.\n b = sumY / dataLength - (m * sumX) / dataLength;\n }\n\n // Return both values as an object.\n return {\n m: m,\n b: b,\n };\n}\n\nexport default linearRegression;\n","/**\n * Given the output of `linearRegression`: an object\n * with `m` and `b` values indicating slope and intercept,\n * respectively, generate a line function that translates\n * x values into y values.\n *\n * @param {Object} mb object with `m` and `b` members, representing\n * slope and intersect of desired line\n * @returns {Function} method that computes y-value at any given\n * x-value on the line.\n * @example\n * var l = linearRegressionLine(linearRegression([[0, 0], [1, 1]]));\n * l(0) // = 0\n * l(2) // = 2\n * linearRegressionLine({ b: 0, m: 1 })(1); // => 1\n * linearRegressionLine({ b: 1, m: 1 })(1); // => 2\n 
*/\nfunction linearRegressionLine(mb /*: { b: number, m: number }*/) {\n // Return a function that computes a `y` value for each\n // x value it is given, based on the values of `b` and `m`\n // that we just computed.\n return function (x) {\n return mb.b + mb.m * x;\n };\n}\n\nexport default linearRegressionLine;\n","/**\n * The [log average](https://en.wikipedia.org/wiki/Geometric_mean#Relationship_with_logarithms)\n * is an equivalent way of computing the geometric mean of an array suitable for large or small products.\n *\n * It's found by calculating the average logarithm of the elements and exponentiating.\n *\n * @param {Array} x sample of one or more data points\n * @returns {number} geometric mean\n * @throws {Error} if x is empty\n * @throws {Error} if x contains a negative number\n */\nfunction logAverage(x) {\n if (x.length === 0) {\n throw new Error(\"logAverage requires at least one data point\");\n }\n\n let value = 0;\n for (let i = 0; i < x.length; i++) {\n if (x[i] < 0) {\n throw new Error(\n \"logAverage requires only non-negative numbers as input\",\n );\n }\n value += Math.log(x[i]);\n }\n\n return Math.exp(value / x.length);\n}\n\nexport default logAverage;\n","/**\n * The [Logit](https://en.wikipedia.org/wiki/Logit)\n * is the inverse of cumulativeStdLogisticProbability,\n * and is also known as the logistic quantile function.\n *\n * @param {number} p\n * @returns {number} logit\n */\nfunction logit(p) {\n if (p <= 0 || p >= 1) {\n throw new Error(\"p must be strictly between zero and one\");\n }\n return Math.log(p / (1 - p));\n}\n\nexport default logit;\n","/**\n * The maximum is the highest number in the array. With a sorted array,\n * the last element in the array is always the largest, so this calculation\n * can be done in one step, or constant time.\n *\n * @param {Array} x input\n * @returns {number} maximum value\n * @example\n * maxSorted([-100, -10, 1, 2, 5]); // => 5\n */\nfunction maxSorted(x) {\n return x[x.length - 1];\n}\n\nexport default maxSorted;\n","import quantileSorted from \"./quantile_sorted.js\";\n\n/**\n * The [median](http://en.wikipedia.org/wiki/Median) is\n * the middle number of a list. This is often a good indicator of 'the middle'\n * when there are outliers that skew the `mean()` value.\n * This is a [measure of central tendency](https://en.wikipedia.org/wiki/Central_tendency):\n * a method of finding a typical or central value of a set of numbers.\n *\n * The median isn't necessarily one of the elements in the list: the value\n * can be the average of two elements if the list has an even length\n * and the two central values are different.\n *\n * @param {Array} sorted input, sorted in ascending order\n * @returns {number} median value\n * @example\n * medianSorted([1, 2, 2, 5, 10, 100]); // => 3.5\n */\nfunction medianSorted(sorted) {\n return quantileSorted(sorted, 0.5);\n}\n\nexport default medianSorted;\n","/**\n * The minimum is the lowest number in the array. 
With a sorted array,\n * the first element in the array is always the smallest, so this calculation\n * can be done in one step, or constant time.\n *\n * @param {Array} x input\n * @returns {number} minimum value\n * @example\n * minSorted([-100, -10, 1, 2, 5]); // => -100\n */\nfunction minSorted(x) {\n return x[0];\n}\n\nexport default minSorted;\n","import modeSorted from \"./mode_sorted.js\";\nimport numericSort from \"./numeric_sort.js\";\n\n/**\n * The [mode](https://en.wikipedia.org/wiki/Mode_%28statistics%29) is the number\n * that appears in a list the highest number of times.\n * There can be multiple modes in a list: in the event of a tie, this\n * algorithm will return the most recently seen mode.\n *\n * This is a [measure of central tendency](https://en.wikipedia.org/wiki/Central_tendency):\n * a method of finding a typical or central value of a set of numbers.\n *\n * This runs in `O(n log(n))` because it needs to sort the array internally\n * before running an `O(n)` search to find the mode.\n *\n * @param {Array} x input\n * @returns {number} mode\n * @example\n * mode([0, 0, 1]); // => 0\n */\nfunction mode(x) {\n // Sorting the array lets us iterate through it below and be sure\n // that every time we see a new number it's new and we'll never\n // see the same number twice\n return modeSorted(numericSort(x));\n}\n\nexport default mode;\n","/* globals Map: false */\n\n/**\n * The [mode](https://en.wikipedia.org/wiki/Mode_%28statistics%29) is the number\n * that appears in a list the highest number of times.\n * There can be multiple modes in a list: in the event of a tie, this\n * algorithm will return the most recently seen mode.\n *\n * modeFast uses a Map object to keep track of the mode, instead of the approach\n * used with `mode`, a sorted array. 
As a result, it is faster\n * than `mode` and supports any data type that can be compared with `==`.\n * It also requires a\n * [JavaScript environment with support for Map](https://kangax.github.io/compat-table/es6/#test-Map),\n * and will throw an error if Map is not available.\n *\n * This is a [measure of central tendency](https://en.wikipedia.org/wiki/Central_tendency):\n * a method of finding a typical or central value of a set of numbers.\n *\n * @param {Array<*>} x a sample of one or more data points\n * @returns {?*} mode\n * @throws {ReferenceError} if the JavaScript environment doesn't support Map\n * @throws {Error} if x is empty\n * @example\n * modeFast(['rabbits', 'rabbits', 'squirrels']); // => 'rabbits'\n */\nfunction modeFast(x) {\n // This index will reflect the incidence of different values, indexing\n // them like\n // { value: count }\n const index = new Map();\n\n // A running `mode` and the number of times it has been encountered.\n let mode;\n let modeCount = 0;\n\n for (let i = 0; i < x.length; i++) {\n let newCount = index.get(x[i]);\n if (newCount === undefined) {\n newCount = 1;\n } else {\n newCount++;\n }\n if (newCount > modeCount) {\n mode = x[i];\n modeCount = newCount;\n }\n index.set(x[i], newCount);\n }\n\n if (modeCount === 0) {\n throw new Error(\"mode requires at last one data point\");\n }\n\n return mode;\n}\n\nexport default modeFast;\n","import mean from \"./mean.js\";\nimport shuffleInPlace from \"./shuffle_in_place.js\";\n\n/**\n * Conducts a [permutation test](https://en.wikipedia.org/wiki/Resampling_(statistics)#Permutation_tests)\n * to determine if two data sets are *significantly* different from each other, using\n * the difference of means between the groups as the test statistic.\n * The function allows for the following hypotheses:\n * - two_tail = Null hypothesis: the two distributions are equal.\n * - greater = Null hypothesis: observations from sampleX tend to be smaller than those from sampleY.\n * - less = Null hypothesis: observations from sampleX tend to be greater than those from sampleY.\n * [Learn more about one-tail vs two-tail tests.](https://en.wikipedia.org/wiki/One-_and_two-tailed_tests)\n *\n * @param {Array} sampleX first dataset (e.g. treatment data)\n * @param {Array} sampleY second dataset (e.g. control data)\n * @param {string} alternative alternative hypothesis, either 'two_sided' (default), 'greater', or 'less'\n * @param {number} k number of values in permutation distribution.\n * @param {Function} [randomSource=Math.random] an optional entropy source\n * @returns {number} p-value The probability of observing the difference between groups (as or more extreme than what we did), assuming the null hypothesis.\n *\n * @example\n * var control = [2, 5, 3, 6, 7, 2, 5];\n * var treatment = [20, 5, 13, 12, 7, 2, 2];\n * permutationTest(control, treatment); // ~0.1324\n */\nfunction permutationTest(sampleX, sampleY, alternative, k, randomSource) {\n // Set default arguments\n if (k === undefined) {\n k = 10000;\n }\n if (alternative === undefined) {\n alternative = \"two_side\";\n }\n if (\n alternative !== \"two_side\" &&\n alternative !== \"greater\" &&\n alternative !== \"less\"\n ) {\n throw new Error(\n \"`alternative` must be either 'two_side', 'greater', or 'less'.\",\n );\n }\n\n // get means for each sample\n const meanX = mean(sampleX);\n const meanY = mean(sampleY);\n\n // calculate initial test statistic. 
This will be our point of comparison with\n // the generated test statistics.\n const testStatistic = meanX - meanY;\n\n // create test-statistic distribution\n const testStatDsn = new Array(k);\n\n // combine datsets so we can easily shuffle later\n const allData = sampleX.concat(sampleY);\n const midIndex = Math.floor(allData.length / 2);\n\n for (let i = 0; i < k; i++) {\n // 1. shuffle data assignments\n shuffleInPlace(allData, randomSource);\n const permLeft = allData.slice(0, midIndex);\n const permRight = allData.slice(midIndex, allData.length);\n\n // 2.re-calculate test statistic\n const permTestStatistic = mean(permLeft) - mean(permRight);\n\n // 3. store test statistic to build test statistic distribution\n testStatDsn[i] = permTestStatistic;\n }\n\n // Calculate p-value depending on alternative\n // For this test, we calculate the percentage of 'extreme' test statistics (subject to our hypothesis)\n // more info on permutation test p-value calculations: https://onlinecourses.science.psu.edu/stat464/node/35\n let numExtremeTStats = 0;\n if (alternative === \"two_side\") {\n for (let i = 0; i <= k; i++) {\n if (Math.abs(testStatDsn[i]) >= Math.abs(testStatistic)) {\n numExtremeTStats += 1;\n }\n }\n } else if (alternative === \"greater\") {\n for (let i = 0; i <= k; i++) {\n if (testStatDsn[i] >= testStatistic) {\n numExtremeTStats += 1;\n }\n }\n } else {\n // alternative === 'less'\n for (let i = 0; i <= k; i++) {\n /* c8 ignore start */\n if (testStatDsn[i] <= testStatistic) {\n numExtremeTStats += 1;\n }\n /* c8 ignore end */\n }\n }\n\n return numExtremeTStats / k;\n}\n\nexport default permutationTest;\n","/**\n * Implementation of [Heap's Algorithm](https://en.wikipedia.org/wiki/Heap%27s_algorithm)\n * for generating permutations.\n *\n * @param {Array} elements any type of data\n * @returns {Array} array of permutations\n */\nfunction permutationsHeap(elements) {\n const indexes = new Array(elements.length);\n const permutations = [elements.slice()];\n\n for (let i = 0; i < elements.length; i++) {\n indexes[i] = 0;\n }\n\n for (let i = 0; i < elements.length; ) {\n if (indexes[i] < i) {\n // At odd indexes, swap from indexes[i] instead\n // of from the beginning of the array\n let swapFrom = 0;\n if (i % 2 !== 0) {\n swapFrom = indexes[i];\n }\n\n // swap between swapFrom and i, using\n // a temporary variable as storage.\n const temp = elements[swapFrom];\n elements[swapFrom] = elements[i];\n elements[i] = temp;\n\n permutations.push(elements.slice());\n indexes[i]++;\n i = 0;\n } else {\n indexes[i] = 0;\n i++;\n }\n }\n\n return permutations;\n}\n\nexport default permutationsHeap;\n","import epsilon from \"./epsilon.js\";\n\n/**\n * The [Poisson Distribution](http://en.wikipedia.org/wiki/Poisson_distribution)\n * is a discrete probability distribution that expresses the probability\n * of a given number of events occurring in a fixed interval of time\n * and/or space if these events occur with a known average rate and\n * independently of the time since the last event.\n *\n * The Poisson Distribution is characterized by the strictly positive\n * mean arrival or occurrence rate, `λ`.\n *\n * @param {number} lambda location poisson distribution\n * @returns {number[]} values of poisson distribution at that point\n */\nfunction poissonDistribution(lambda) /*: ?number[] */ {\n // Check that lambda is strictly positive\n if (lambda <= 0) {\n return undefined;\n }\n\n // our current place in the distribution\n let x = 0;\n // and we keep track of the current cumulative 
probability, in\n // order to know when to stop calculating chances.\n let cumulativeProbability = 0;\n // the calculated cells to be returned\n const cells = [];\n let factorialX = 1;\n\n // This algorithm iterates through each potential outcome,\n // until the `cumulativeProbability` is very close to 1, at\n // which point we've defined the vast majority of outcomes\n do {\n // a [probability mass function](https://en.wikipedia.org/wiki/Probability_mass_function)\n cells[x] = (Math.exp(-lambda) * Math.pow(lambda, x)) / factorialX;\n cumulativeProbability += cells[x];\n x++;\n factorialX *= x;\n // when the cumulativeProbability is nearly 1, we've calculated\n // the useful range of this distribution\n } while (cumulativeProbability < 1 - epsilon);\n\n return cells;\n}\n\nexport default poissonDistribution;\n","import epsilon from \"./epsilon.js\";\nimport inverseErrorFunction from \"./inverse_error_function.js\";\n\n/**\n * The [Probit](http://en.wikipedia.org/wiki/Probit)\n * is the inverse of cumulativeStdNormalProbability(),\n * and is also known as the normal quantile function.\n *\n * It returns the number of standard deviations from the mean\n * where the p'th quantile of values can be found in a normal distribution.\n * So, for example, probit(0.5 + 0.6827/2) ≈ 1 because 68.27% of values are\n * normally found within 1 standard deviation above or below the mean.\n *\n * @param {number} p\n * @returns {number} probit\n */\nfunction probit(p) {\n if (p === 0) {\n p = epsilon;\n } else if (p >= 1) {\n p = 1 - epsilon;\n }\n return Math.sqrt(2) * inverseErrorFunction(2 * p - 1);\n}\n\nexport default probit;\n","/**\n * The [product](https://en.wikipedia.org/wiki/Product_(mathematics)) of an array\n * is the result of multiplying all numbers together, starting using one as the multiplicative identity.\n *\n * This runs in `O(n)`, linear time, with respect to the length of the array.\n *\n * @param {Array} x input\n * @return {number} product of all input numbers\n * @example\n * product([1, 2, 3, 4]); // => 24\n */\nfunction product(x) {\n let value = 1;\n for (let i = 0; i < x.length; i++) {\n value *= x[i];\n }\n return value;\n}\n\nexport default product;\n","import numericSort from \"./numeric_sort.js\";\nimport quantileRankSorted from \"./quantile_rank_sorted.js\";\n\n/**\n * This function returns the quantile in which one would find the given value in\n * the given array. 
It will copy and sort your array before each run, so\n * if you know your array is already sorted, you should use `quantileRankSorted`\n * instead.\n *\n * @param {Array} x input\n * @returns {number} value value\n * @example\n * quantileRank([4, 3, 1, 2], 3); // => 0.75\n * quantileRank([4, 3, 2, 3, 1], 3); // => 0.7\n * quantileRank([2, 4, 1, 3], 6); // => 1\n * quantileRank([5, 3, 1, 2, 3], 4); // => 0.8\n */\nfunction quantileRank(x, value) {\n // Cloning and sorting the array\n const sortedCopy = numericSort(x);\n\n return quantileRankSorted(sortedCopy, value);\n}\n\nexport default quantileRank;\n","/**\n * The [R Squared](http://en.wikipedia.org/wiki/Coefficient_of_determination)\n * value of data compared with a function `f`\n * is the sum of the squared differences between the prediction\n * and the actual value.\n *\n * @param {Array>} x input data: this should be doubly-nested\n * @param {Function} func function called on `[i][0]` values within the dataset\n * @returns {number} r-squared value\n * @example\n * var samples = [[0, 0], [1, 1]];\n * var regressionLine = linearRegressionLine(linearRegression(samples));\n * rSquared(samples, regressionLine); // = 1 this line is a perfect fit\n */\nfunction rSquared(x, func) {\n if (x.length < 2) {\n return 1;\n }\n\n // Compute the average y value for the actual\n // data set in order to compute the\n // _total sum of squares_\n let sum = 0;\n for (let i = 0; i < x.length; i++) {\n sum += x[i][1];\n }\n const average = sum / x.length;\n\n // Compute the total sum of squares - the\n // squared difference between each point\n // and the average of all points.\n let sumOfSquares = 0;\n for (let j = 0; j < x.length; j++) {\n sumOfSquares += Math.pow(average - x[j][1], 2);\n }\n\n // Finally estimate the error: the squared\n // difference between the estimate and the actual data\n // value at each point.\n let err = 0;\n for (let k = 0; k < x.length; k++) {\n err += Math.pow(x[k][1] - func(x[k][0]), 2);\n }\n\n // As the error grows larger, its ratio to the\n // sum of squares increases and the r squared\n // value grows lower.\n return 1 - err / sumOfSquares;\n}\n\nexport default rSquared;\n","import mean from \"./mean.js\";\n\n/**\n * [Kurtosis](http://en.wikipedia.org/wiki/Kurtosis) is\n * a measure of the heaviness of a distribution's tails relative to its\n * variance. The kurtosis value can be positive or negative, or even undefined.\n *\n * Implementation is based on Fisher's excess kurtosis definition and uses\n * unbiased moment estimators. 
This is the version found in Excel and available\n * in several statistical packages, including SAS and SciPy.\n *\n * @param {Array} x a sample of 4 or more data points\n * @returns {number} sample kurtosis\n * @throws {Error} if x has length less than 4\n * @example\n * sampleKurtosis([1, 2, 2, 3, 5]); // => 1.4555765595463122\n */\nfunction sampleKurtosis(x) {\n const n = x.length;\n\n if (n < 4) {\n throw new Error(\"sampleKurtosis requires at least four data points\");\n }\n\n const meanValue = mean(x);\n let tempValue;\n let secondCentralMoment = 0;\n let fourthCentralMoment = 0;\n\n for (let i = 0; i < n; i++) {\n tempValue = x[i] - meanValue;\n secondCentralMoment += tempValue * tempValue;\n fourthCentralMoment += tempValue * tempValue * tempValue * tempValue;\n }\n\n return (\n ((n - 1) / ((n - 2) * (n - 3))) *\n ((n * (n + 1) * fourthCentralMoment) /\n (secondCentralMoment * secondCentralMoment) -\n 3 * (n - 1))\n );\n}\n\nexport default sampleKurtosis;\n","import sampleCorrelation from \"./sample_correlation.js\";\n\n/**\n * The [rank correlation](https://en.wikipedia.org/wiki/Rank_correlation) is\n * a measure of the strength of monotonic relationship between two arrays\n *\n * @param {Array} x first input\n * @param {Array} y second input\n * @returns {number} sample rank correlation\n */\nfunction sampleRankCorrelation(x, y) {\n const xIndexes = x\n .map((value, index) => [value, index])\n .sort((a, b) => a[0] - b[0])\n .map((pair) => pair[1]);\n const yIndexes = y\n .map((value, index) => [value, index])\n .sort((a, b) => a[0] - b[0])\n .map((pair) => pair[1]);\n\n // At this step, we have an array of indexes\n // that map from sorted numbers to their original indexes. We reverse\n // that so that it is an array of the sorted destination index.\n const xRanks = Array(xIndexes.length);\n const yRanks = Array(xIndexes.length);\n for (let i = 0; i < xIndexes.length; i++) {\n xRanks[xIndexes[i]] = i;\n yRanks[yIndexes[i]] = i;\n }\n\n return sampleCorrelation(xRanks, yRanks);\n}\n\nexport default sampleRankCorrelation;\n","import mean from \"./mean.js\";\n\n/**\n * [Skewness](http://en.wikipedia.org/wiki/Skewness) is\n * a measure of the extent to which a probability distribution of a\n * real-valued random variable \"leans\" to one side of the mean.\n * The skewness value can be positive or negative, or even undefined.\n *\n * Implementation is based on the adjusted Fisher-Pearson standardized\n * moment coefficient, which is the version found in Excel and several\n * statistical packages including Minitab, SAS and SPSS.\n *\n * @since 4.1.0\n * @param {Array} x a sample of 3 or more data points\n * @returns {number} sample skewness\n * @throws {Error} if x has length less than 3\n * @example\n * sampleSkewness([2, 4, 6, 3, 1]); // => 0.590128656384365\n */\nfunction sampleSkewness(x) {\n if (x.length < 3) {\n throw new Error(\"sampleSkewness requires at least three data points\");\n }\n\n const meanValue = mean(x);\n let tempValue;\n let sumSquaredDeviations = 0;\n let sumCubedDeviations = 0;\n\n for (let i = 0; i < x.length; i++) {\n tempValue = x[i] - meanValue;\n sumSquaredDeviations += tempValue * tempValue;\n sumCubedDeviations += tempValue * tempValue * tempValue;\n }\n\n // this is Bessels' Correction: an adjustment made to sample statistics\n // that allows for the reduced degree of freedom entailed in calculating\n // values from samples rather than complete populations.\n const besselsCorrection = x.length - 1;\n\n // Find the mean value of that list\n const 
theSampleStandardDeviation = Math.sqrt(\n sumSquaredDeviations / besselsCorrection,\n );\n\n const n = x.length;\n const cubedS = Math.pow(theSampleStandardDeviation, 3);\n\n return (n * sumCubedDeviations) / ((n - 1) * (n - 2) * cubedS);\n}\n\nexport default sampleSkewness;\n","/**\n * Sampling with replacement is a type of sampling that allows the same\n * item to be picked out of a population more than once.\n *\n * @param {Array<*>} x an array of any kind of value\n * @param {number} n count of how many elements to take\n * @param {Function} [randomSource=Math.random] an optional entropy source that\n * returns numbers between 0 inclusive and 1 exclusive: the range [0, 1)\n * @return {Array} n sampled items from the population\n * @example\n * var values = [1, 2, 3, 4];\n * sampleWithReplacement(values, 2); // returns 2 random values, like [2, 4];\n */\nfunction sampleWithReplacement(x, n, randomSource) {\n if (x.length === 0) {\n return [];\n }\n\n // a custom random number source can be provided if you want to use\n // a fixed seed or another random number generator, like\n // [random-js](https://www.npmjs.org/package/random-js)\n randomSource = randomSource || Math.random;\n\n const length = x.length;\n const sample = [];\n\n for (let i = 0; i < n; i++) {\n const index = Math.floor(randomSource() * length);\n\n sample.push(x[index]);\n }\n\n return sample;\n}\n\nexport default sampleWithReplacement;\n","import max from \"./max.js\";\nimport silhouette from \"./silhouette.js\";\n\n/**\n * Calculate the [silhouette metric](https://en.wikipedia.org/wiki/Silhouette_(clustering))\n * for a set of N-dimensional points arranged in groups. The metric is the largest\n * individual silhouette value for the data.\n *\n * @param {Array>} points N-dimensional coordinates of points.\n * @param {Array} labels Labels of points. This must be the same length as `points`,\n * and values must lie in [0..G-1], where G is the number of groups.\n * @return {number} The silhouette metric for the groupings.\n *\n * @example\n * silhouetteMetric([[0.25], [0.75]], [0, 0]); // => 1.0\n */\nfunction silhouetteMetric(points, labels) {\n const values = silhouette(points, labels);\n return max(values);\n}\n\nexport default silhouetteMetric;\n","/**\n * When removing a value from a list, one does not have to necessary\n * recompute the mean of the list in linear time. They can instead use\n * this function to compute the new mean by providing the current mean,\n * the number of elements in the list that produced it and the value to remove.\n *\n * @since 3.0.0\n * @param {number} mean current mean\n * @param {number} n number of items in the list\n * @param {number} value the value to remove\n * @returns {number} the new mean\n *\n * @example\n * subtractFromMean(20.5, 6, 53); // => 14\n */\nfunction subtractFromMean(mean, n, value) {\n return (mean * n - value) / (n - 1);\n}\n\nexport default subtractFromMean;\n","import mean from \"./mean.js\";\nimport standardDeviation from \"./standard_deviation.js\";\n\n/**\n * This is to compute [a one-sample t-test](https://en.wikipedia.org/wiki/Student%27s_t-test#One-sample_t-test), comparing the mean\n * of a sample to a known value, x.\n *\n * in this case, we're trying to determine whether the\n * population mean is equal to the value that we know, which is `x`\n * here. 
Usually the results here are used to look up a\n * [p-value](http://en.wikipedia.org/wiki/P-value), which, for\n * a certain level of significance, will let you determine that the\n * null hypothesis can or cannot be rejected.\n *\n * @param {Array} x sample of one or more numbers\n * @param {number} expectedValue expected value of the population mean\n * @returns {number} value\n * @example\n * tTest([1, 2, 3, 4, 5, 6], 3.385).toFixed(2); // => '0.16'\n */\nfunction tTest(x, expectedValue) {\n // The mean of the sample\n const sampleMean = mean(x);\n\n // The standard deviation of the sample\n const sd = standardDeviation(x);\n\n // Square root the length of the sample\n const rootN = Math.sqrt(x.length);\n\n // returning the t value\n return (sampleMean - expectedValue) / (sd / rootN);\n}\n\nexport default tTest;\n","import mean from \"./mean.js\";\nimport sampleVariance from \"./sample_variance.js\";\n\n/**\n * This is to compute [two sample t-test](http://en.wikipedia.org/wiki/Student's_t-test).\n * Tests whether \"mean(X)-mean(Y) = difference\", (\n * in the most common case, we often have `difference == 0` to test if two samples\n * are likely to be taken from populations with the same mean value) with\n * no prior knowledge on standard deviations of both samples\n * other than the fact that they have the same standard deviation.\n *\n * Usually the results here are used to look up a\n * [p-value](http://en.wikipedia.org/wiki/P-value), which, for\n * a certain level of significance, will let you determine that the\n * null hypothesis can or cannot be rejected.\n *\n * `diff` can be omitted if it equals 0.\n *\n * [This is used to reject](https://en.wikipedia.org/wiki/Exclusion_of_the_null_hypothesis)\n * a null hypothesis that the two populations that have been sampled into\n * `sampleX` and `sampleY` are equal to each other.\n *\n * @param {Array} sampleX a sample as an array of numbers\n * @param {Array} sampleY a sample as an array of numbers\n * @param {number} [difference=0]\n * @returns {number|null} test result\n *\n * @example\n * tTestTwoSample([1, 2, 3, 4], [3, 4, 5, 6], 0); // => -2.1908902300206643\n */\nfunction tTestTwoSample(sampleX, sampleY, difference) {\n const n = sampleX.length;\n const m = sampleY.length;\n\n // If either sample doesn't actually have any values, we can't\n // compute this at all, so we return `null`.\n if (!n || !m) {\n return null;\n }\n\n // default difference (mu) is zero\n if (!difference) {\n difference = 0;\n }\n\n const meanX = mean(sampleX);\n const meanY = mean(sampleY);\n const sampleVarianceX = sampleVariance(sampleX);\n const sampleVarianceY = sampleVariance(sampleY);\n\n if (\n typeof meanX === \"number\" &&\n typeof meanY === \"number\" &&\n typeof sampleVarianceX === \"number\" &&\n typeof sampleVarianceY === \"number\"\n ) {\n const weightedVariance =\n ((n - 1) * sampleVarianceX + (m - 1) * sampleVarianceY) /\n (n + m - 2);\n\n return (\n (meanX - meanY - difference) /\n Math.sqrt(weightedVariance * (1 / n + 1 / m))\n );\n }\n}\n\nexport default tTestTwoSample;\n","/**\n * This function calculates the Wilcoxon rank sum statistic for the first sample\n * with respect to the second. The Wilcoxon rank sum test is a non-parametric\n * alternative to the t-test which is equivalent to the\n * [Mann-Whitney U test](https://en.wikipedia.org/wiki/Mann%E2%80%93Whitney_U_test).\n * The statistic is calculated by pooling all the observations together, ranking them,\n * and then summing the ranks associated with one of the samples. 
If this rank sum is\n * sufficiently large or small we reject the hypothesis that the two samples come\n * from the same distribution in favor of the alternative that one is shifted with\n * respect to the other.\n *\n * @param {Array} sampleX a sample as an array of numbers\n * @param {Array} sampleY a sample as an array of numbers\n * @returns {number} rank sum for sampleX\n *\n * @example\n * wilcoxonRankSum([1, 4, 8], [9, 12, 15]); // => 6\n */\nfunction wilcoxonRankSum(sampleX, sampleY) {\n if (!sampleX.length || !sampleY.length) {\n throw new Error(\"Neither sample can be empty\");\n }\n\n const pooledSamples = sampleX\n .map((x) => ({ label: \"x\", value: x }))\n .concat(sampleY.map((y) => ({ label: \"y\", value: y })))\n .sort((a, b) => a.value - b.value);\n\n for (let rank = 0; rank < pooledSamples.length; rank++) {\n pooledSamples[rank].rank = rank;\n }\n\n let tiedRanks = [pooledSamples[0].rank];\n for (let i = 1; i < pooledSamples.length; i++) {\n if (pooledSamples[i].value === pooledSamples[i - 1].value) {\n tiedRanks.push(pooledSamples[i].rank);\n if (i === pooledSamples.length - 1) {\n replaceRanksInPlace(pooledSamples, tiedRanks);\n }\n } else if (tiedRanks.length > 1) {\n replaceRanksInPlace(pooledSamples, tiedRanks);\n } else {\n tiedRanks = [pooledSamples[i].rank];\n }\n }\n\n function replaceRanksInPlace(pooledSamples, tiedRanks) {\n const average = (tiedRanks[0] + tiedRanks[tiedRanks.length - 1]) / 2;\n for (let i = 0; i < tiedRanks.length; i++) {\n pooledSamples[tiedRanks[i]].rank = average;\n }\n }\n\n let rankSum = 0;\n\n for (let i = 0; i < pooledSamples.length; i++) {\n const sample = pooledSamples[i];\n if (sample.label === \"x\") {\n rankSum += sample.rank + 1;\n }\n }\n\n return rankSum;\n}\n\nexport default wilcoxonRankSum;\n","/**\n * The [Z-Score, or Standard Score](http://en.wikipedia.org/wiki/Standard_score).\n *\n * The standard score is the number of standard deviations an observation\n * or datum is above or below the mean. Thus, a positive standard score\n * represents a datum above the mean, while a negative standard score\n * represents a datum below the mean. 
It is a dimensionless quantity\n * obtained by subtracting the population mean from an individual raw\n * score and then dividing the difference by the population standard\n * deviation.\n *\n * The z-score is only defined if one knows the population parameters;\n * if one only has a sample set, then the analogous computation with\n * sample mean and sample standard deviation yields the\n * Student's t-statistic.\n *\n * @param {number} x\n * @param {number} mean\n * @param {number} standardDeviation\n * @return {number} z score\n * @example\n * zScore(78, 80, 5); // => -0.4\n */\nfunction zScore(x, mean, standardDeviation) {\n return (x - mean) / standardDeviation;\n}\n\nexport default zScore;\n"],"names":["sum","x","length","let","transition","correction","Number","NaN","i","Math","abs","mean","Error","sumNthPowerDeviations","n","const","tempValue","meanValue","pow","variance","standardDeviation","v","sqrt","modeSorted","sorted","last","value","maxSeen","seenThis","numericSort","slice","sort","a","b","min","max","sumSimple","quantileSorted","p","idx","ceil","quickselect","arr","k","left","right","m","z","log","s","exp","sd","floor","t","j","swap","tmp","quantile","copy","Array","isArray","indices","push","quantileIndex","compare","stack","r","pop","l","quantileSelect","multiQuantileSelect","results","len","quantileRankSorted","mid","lo","hi","lowerBound","u","upperBound","interquartileRange","q1","q2","median","medianAbsoluteDeviation","medianValue","medianAbsoluteDeviations","shuffleInPlace","randomSource","random","temporary","index","shuffle","sample","makeMatrix","columns","rows","matrix","column","uniqueCountSorted","lastSeenValue","uniqueValueCount","ssq","sums","sumsOfSquares","sji","muji","fillMatrixColumn","iMin","iMax","cluster","backtrackMatrix","jlow","ssqjlow","ssqj","jhigh","sampleCovariance","y","xmean","ymean","sampleVariance","sampleStandardDeviation","sampleVarianceX","sampleCorrelation","combineMeans","mean1","n1","mean2","n2","meanSimple","rootMeanSquare","sumOfSquares","BayesianClassifier","this","totalCount","data","prototype","train","item","category","undefined","score","odds","oddsSums","combination","PerceptronModel","weights","bias","predict","features","label","prediction","gradient","epsilon","factorial","accumulator","COEFFICIENTS","LOGSQRT2PI","PI","chiSquaredDistributionTable","SQRT_2PI","kernels","gaussian","bandwidthMethods","nrd","stddev","iqr","kernelDensityEstimation","X","kernel","bandwidthMethod","kernelFn","bandwidth","cumulativeDistribution","round","standardNormalTable","errorFunction","tau","inverseErrorFunction","inv","sign","TypeError","euclideanDistance","diff","labelPoints","points","centroids","map","minDist","MAX_VALUE","dist","calculateCentroids","labels","numCluster","dimension","counts","fill","numPoints","point","current","centroid","calculateChange","total","silhouette","groupings","numGroups","result","createGroups","distances","calculateAllDistances","meanDistanceFromPointToGroup","meanDistanceToNearestGroup","which","d","group","relativeError","actual","expected","newValue","tolerance","trials","probability","cumulativeProbability","cells","binomialCoefficient","func","start","end","maxIterations","errorTolerance","output","distributionType","significance","chiSquared","hypothesizedDistribution","observedFrequencies","expectedFrequencies","degreesOfFreedom","chunkSize","nClusters","nValues","shift","shiftedValue","fillMatrices","clusters","clusterRight","clusterLeft","combinations","subI","subsetCombinations","next","combinationList
","unshift","combinationsReplacement","concat","variance1","variance2","newMean","absZ","nClasses","theMin","theMax","breaks","breakSize","gamma","isInteger","sin","seriesDenom","E","POSITIVE_INFINITY","g","reciprocalSum","matrices","lowerClassLimits","varianceCombinations","tmp1","tmp2","sumSquares","w","i4","lowerClassLimit","val","jenksMatrices","kclass","countNum","jenksBreaks","oldCentroids","newCentroids","change","dataLength","sumX","sumY","sumXX","sumXY","mb","mode","Map","modeCount","newCount","get","set","sampleX","sampleY","alternative","testStatistic","testStatDsn","allData","midIndex","permLeft","permRight","permTestStatistic","numExtremeTStats","elements","indexes","permutations","swapFrom","temp","lambda","factorialX","average","err","secondCentralMoment","fourthCentralMoment","xIndexes","pair","yIndexes","xRanks","yRanks","sumSquaredDeviations","sumCubedDeviations","besselsCorrection","theSampleStandardDeviation","expectedValue","difference","meanX","meanY","sampleVarianceY","weightedVariance","pooledSamples","rank","tiedRanks","replaceRanksInPlace","rankSum"],"mappings":"0OAkBA,SAASA,EAAIC,GAET,GAAiB,IAAbA,EAAEC,OACF,OAAO,EAIXC,IAKIC,EALAJ,EAAMC,EAAE,GAGRI,EAAa,EAIjB,GAAmB,iBAARL,EACP,OAAOM,OAAOC,IAGlB,IAAKJ,IAAIK,EAAI,EAAGA,EAAIP,EAAEC,OAAQM,IAAK,CAC/B,GAAoB,iBAATP,EAAEO,GACT,OAAOF,OAAOC,IAElBH,EAAaJ,EAAMC,EAAEO,GAIjBC,KAAKC,IAAIV,IAAQS,KAAKC,IAAIT,EAAEO,IAC5BH,GAAcL,EAAMI,EAAaH,EAAEO,GAEnCH,GAAcJ,EAAEO,GAAKJ,EAAaJ,EAGtCA,EAAMI,CACT,CAGD,OAAOJ,EAAMK,CACjB,CCvCA,SAASM,EAAKV,GACV,GAAiB,IAAbA,EAAEC,OACF,MAAM,IAAIU,MAAM,yCAGpB,OAAOZ,EAAIC,GAAKA,EAAEC,MACtB,CCLA,SAASW,EAAsBZ,EAAGa,GAC9BC,IAEIC,EACAR,EAHES,EAAYN,EAAKV,GACnBD,EAAM,EAOV,GAAU,IAANc,EACA,IAAKN,EAAI,EAAGA,EAAIP,EAAEC,OAAQM,IAEtBR,IADAgB,EAAYf,EAAEO,GAAKS,GACAD,OAGvB,IAAKR,EAAI,EAAGA,EAAIP,EAAEC,OAAQM,IACtBR,GAAOS,KAAKS,IAAIjB,EAAEO,GAAKS,EAAWH,GAI1C,OAAOd,CACX,CCtBA,SAASmB,EAASlB,GACd,GAAiB,IAAbA,EAAEC,OACF,MAAM,IAAIU,MAAM,6CAKpB,OAAOC,EAAsBZ,EAAG,GAAKA,EAAEC,MAC3C,CCNA,SAASkB,EAAkBnB,GACvB,GAAiB,IAAbA,EAAEC,OACF,OAAO,EAEXa,IAAMM,EAAIF,EAASlB,GACnB,OAAOQ,KAAKa,KAAKD,EACrB,CCPA,SAASE,EAAWC,GAGhB,GAAsB,IAAlBA,EAAOtB,OACP,MAAM,IAAIU,MAAM,yCAEpB,GAAsB,IAAlBY,EAAOtB,OACP,OAAOsB,EAAO,GAmBlB,IAbArB,IAAIsB,EAAOD,EAAO,GAEdE,EAAQpB,OAAOC,IAEfoB,EAAU,EAGVC,EAAW,EAMNpB,EAAI,EAAGA,EAAIgB,EAAOtB,OAAS,EAAGM,IAE/BgB,EAAOhB,KAAOiB,GAGVG,EAAWD,IACXA,EAAUC,EACVF,EAAQD,GAEZG,EAAW,EACXH,EAAOD,EAAOhB,IAIdoB,IAGR,OAAOF,CACX,CC5CA,SAASG,EAAY5B,GACjB,OACIA,EAEK6B,QAEAC,MAAK,SAAUC,EAAGC,GACf,OAAOD,EAAIC,CAC3B,GAEA,CCjBA,SAASC,EAAIjC,GACT,GAAiB,IAAbA,EAAEC,OACF,MAAM,IAAIU,MAAM,wCAIpB,IADAT,IAAIuB,EAAQzB,EAAE,GACLO,EAAI,EAAGA,EAAIP,EAAEC,OAAQM,IACtBP,EAAEO,GAAKkB,IACPA,EAAQzB,EAAEO,IAGlB,OAAOkB,CACX,CCVA,SAASS,EAAIlC,GACT,GAAiB,IAAbA,EAAEC,OACF,MAAM,IAAIU,MAAM,wCAIpB,IADAT,IAAIuB,EAAQzB,EAAE,GACLO,EAAI,EAAGA,EAAIP,EAAEC,OAAQM,IACtBP,EAAEO,GAAKkB,IACPA,EAAQzB,EAAEO,IAGlB,OAAOkB,CACX,CCbA,SAASU,EAAUnC,GAEf,IADAE,IAAIuB,EAAQ,EACHlB,EAAI,EAAGA,EAAIP,EAAEC,OAAQM,IAAK,CAC/B,GAAoB,iBAATP,EAAEO,GACT,OAAOF,OAAOC,IAElBmB,GAASzB,EAAEO,EACd,CACD,OAAOkB,CACX,CCPA,SAASW,EAAepC,EAAGqC,GACvBvB,IAAMwB,EAAMtC,EAAEC,OAASoC,EACvB,GAAiB,IAAbrC,EAAEC,OACF,MAAM,IAAIU,MAAM,8CACb,GAAI0B,EAAI,GAAKA,EAAI,EACpB,MAAM,IAAI1B,MAAM,qCACb,OAAU,IAAN0B,EAEArC,EAAEA,EAAEC,OAAS,GACP,IAANoC,EAEArC,EAAE,GACFsC,EAAM,GAAM,EAEZtC,EAAEQ,KAAK+B,KAAKD,GAAO,GACnBtC,EAAEC,OAAS,GAAM,GAGhBD,EAAEsC,EAAM,GAAKtC,EAAEsC,IAAQ,EAIxBtC,EAAEsC,EAEjB,CCrBA,SAASE,EAAYC,EAAKC,EAAGC,EAAMC,GAI/B,IAHAD,EAAOA,GAAQ,EACfC,EAAQA,GAASH,EAAIxC,OAAS,EAEvB2C,EAAQD,GAAM,CAEjB,GAAIC,EAAQD,EAAO,IAAK,CACpB7B,IAAMD,EAA
I+B,EAAQD,EAAO,EACnBE,EAAIH,EAAIC,EAAO,EACfG,EAAItC,KAAKuC,IAAIlC,GACbmC,EAAI,GAAMxC,KAAKyC,IAAK,EAAIH,EAAK,GAC/BI,EAAK,GAAM1C,KAAKa,KAAMyB,EAAIE,GAAKnC,EAAImC,GAAMnC,GACzCgC,EAAIhC,EAAI,EAAI,IAAGqC,IAAO,GAM1BV,EAAYC,EAAKC,EALDlC,KAAK0B,IAAIS,EAAMnC,KAAK2C,MAAMT,EAAKG,EAAIG,EAAKnC,EAAIqC,IAC3C1C,KAAKyB,IAClBW,EACApC,KAAK2C,MAAMT,GAAM7B,EAAIgC,GAAKG,EAAKnC,EAAIqC,IAG1C,CAEDpC,IAAMsC,EAAIX,EAAIC,GACVnC,EAAIoC,EACJU,EAAIT,EAKR,IAHAU,EAAKb,EAAKE,EAAMD,GACZD,EAAIG,GAASQ,GAAGE,EAAKb,EAAKE,EAAMC,GAE7BrC,EAAI8C,GAAG,CAIV,IAHAC,EAAKb,EAAKlC,EAAG8C,GACb9C,IACA8C,IACOZ,EAAIlC,GAAK6C,GAAG7C,IACnB,KAAOkC,EAAIY,GAAKD,GAAGC,GACtB,CAEGZ,EAAIE,KAAUS,EAAGE,EAAKb,EAAKE,EAAMU,GAGjCC,EAAKb,IADLY,EACaT,GAGbS,GAAKX,IAAGC,EAAOU,EAAI,GACnBX,GAAKW,IAAGT,EAAQS,EAAI,EAC3B,CACL,CAEA,SAASC,EAAKb,EAAKlC,EAAG8C,GAClBvC,IAAMyC,EAAMd,EAAIlC,GAChBkC,EAAIlC,GAAKkC,EAAIY,GACbZ,EAAIY,GAAKE,CACb,CC3CA,SAASC,EAASxD,EAAGqC,GACjBvB,IAAM2C,EAAOzD,EAAE6B,QAEf,GAAI6B,MAAMC,QAAQtB,GAAI,EA4B1B,SAA6BI,EAAKJ,GAE9B,IADAvB,IAAM8C,EAAU,CAAC,GACRrD,EAAI,EAAGA,EAAI8B,EAAEpC,OAAQM,IAC1BqD,EAAQC,KAAKC,EAAcrB,EAAIxC,OAAQoC,EAAE9B,KAE7CqD,EAAQC,KAAKpB,EAAIxC,OAAS,GAC1B2D,EAAQ9B,KAAKiC,GAEbjD,IAAMkD,EAAQ,CAAC,EAAGJ,EAAQ3D,OAAS,GAEnC,KAAO+D,EAAM/D,QAAQ,CACjBa,IAAMmD,EAAIzD,KAAK+B,KAAKyB,EAAME,OACpBC,EAAI3D,KAAK2C,MAAMa,EAAME,OAC3B,KAAID,EAAIE,GAAK,GAAb,CAEArD,IAAM+B,EAAIrC,KAAK2C,OAAOgB,EAAIF,GAAK,GAC/BG,EACI3B,EACAmB,EAAQf,GACRrC,KAAK2C,MAAMS,EAAQO,IACnB3D,KAAK+B,KAAKqB,EAAQK,KAGtBD,EAAMH,KAAKM,EAAGtB,EAAGA,EAAGoB,EAVK,CAW5B,CACL,CAlDQI,CAAoBZ,EAAMpB,GAI1B,IAFAvB,IAAMwD,EAAU,GAEP/D,EAAI,EAAGA,EAAI8B,EAAEpC,OAAQM,IAC1B+D,EAAQ/D,GAAK6B,EAAeqB,EAAMpB,EAAE9B,IAExC,OAAO+D,CACf,CAGQ,OADAF,EAAeX,EADHK,EAAcL,EAAKxD,OAAQoC,GACb,EAAGoB,EAAKxD,OAAS,GACpCmC,EAAeqB,EAAMpB,EAEpC,CAEA,SAAS+B,EAAe3B,EAAKC,EAAGC,EAAMC,GAC9BF,EAAI,GAAM,EACVF,EAAYC,EAAKC,EAAGC,EAAMC,IAG1BJ,EAAYC,EADZC,EAAIlC,KAAK2C,MAAMT,GACKC,EAAMC,GAC1BJ,EAAYC,EAAKC,EAAI,EAAGA,EAAI,EAAGE,GAEvC,CA6BA,SAASmB,EAAQhC,EAAGC,GAChB,OAAOD,EAAIC,CACf,CAEA,SAAS8B,EAAcS,EAAKlC,GACxBvB,IAAMwB,EAAMiC,EAAMlC,EAClB,OAAU,IAANA,EAEOkC,EAAM,EACA,IAANlC,EAEA,EACAC,EAAM,GAAM,EAEZ9B,KAAK+B,KAAKD,GAAO,EACjBiC,EAAM,GAAM,EAGZjC,EAAM,GAINA,CAEf,CC3FA,SAASkC,EAAmBxE,EAAGyB,GAE3B,GAAIA,EAAQzB,EAAE,GACV,OAAO,EAIX,GAAIyB,EAAQzB,EAAEA,EAAEC,OAAS,GACrB,OAAO,EAGXC,IAAIiE,EA2BR,SAAoBnE,EAAGyB,GACnBvB,IAAIuE,EAAM,EACNC,EAAK,EACLC,EAAK3E,EAAEC,OAEX,KAAOyE,EAAKC,GAGJlD,GAASzB,EAFbyE,EAAOC,EAAKC,IAAQ,GAGhBA,EAAKF,EAELC,IAAOD,EAIf,OAAOC,CACX,CA3CYE,CAAW5E,EAAGyB,GAGtB,GAAIzB,EAAEmE,KAAO1C,EACT,OAAO0C,EAAInE,EAAEC,OAGjBkE,IAEArD,IAAM+D,EAoCV,SAAoB7E,EAAGyB,GACnBvB,IAAIuE,EAAM,EACNC,EAAK,EACLC,EAAK3E,EAAEC,OAEX,KAAOyE,EAAKC,GAGJlD,GAASzB,EAFbyE,EAAOC,EAAKC,IAAQ,GAGhBD,IAAOD,EAEPE,EAAKF,EAIb,OAAOC,CACX,CApDcI,CAAW9E,EAAGyB,GAGxB,GAAIoD,IAAMV,EACN,OAAOA,EAAInE,EAAEC,OAOjBa,IAAMmD,EAAIY,EAAIV,EAAI,EAIlB,OAHaF,GAAKY,EAAIV,GAAM,EACTF,EAELjE,EAAEC,MACpB,CCrCA,SAAS8E,EAAmB/E,GAGxBc,IAAMkE,EAAKxB,EAASxD,EAAG,KACjBiF,EAAKzB,EAASxD,EAAG,KAEvB,GAAkB,iBAAPgF,GAAiC,iBAAPC,EACjC,OAAOD,EAAKC,CAEpB,CCLA,SAASC,EAAOlF,GACZ,OAAQwD,EAASxD,EAAG,GACxB,CCRA,SAASmF,EAAwBnF,GAK7B,IAJAc,IAAMsE,EAAcF,EAAOlF,GACrBqF,EAA2B,GAGxB9E,EAAI,EAAGA,EAAIP,EAAEC,OAAQM,IAC1B8E,EAAyBxB,KAAKrD,KAAKC,IAAIT,EAAEO,GAAK6E,IAIlD,OAAOF,EAAOG,EAClB,CCNA,SAASC,EAAetF,EAAGuF,GAIvBA,EAAeA,GAAgB/E,KAAKgF,OAcpC,IAVAtF,IAIIuF,EAGAC,EAPAzF,EAASD,EAAEC,OAURA,EAAS,GAGZyF,EAAQlF,KAAK2C,MAAMoC,IAAiBtF,KAGpCwF,EAAYzF,EAAEC,GAGdD,EAAEC,GAAUD,EAAE0F,GACd1F,EAAE0F,GAASD,EAGf,OAAOzF,CACX,CCjCA,SAAS2F,EAAQ3F,EAAGuF,GAKhB,OAAOD,EAHQtF,EAAE6B,QAGa0D,EAClC,CCHA,SAASK,EAAO5F,EAAGa,EAAG0E,GAKlB,OAHiBI,EAAQ
3F,EAAGuF,GAGZ1D,MAAM,EAAGhB,EAC7B,CCfA,SAASgF,EAAWC,EAASC,GAEzB,IADAjF,IAAMkF,EAAS,GACNzF,EAAI,EAAGA,EAAIuF,EAASvF,IAAK,CAE9B,IADAO,IAAMmF,EAAS,GACN5C,EAAI,EAAGA,EAAI0C,EAAM1C,IACtB4C,EAAOpC,KAAK,GAEhBmC,EAAOnC,KAAKoC,EACf,CACD,OAAOD,CACX,CCNA,SAASE,EAAkBlG,GAGvB,IAFAE,IACIiG,EADAC,EAAmB,EAEd7F,EAAI,EAAGA,EAAIP,EAAEC,OAAQM,IAChB,IAANA,GAAWP,EAAEO,KAAO4F,IACpBA,EAAgBnG,EAAEO,GAClB6F,KAGR,OAAOA,CACX,CCPA,SAASC,EAAIhD,EAAG9C,EAAG+F,EAAMC,GACrBrG,IAAIsG,EACJ,GAAInD,EAAI,EAAG,CACPvC,IAAM2F,GAAQH,EAAK/F,GAAK+F,EAAKjD,EAAI,KAAO9C,EAAI8C,EAAI,GAChDmD,EACID,EAAchG,GAAKgG,EAAclD,EAAI,IAAM9C,EAAI8C,EAAI,GAAKoD,EAAOA,CAC3E,MACQD,EAAMD,EAAchG,GAAM+F,EAAK/F,GAAK+F,EAAK/F,IAAOA,EAAI,GAExD,OAAIiG,EAAM,EACC,EAEJA,CACX,CAeA,SAASE,EACLC,EACAC,EACAC,EACAb,EACAc,EACAR,EACAC,GAEA,KAAII,EAAOC,GAAX,CAKA9F,IAAMP,EAAIC,KAAK2C,OAAOwD,EAAOC,GAAQ,GAErCZ,EAAOa,GAAStG,GAAKyF,EAAOa,EAAU,GAAGtG,EAAI,GAC7CuG,EAAgBD,GAAStG,GAAKA,EAE9BL,IAAI6G,EAAOF,EAEPF,EAAOE,IACPE,EAAOvG,KAAK0B,IAAI6E,EAAMD,EAAgBD,GAASF,EAAO,IAAM,IAEhEI,EAAOvG,KAAK0B,IAAI6E,EAAMD,EAAgBD,EAAU,GAAGtG,IAAM,GAEzDL,IAOIsG,EAEAQ,EACAC,EAVAC,EAAQ3G,EAAI,EACZqG,EAAOZ,EAAO,GAAG/F,OAAS,IAE1BiH,EAAQ1G,KAAKyB,IAAIiF,EAAOJ,EAAgBD,GAASD,EAAO,IAAM,IAQlE,IAAK1G,IAAImD,EAAI6D,EAAO7D,GAAK0D,MACrBP,EAAMH,EAAIhD,EAAG9C,EAAG+F,EAAMC,IAEZP,EAAOa,EAAU,GAAGE,EAAO,IAAMf,EAAOa,GAAStG,MAH9B8C,GAU7B2D,EAFSX,EAAIU,EAAMxG,EAAG+F,EAAMC,GAETP,EAAOa,EAAU,GAAGE,EAAO,IAEhCf,EAAOa,GAAStG,KAE1ByF,EAAOa,GAAStG,GAAKyG,EACrBF,EAAgBD,GAAStG,GAAKwG,GAElCA,KAEAE,EAAOT,EAAMR,EAAOa,EAAU,GAAGxD,EAAI,IAC1B2C,EAAOa,GAAStG,KACvByF,EAAOa,GAAStG,GAAK0G,EACrBH,EAAgBD,GAAStG,GAAK8C,GAItCqD,EACIC,EACApG,EAAI,EACJsG,EACAb,EACAc,EACAR,EACAC,GAEJG,EACInG,EAAI,EACJqG,EACAC,EACAb,EACAc,EACAR,EACAC,EApEH,CAsEL,CC/GA,SAASY,EAAiBnH,EAAGoH,GAEzB,GAAIpH,EAAEC,SAAWmH,EAAEnH,OACf,MAAM,IAAIU,MAAM,wDAGpB,GAAIX,EAAEC,OAAS,EACX,MAAM,IAAIU,MACN,qEAgBR,IARAG,IAAMuG,EAAQ3G,EAAKV,GACbsH,EAAQ5G,EAAK0G,GACfrH,EAAM,EAMDQ,EAAI,EAAGA,EAAIP,EAAEC,OAAQM,IAC1BR,IAAQC,EAAEO,GAAK8G,IAAUD,EAAE7G,GAAK+G,GASpC,OAAOvH,GAHmBC,EAAEC,OAAS,EAIzC,CC/BA,SAASsH,EAAevH,GACpB,GAAIA,EAAEC,OAAS,EACX,MAAM,IAAIU,MAAM,oDAWpB,OARkCC,EAAsBZ,EAAG,IAKjCA,EAAEC,OAAS,EAIzC,CCrBA,SAASuH,EAAwBxH,GAC7Bc,IAAM2G,EAAkBF,EAAevH,GACvC,OAAOQ,KAAKa,KAAKoG,EACrB,CCDA,SAASC,EAAkB1H,EAAGoH,GAK1B,OAJYD,EAAiBnH,EAAGoH,GACnBI,EAAwBxH,GACxBwH,EAAwBJ,EAGzC,CCHA,SAASO,EAAaC,EAAOC,EAAIC,EAAOC,GACpC,OAAQH,EAAQC,EAAKC,EAAQC,IAAOF,EAAKE,EAC7C,CCGA,SAASC,EAAWhI,GAChB,GAAiB,IAAbA,EAAEC,OACF,MAAM,IAAIU,MAAM,+CAGpB,OAAOwB,EAAUnC,GAAKA,EAAEC,MAC5B,CCdA,SAASgI,EAAejI,GACpB,GAAiB,IAAbA,EAAEC,OACF,MAAM,IAAIU,MAAM,mDAIpB,IADAT,IAAIgI,EAAe,EACV3H,EAAI,EAAGA,EAAIP,EAAEC,OAAQM,IAC1B2H,GAAgB1H,KAAKS,IAAIjB,EAAEO,GAAI,GAGnC,OAAOC,KAAKa,KAAK6G,EAAelI,EAAEC,OACtC,CCLM,IAAAkI,EAGF,WAGIC,KAAKC,WAAa,EAElBD,KAAKE,KAAO,EAChB,EAUAH,EAAAI,UAAAC,MAAA,SAAMC,EAAMC,GAQR,IAAK5H,IAAM4B,KALN0F,KAAKE,KAAKI,KACXN,KAAKE,KAAKI,GAAY,IAIVD,EAAM,CAClB3H,IAAMM,EAAIqH,EAAK/F,QAGgBiG,IAA3BP,KAAKE,KAAKI,GAAUhG,KACpB0F,KAAKE,KAAKI,GAAUhG,GAAK,CAAA,QAEKiG,IAA9BP,KAAKE,KAAKI,GAAUhG,GAAGtB,KACvBgH,KAAKE,KAAKI,GAAUhG,GAAGtB,GAAK,GAIhCgH,KAAKE,KAAKI,GAAUhG,GAAGtB,IAC1B,CAGDgH,KAAKC,YACT,cAUAO,MAAK,SAACH,GAEF3H,IACI4H,EADEG,EAAO,CAAA,EAKb,IAAK/H,IAAM4B,KAAK+F,EAAM,CAClB3H,IAAMM,EAAIqH,EAAK/F,GACf,IAAKgG,KAAYN,KAAKE,KAGlBO,EAAKH,GAAY,GAMbN,KAAKE,KAAKI,GAAUhG,GACpBmG,EAAKH,GAAUhG,EAAI,IAAMtB,IACpBgH,KAAKE,KAAKI,GAAUhG,GAAGtB,IAAM,GAAKgH,KAAKC,WAE5CQ,EAAKH,GAAUhG,EAAI,IAAMtB,GAAK,CAGzC,CAGDN,IAAMgI,EAAW,CAAA,EAEjB,IAAKJ,KAAYG,EAKb,IAAK/H,IAAMiI,KADXD,EAASJ,GAAY,EACKG,EAAKH,GAC3BI,EAASJ,IAAaG,EAAKH,GAAUK,GAI7C,OAAOD,CACX,EC/FE,IAAAE,E
AGF,WAGIZ,KAAKa,QAAU,GAIfb,KAAKc,KAAO,CAChB,cAQAC,QAAO,SAACC,GAGJ,GAAIA,EAASnJ,SAAWmI,KAAKa,QAAQhJ,OACjC,OAAO,KAMX,IADAC,IAAI0I,EAAQ,EACHrI,EAAI,EAAGA,EAAI6H,KAAKa,QAAQhJ,OAAQM,IACrCqI,GAASR,KAAKa,QAAQ1I,GAAK6I,EAAS7I,GAKxC,OAHAqI,GAASR,KAAKc,MAGF,EACD,EAEA,CAEf,EAUAF,EAAAT,UAAAC,MAAA,SAAMY,EAAUC,GAEZ,GAAc,IAAVA,GAAyB,IAAVA,EACf,OAAO,KAOPD,EAASnJ,SAAWmI,KAAKa,QAAQhJ,SACjCmI,KAAKa,QAAUG,EACfhB,KAAKc,KAAO,GAGhBpI,IAAMwI,EAAalB,KAAKe,QAAQC,GAEhC,GAA0B,iBAAfE,GAA2BA,IAAeD,EAAO,CAExD,IADAvI,IAAMyI,EAAWF,EAAQC,EAChB/I,EAAI,EAAGA,EAAI6H,KAAKa,QAAQhJ,OAAQM,IACrC6H,KAAKa,QAAQ1I,IAAMgJ,EAAWH,EAAS7I,GAE3C6H,KAAKc,MAAQK,CAChB,CACD,OAAOnB,IACX,EC7DC,IAACoB,EAAU,KCtBhB,SAASC,EAAU5I,GAEf,GAAIA,EAAI,EACJ,MAAM,IAAIF,MAAM,2CAGpB,GAAIH,KAAK2C,MAAMtC,KAAOA,EAClB,MAAM,IAAIF,MAAM,uCAQpB,IADAT,IAAIwJ,EAAc,EACTnJ,EAAI,EAAGA,GAAKM,EAAGN,IAGpBmJ,GAAenJ,EAEnB,OAAOmJ,CACX,CChCA5I,IAAM6I,EAAe,CACjB,kBAAwB,mBAAwB,kBAChD,oBAAwB,kBAAwB,qBAChD,sBAA4B,qBAC5B,sBAA4B,sBAC5B,uBAA2B,qBAC3B,sBAA4B,sBAC5B,uBAIEC,EAAapJ,KAAKuC,IAAIvC,KAAKa,KAAK,EAAIb,KAAKqJ,KCD1C,IAACC,EAA8B,CAChC,EAAG,CACC,KAAO,EACP,IAAM,EACN,KAAO,EACP,IAAM,EACN,GAAK,IACL,GAAK,IACL,GAAK,KACL,IAAM,KACN,KAAO,KACP,IAAM,KACN,KAAO,MAEX,EAAG,CACC,KAAO,IACP,IAAM,IACN,KAAO,IACP,IAAM,GACN,GAAK,IACL,GAAK,KACL,GAAK,KACL,IAAM,KACN,KAAO,KACP,IAAM,KACN,KAAO,MAEX,EAAG,CACC,KAAO,IACP,IAAM,IACN,KAAO,IACP,IAAM,IACN,GAAK,IACL,GAAK,KACL,GAAK,KACL,IAAM,KACN,KAAO,KACP,IAAM,MACN,KAAO,OAEX,EAAG,CACC,KAAO,IACP,IAAM,GACN,KAAO,IACP,IAAM,IACN,GAAK,KACL,GAAK,KACL,GAAK,KACL,IAAM,KACN,KAAO,MACP,IAAM,MACN,KAAO,OAEX,EAAG,CACC,KAAO,IACP,IAAM,IACN,KAAO,IACP,IAAM,KACN,GAAK,KACL,GAAK,KACL,GAAK,KACL,IAAM,MACN,KAAO,MACP,IAAM,MACN,KAAO,OAEX,EAAG,CACC,KAAO,IACP,IAAM,IACN,KAAO,KACP,IAAM,KACN,GAAK,IACL,GAAK,KACL,GAAK,MACL,IAAM,MACN,KAAO,MACP,IAAM,MACN,KAAO,OAEX,EAAG,CACC,KAAO,IACP,IAAM,KACN,KAAO,KACP,IAAM,KACN,GAAK,KACL,GAAK,KACL,GAAK,MACL,IAAM,MACN,KAAO,MACP,IAAM,MACN,KAAO,OAEX,EAAG,CACC,KAAO,KACP,IAAM,KACN,KAAO,KACP,IAAM,KACN,GAAK,KACL,GAAK,KACL,GAAK,MACL,IAAM,MACN,KAAO,MACP,IAAM,MACN,KAAO,OAEX,EAAG,CACC,KAAO,KACP,IAAM,KACN,KAAO,IACP,IAAM,KACN,GAAK,KACL,GAAK,KACL,GAAK,MACL,IAAM,MACN,KAAO,MACP,IAAM,MACN,KAAO,OAEX,GAAI,CACA,KAAO,KACP,IAAM,KACN,KAAO,KACP,IAAM,KACN,GAAK,KACL,GAAK,KACL,GAAK,MACL,IAAM,MACN,KAAO,MACP,IAAM,MACN,KAAO,OAEX,GAAI,CACA,KAAO,IACP,IAAM,KACN,KAAO,KACP,IAAM,KACN,GAAK,KACL,GAAK,MACL,GAAK,MACL,IAAM,MACN,KAAO,MACP,IAAM,MACN,KAAO,OAEX,GAAI,CACA,KAAO,KACP,IAAM,KACN,KAAO,IACP,IAAM,KACN,GAAK,IACL,GAAK,MACL,GAAK,MACL,IAAM,MACN,KAAO,MACP,IAAM,MACN,KAAO,MAEX,GAAI,CACA,KAAO,KACP,IAAM,KACN,KAAO,KACP,IAAM,KACN,GAAK,KACL,GAAK,MACL,GAAK,MACL,IAAM,MACN,KAAO,MACP,IAAM,MACN,KAAO,OAEX,GAAI,CACA,KAAO,KACP,IAAM,KACN,KAAO,KACP,IAAM,KACN,GAAK,KACL,GAAK,MACL,GAAK,MACL,IAAM,MACN,KAAO,MACP,IAAM,MACN,KAAO,OAEX,GAAI,CACA,KAAO,IACP,IAAM,KACN,KAAO,KACP,IAAM,KACN,GAAK,KACL,GAAK,MACL,GAAK,MACL,IAAM,GACN,KAAO,MACP,IAAM,MACN,KAAO,MAEX,GAAI,CACA,KAAO,KACP,IAAM,KACN,KAAO,KACP,IAAM,KACN,GAAK,KACL,GAAK,MACL,GAAK,MACL,IAAM,KACN,KAAO,MACP,IAAM,GACN,KAAO,OAEX,GAAI,CACA,KAAO,IACP,IAAM,KACN,KAAO,KACP,IAAM,KACN,GAAK,MACL,GAAK,MACL,GAAK,MACL,IAAM,MACN,KAAO,MACP,IAAM,MACN,KAAO,OAEX,GAAI,CACA,KAAO,KACP,IAAM,KACN,KAAO,KACP,IAAM,KACN,GAAK,MACL,GAAK,MACL,GAAK,MACL,IAAM,MACN,KAAO,MACP,IAAM,MACN,KAAO,OAEX,GAAI,CACA,KAAO,KACP,IAAM,KACN,KAAO,KACP,IAAM,MACN,GAAK,MACL,GAAK,MACL,GAAK,KACL,IAAM,MACN,KAAO,MACP,IAAM,MACN,KAAO,OAEX,GAAI,CACA,KAAO,KACP,IAAM,KACN,KAAO,KACP,IAAM,MACN,GAAK,MACL,GAAK,MACL,GAAK,MACL,IAAM,MACN,KAAO,MACP,IAAM,MACN,KAAO,IAEX,GAAI,CACA,KAAO,KACP,IAAM,IACN,KAAO,MACP,IAAM,MACN,GAAK,MACL,GAAK,MACL,GAAK,MACL,IAAM,MACN,KAAO,MAC
P,IAAM,MACN,KAAO,MAEX,GAAI,CACA,KAAO,KACP,IAAM,KACN,KAAO,MACP,IAAM,MACN,GAAK,MACL,GAAK,MACL,GAAK,MACL,IAAM,MACN,KAAO,MACP,IAAM,MACN,KAAO,MAEX,GAAI,CACA,KAAO,KACP,IAAM,KACN,KAAO,MACP,IAAM,MACN,GAAK,MACL,GAAK,MACL,GAAK,MACL,IAAM,MACN,KAAO,MACP,IAAM,MACN,KAAO,OAEX,GAAI,CACA,KAAO,KACP,IAAM,MACN,KAAO,KACP,IAAM,MACN,GAAK,MACL,GAAK,MACL,GAAK,KACL,IAAM,MACN,KAAO,MACP,IAAM,MACN,KAAO,OAEX,GAAI,CACA,KAAO,MACP,IAAM,MACN,KAAO,MACP,IAAM,MACN,GAAK,MACL,GAAK,MACL,GAAK,MACL,IAAM,MACN,KAAO,MACP,IAAM,MACN,KAAO,OAEX,GAAI,CACA,KAAO,MACP,IAAM,KACN,KAAO,MACP,IAAM,MACN,GAAK,MACL,GAAK,MACL,GAAK,MACL,IAAM,MACN,KAAO,MACP,IAAM,MACN,KAAO,OAEX,GAAI,CACA,KAAO,MACP,IAAM,MACN,KAAO,MACP,IAAM,MACN,GAAK,MACL,GAAK,MACL,GAAK,MACL,IAAM,MACN,KAAO,MACP,IAAM,MACN,KAAO,OAEX,GAAI,CACA,KAAO,MACP,IAAM,MACN,KAAO,MACP,IAAM,MACN,GAAK,MACL,GAAK,MACL,GAAK,MACL,IAAM,MACN,KAAO,MACP,IAAM,MACN,KAAO,OAEX,GAAI,CACA,KAAO,MACP,IAAM,MACN,KAAO,MACP,IAAM,MACN,GAAK,MACL,GAAK,MACL,GAAK,MACL,IAAM,MACN,KAAO,MACP,IAAM,MACN,KAAO,OAEX,GAAI,CACA,KAAO,MACP,IAAM,MACN,KAAO,MACP,IAAM,MACN,GAAK,KACL,GAAK,MACL,GAAK,MACL,IAAM,MACN,KAAO,MACP,IAAM,MACN,KAAO,OAEX,GAAI,CACA,KAAO,MACP,IAAM,MACN,KAAO,MACP,IAAM,MACN,GAAK,MACL,GAAK,MACL,GAAK,MACL,IAAM,MACN,KAAO,MACP,IAAM,MACN,KAAO,OAEX,GAAI,CACA,KAAO,MACP,IAAM,MACN,KAAO,MACP,IAAM,MACN,GAAK,MACL,GAAK,MACL,GAAK,MACL,IAAM,KACN,KAAO,MACP,IAAM,MACN,KAAO,OAEX,GAAI,CACA,KAAO,MACP,IAAM,MACN,KAAO,MACP,IAAM,MACN,GAAK,MACL,GAAK,MACL,GAAK,KACL,IAAM,MACN,KAAO,KACP,IAAM,MACN,KAAO,OAEX,GAAI,CACA,KAAO,MACP,IAAM,MACN,KAAO,MACP,IAAM,MACN,GAAK,MACL,GAAK,MACL,GAAK,MACL,IAAM,MACN,KAAO,MACP,IAAM,OACN,KAAO,QAEX,GAAI,CACA,KAAO,MACP,IAAM,MACN,KAAO,MACP,IAAM,MACN,GAAK,MACL,GAAK,MACL,GAAK,MACL,IAAM,OACN,KAAO,OACP,IAAM,OACN,KAAO,QAEX,GAAI,CACA,KAAO,KACP,IAAM,MACN,KAAO,MACP,IAAM,MACN,GAAK,MACL,GAAK,MACL,GAAK,OACL,IAAM,OACN,KAAO,OACP,IAAM,OACN,KAAO,OAEX,IAAK,CACD,KAAO,MACP,IAAM,MACN,KAAO,MACP,IAAM,MACN,GAAK,MACL,GAAK,MACL,GAAK,MACL,IAAM,OACN,KAAO,OACP,IAAM,OACN,KAAO,SCxefhJ,IAAMiJ,EAAWvJ,KAAKa,KAAK,EAAIb,KAAKqJ,IAM9BG,EAAU,CAKZC,SAAU,SAAUpF,GAChB,OAAOrE,KAAKyC,KAAK,GAAM4B,EAAIA,GAAKkF,CACnC,GAOCG,EAAmB,CAQrBC,IAAK,SAAUnK,GACXE,IAAI8C,EAAIoH,EAAOpK,GACTqK,EAAMtF,EAAmB/E,GAI/B,MAHmB,iBAARqK,IACPrH,EAAIxC,KAAKyB,IAAIe,EAAGqH,EAAM,OAEnB,KAAOrH,EAAIxC,KAAKS,IAAIjB,EAAEC,QAAS,GACzC,GAcL,SAASqK,EAAwBC,EAAGC,EAAQC,GACxCvK,IAAIwK,EAYAC,EAXJ,QAAehC,IAAX6B,EACAE,EAAWV,EAAQC,cAChB,GAAsB,iBAAXO,EAAqB,CACnC,IAAKR,EAAQQ,GACT,MAAM,IAAI7J,MAAM,mBAAqB6J,EAAS,KAElDE,EAAWV,EAAQQ,EAC3B,MACQE,EAAWF,EAIf,QAA+B,IAApBC,EACPE,EAAYT,EAAiBC,IAAII,QAC9B,GAA+B,iBAApBE,EAA8B,CAC5C,IAAKP,EAAiBO,GAClB,MAAM,IAAI9J,MACN,6BAA+B8J,EAAkB,KAGzDE,EAAYT,EAAiBO,GAAiBF,EACtD,MACQI,EAAYF,EAGhB,OAAO,SAAUzK,GACbE,IAAIK,EAAI,EACJR,EAAM,EACV,IAAKQ,EAAI,EAAGA,EAAIgK,EAAEtK,OAAQM,IACtBR,GAAO2K,GAAU1K,EAAIuK,EAAEhK,IAAMoK,GAEjC,OAAO5K,EAAM4K,EAAYJ,EAAEtK,MACnC,CACA,CCvFAa,IAAMiJ,EAAWvJ,KAAKa,KAAK,EAAIb,KAAKqJ,IAEpC,SAASe,EAAuB9H,GAK5B,IAJA5C,IAAIH,EAAM+C,EACNS,EAAMT,EAGDvC,EAAI,EAAGA,EAAI,GAAIA,IAEpBR,GADAwD,GAAQT,EAAIA,GAAM,EAAIvC,EAAI,GAG9B,OACIC,KAAKqK,MAA0D,KAAnD,GAAO9K,EAAMgK,EAAYvJ,KAAKyC,KAAMH,EAAIA,EAAK,KACzD,GAER,CAYA,IAFM,IAAAgI,EAAsB,GAEnBhI,EAAI,EAAGA,GAAK,KAAMA,GAAK,IAC5BgI,EAAoBjH,KAAK+G,EAAuB9H,ICdpD,SAASiI,EAAc/K,GACnBc,IAAMsC,EAAI,GAAK,EAAI,GAAM5C,KAAKC,IAAIT,IAC5BgL,EACF5H,EACA5C,KAAKyC,KACAjD,EAAIA,UACO,UAAaoD,EAAI,WAAcA,EAAI,YAAcA,EACrD,YACAA,EACA,WACAA,EACA,WACAA,EACA,WACAA,EACA,WACAA,EACA,YACAA,EACJ,YAEZ,OAAIpD,GAAK,EACE,EAAIgL,EAEJA,EAAM,CAErB,CChCA,SAASC,EAAqBjL,GAC1Bc,IAAMiB,EAAK,GAAKvB,KAAKqJ,GAAK,IAAO,EAAIrJ,KAAKqJ,IAAM,EAAIrJ,KAAKqJ,KAEnDqB,EAAM1K,KAAKa,KACbb,KAAKa,KACDb,KAAK
S,IAAI,GAAKT,KAAKqJ,GAAK9H,GAAKvB,KAAKuC,IAAI,EAAI/C,EAAIA,GAAK,EAAG,GAClDQ,KAAKuC,IAAI,EAAI/C,EAAIA,GAAK+B,IAEzB,GAAKvB,KAAKqJ,GAAK9H,GAAKvB,KAAKuC,IAAI,EAAI/C,EAAIA,GAAK,IAGnD,OAAIA,GAAK,EACEkL,GAECA,CAEhB,CCZA,SAASC,EAAKnL,GACV,GAAiB,iBAANA,EACP,OAAIA,EAAI,GACI,EACK,IAANA,EACA,EAEA,EAGX,MAAM,IAAIoL,UAAU,eAE5B,CClBA,SAASC,GAAkB1I,EAAMC,GAE7B,IADA1C,IAAIH,EAAM,EACDQ,EAAI,EAAGA,EAAIoC,EAAK1C,OAAQM,IAAK,CAClCO,IAAMwK,EAAO3I,EAAKpC,GAAKqC,EAAMrC,GAC7BR,GAAOuL,EAAOA,CACjB,CACD,OAAO9K,KAAKa,KAAKtB,EACrB,CCkCA,SAASwL,GAAYC,EAAQC,GACzB,OAAOD,EAAOE,KAAI,SAACrJ,GAGf,IAFAnC,IAAIyL,EAAUtL,OAAOuL,UACjBvC,GAAS,EACJ9I,EAAI,EAAGA,EAAIkL,EAAUxL,OAAQM,IAAK,CACvCO,IAAM+K,EAAOR,GAAkBhJ,EAAGoJ,EAAUlL,IACxCsL,EAAOF,IACPA,EAAUE,EACVxC,EAAQ9I,EAEf,CACD,OAAO8I,CACf,GACA,CAYA,SAASyC,GAAmBN,EAAQO,EAAQC,GAQxC,IANAlL,IAAMmL,EAAYT,EAAO,GAAGvL,OACtBwL,EAAY5F,EAAWmG,EAAYC,GACnCC,EAASxI,MAAMsI,GAAYG,KAAK,GAGhCC,EAAYZ,EAAOvL,OAChBM,EAAI,EAAGA,EAAI6L,EAAW7L,IAAK,CAIhC,IAHAO,IAAMuL,EAAQb,EAAOjL,GACf8I,EAAQ0C,EAAOxL,GACf+L,EAAUb,EAAUpC,GACjBhG,EAAI,EAAGA,EAAI4I,EAAW5I,IAC3BiJ,EAAQjJ,IAAMgJ,EAAMhJ,GAExB6I,EAAO7C,IAAU,CACpB,CAGD,IAAKnJ,IAAIK,EAAI,EAAGA,EAAIyL,EAAYzL,IAAK,CACjC,GAAkB,IAAd2L,EAAO3L,GACP,MAAM,IAAII,kBAAkBJ,EAAC,mBAGjC,IADAO,IAAMyL,EAAWd,EAAUlL,GAClB8C,EAAI,EAAGA,EAAI4I,EAAW5I,IAC3BkJ,EAASlJ,IAAM6I,EAAO3L,EAE7B,CAED,OAAOkL,CACX,CAUA,SAASe,GAAgB7J,EAAMC,GAE3B,IADA1C,IAAIuM,EAAQ,EACHlM,EAAI,EAAGA,EAAIoC,EAAK1C,OAAQM,IAC7BkM,GAASpB,GAAkB1I,EAAKpC,GAAIqC,EAAMrC,IAE9C,OAAOkM,CACX,CCtGA,SAASC,GAAWlB,EAAQO,GACxB,GAAIP,EAAOvL,SAAW8L,EAAO9L,OACzB,MAAM,IAAIU,MAAM,8CAKpB,IAHAG,IAAM6L,EAiCV,SAAsBZ,GAGlB,IAFAjL,IAAM8L,EAAY,EAAI1K,EAAI6J,GACpBc,EAASnJ,MAAMkJ,GACZrM,EAAI,EAAGA,EAAIwL,EAAO9L,OAAQM,IAAK,CACpCO,IAAMuI,EAAQ0C,EAAOxL,QACCoI,IAAlBkE,EAAOxD,KACPwD,EAAOxD,GAAS,IAEpBwD,EAAOxD,GAAOxF,KAAKtD,EACtB,CACD,OAAOsM,CACX,CA5CsBC,CAAaf,GACzBgB,EAqDV,SAA+BvB,GAG3B,IAFA1K,IAAMsL,EAAYZ,EAAOvL,OACnB4M,EAAShH,EAAWuG,EAAWA,GAC5B7L,EAAI,EAAGA,EAAI6L,EAAW7L,IAC3B,IAAKL,IAAImD,EAAI,EAAGA,EAAI9C,EAAG8C,IACnBwJ,EAAOtM,GAAG8C,GAAKgI,GAAkBG,EAAOjL,GAAIiL,EAAOnI,IACnDwJ,EAAOxJ,GAAG9C,GAAKsM,EAAOtM,GAAG8C,GAGjC,OAAOwJ,CACX,CA/DsBG,CAAsBxB,GAClCqB,EAAS,GACNtM,EAAI,EAAGA,EAAIiL,EAAOvL,OAAQM,IAAK,CACpCL,IAAI8C,EAAI,EACR,GAAI2J,EAAUZ,EAAOxL,IAAIN,OAAS,EAAG,CACjCa,IAAMiB,EAAIkL,GACN1M,EACAoM,EAAUZ,EAAOxL,IACjBwM,GAEE/K,EAAIkL,GACN3M,EACAwL,EACAY,EACAI,GAEJ/J,GAAKhB,EAAID,GAAKvB,KAAK0B,IAAIH,EAAGC,EAC7B,CACD6K,EAAOhJ,KAAKb,EACf,CACD,OAAO6J,CACX,CA0DA,SAASK,GAA2BC,EAAOpB,EAAQY,EAAWI,GAG1D,IAFAjM,IAAMuI,EAAQ0C,EAAOoB,GACjBN,EAASxM,OAAOuL,UACXrL,EAAI,EAAGA,EAAIoM,EAAU1M,OAAQM,IAClC,GAAIA,IAAM8I,EAAO,CACbvI,IAAMsM,EAAIH,GACNE,EACAR,EAAUpM,GACVwM,GAEAK,EAAIP,IACJA,EAASO,EAEhB,CAEL,OAAOP,CACX,CAeA,SAASI,GAA6BE,EAAOE,EAAON,GAEhD,IADA7M,IAAIuM,EAAQ,EACHlM,EAAI,EAAGA,EAAI8M,EAAMpN,OAAQM,IAC9BkM,GAASM,EAAUI,GAAOE,EAAM9M,IAEpC,OAAOkM,EAAQY,EAAMpN,MACzB,CCzGA,SAASqN,GAAcC,EAAQC,GAI3B,OAAe,IAAXD,GAA6B,IAAbC,EACT,EAGJhN,KAAKC,KAAK8M,EAASC,GAAYA,EAC1C,wDCzBA,SAAmB9M,EAAMG,EAAG4M,GACxB,OAAO/M,GAAQ+M,EAAW/M,IAASG,EAAI,EAC3C,gBCPA,SAAqB0M,EAAQC,EAAUE,GACnC,sBAD+ClE,GACxC8D,GAAcC,EAAQC,IAAaE,CAC9C,qECIA,SAA+BrL,GAE3B,GAAIA,EAAI,GAAKA,EAAI,EACb,MAAM,IAAI1B,MACN,8EAIR,MAAO,CAAC,EAAI0B,EAAGA,EACnB,yBCdA,SAA8BsL,EAAQC,GAGlC,KAAIA,EAAc,GAAKA,EAAc,GAAKD,GAAU,GAAKA,EAAS,GAAM,GAAxE,CAUAzN,IAAIF,EAAI,EACJ6N,EAAwB,EACtBC,EAAQ,GACVC,EAAsB,EAK1B,GAEID,EAAM9N,GACF+N,EACAvN,KAAKS,IAAI2M,EAAa5N,GACtBQ,KAAKS,IAAI,EAAI2M,EAAaD,EAAS3N,GACvC6N,GAAyBC,EAAM9N,GAE/B+N,EAAuBA,GAAuBJ,IAD9C3N,EAC2D,GAAMA,QAG5D6N,EAAwB,OAEjC,OAAOC,CA7BN,CA8BL,WC5BA,SAAgBE,EAAMC,EAAOC,EAAKC,EAAeC,GAC7C,GAAoB,mBAA
TJ,EACP,MAAM,IAAI5C,UAAU,2BAExB,IAAKlL,IAAIK,EAAI,EAAGA,EAAI4N,EAAe5N,IAAK,CACpCO,IAAMuN,GAAUJ,EAAQC,GAAO,EAE/B,GACqB,IAAjBF,EAAKK,IACL7N,KAAKC,KAAKyN,EAAMD,GAAS,GAAKG,EAE9B,OAAOC,EAGPlD,EAAK6C,EAAKK,MAAalD,EAAK6C,EAAKC,IACjCA,EAAQI,EAERH,EAAMG,CAEb,CAED,MAAM,IAAI1N,MAAM,wCACpB,4DCXA,SAAiC2H,EAAMgG,EAAkBC,GAgBrD,IAdAzN,IAEI0N,EAAa,EAMXC,EAA2BH,EARf5N,EAAK4H,IASjBoG,EAAsB,GACtBC,EAAsB,GAInBpO,EAAI,EAAGA,EAAI+H,EAAKrI,OAAQM,SACQoI,IAAjC+F,EAAoBpG,EAAK/H,MACzBmO,EAAoBpG,EAAK/H,IAAM,GAEnCmO,EAAoBpG,EAAK/H,MAM7B,IAAKL,IAAIK,EAAI,EAAGA,EAAImO,EAAoBzO,OAAQM,SACboI,IAA3B+F,EAAoBnO,KACpBmO,EAAoBnO,GAAK,GAMjC,IAAKO,IAAM4B,KAAK+L,EACR/L,KAAKgM,IACLC,GAAqBjM,GAAK+L,EAAyB/L,GAAK4F,EAAKrI,QAOrE,IAAKC,IAAIwC,EAAIiM,EAAoB1O,OAAS,EAAGyC,GAAK,EAAGA,IAC7CiM,EAAoBjM,GAAK,IACzBiM,EAAoBjM,EAAI,IAAMiM,EAAoBjM,GAClDiM,EAAoBzK,MAEpBwK,EAAoBhM,EAAI,IAAMgM,EAAoBhM,GAClDgM,EAAoBxK,OAM5B,IAAKhE,IAAIwC,EAAI,EAAGA,EAAIgM,EAAoBzO,OAAQyC,IAC5C8L,GACIhO,KAAKS,IAAIyN,EAAoBhM,GAAKiM,EAAoBjM,GAAI,GAC1DiM,EAAoBjM,GAQ5B5B,IAAM8N,EAAmBF,EAAoBzO,OA3DnC,EA2DgD,EAC1D,OACI6J,EAA4B8E,GAAkBL,GAAgBC,CAEtE,UCnFA,SAAexO,EAAG6O,GAEd/N,IAAMuN,EAAS,GAMf,GAAIQ,EAAY,EACZ,MAAM,IAAIlO,MAAM,wCAGpB,GAAIH,KAAK2C,MAAM0L,KAAeA,EAC1B,MAAM,IAAIlO,MAAM,iCAKpB,IAAKT,IAAI+N,EAAQ,EAAGA,EAAQjO,EAAEC,OAAQgO,GAASY,EAI3CR,EAAOxK,KAAK7D,EAAE6B,MAAMoM,EAAOA,EAAQY,IAEvC,OAAOR,CACX,Y7B2LA,SAAiBrO,EAAG8O,GAChB,GAAIA,EAAY9O,EAAEC,OACd,MAAM,IAAIU,MACN,2DAIRG,IAAMS,EAASK,EAAY5B,GAM3B,GAAoB,IAJAkG,EAAkB3E,GAKlC,MAAO,CAACA,GAIZT,IAAMkF,EAASH,EAAWiJ,EAAWvN,EAAOtB,QAEtC6G,EAAkBjB,EAAWiJ,EAAWvN,EAAOtB,SAhHzD,SAAsBqI,EAAMtC,EAAQc,GAWhC,IAVAhG,IAAMiO,EAAU/I,EAAO,GAAG/F,OAGpB+O,EAAQ1G,EAAK9H,KAAK2C,MAAM4L,EAAU,IAGlCzI,EAAO,GACPC,EAAgB,GAGbhG,EAAI,EAAG0O,OAAY,EAAE1O,EAAIwO,IAAWxO,EACzC0O,EAAe3G,EAAK/H,GAAKyO,EACf,IAANzO,GACA+F,EAAKzC,KAAKoL,GACV1I,EAAc1C,KAAKoL,EAAeA,KAElC3I,EAAKzC,KAAKyC,EAAK/F,EAAI,GAAK0O,GACxB1I,EAAc1C,KACV0C,EAAchG,EAAI,GAAK0O,EAAeA,IAK9CjJ,EAAO,GAAGzF,GAAK8F,EAAI,EAAG9F,EAAG+F,EAAMC,GAC/BO,EAAgB,GAAGvG,GAAK,EAK5B,IAAKL,IAAI2G,EAAU,EAAGA,EAAUb,EAAO/F,SAAU4G,EAQ7CH,EAPIG,EAAUb,EAAO/F,OAAS,EACnB4G,EAGAkI,EAAU,EAKjBA,EAAU,EACVlI,EACAb,EACAc,EACAR,EACAC,EAGZ,CAsEI2I,CAAa3N,EAAQyE,EAAQc,GAY7B,IANAhG,IAAMqO,EAAW,GACbC,EAAetI,EAAgB,GAAG7G,OAAS,EAKtC4G,EAAUC,EAAgB7G,OAAS,EAAG4G,GAAW,EAAGA,IAAW,CACpE/F,IAAMuO,EAAcvI,EAAgBD,GAASuI,GAK7CD,EAAStI,GAAWtF,EAAOM,MAAMwN,EAAaD,EAAe,GAEzDvI,EAAU,IACVuI,EAAeC,EAAc,EAEpC,CAED,OAAOF,CACX,2B8B1QA,SAAgCnP,GAC5B,OAAOwH,EAAwBxH,GAAKU,EAAKV,EAC7C,iBCNA,SAASsP,EAAatP,EAAG0C,GACrBxC,IAAIK,EACAgP,EAEAC,EACAC,EAFEC,EAAkB,GAIxB,IAAKnP,EAAI,EAAGA,EAAIP,EAAEC,OAAQM,IACtB,GAAU,IAANmC,EACAgN,EAAgB7L,KAAK,CAAC7D,EAAEO,UAGxB,IADAiP,EAAqBF,EAAatP,EAAE6B,MAAMtB,EAAI,EAAGP,EAAEC,QAASyC,EAAI,GAC3D6M,EAAO,EAAGA,EAAOC,EAAmBvP,OAAQsP,KAC7CE,EAAOD,EAAmBD,IACrBI,QAAQ3P,EAAEO,IACfmP,EAAgB7L,KAAK4L,GAIjC,OAAOC,CACX,4BCnBA,SAASE,EAAwB5P,EAAG0C,GAGhC,IAFA5B,IAAM4O,EAAkB,GAEfnP,EAAI,EAAGA,EAAIP,EAAEC,OAAQM,IAC1B,GAAU,IAANmC,EAGAgN,EAAgB7L,KAAK,CAAC7D,EAAEO,UAcxB,IALAO,IAAM0O,EAAqBI,EACvB5P,EAAE6B,MAAMtB,EAAGP,EAAEC,QACbyC,EAAI,GAGCW,EAAI,EAAGA,EAAImM,EAAmBvP,OAAQoD,IAC3CqM,EAAgB7L,KAAK,CAAC7D,EAAEO,IAAIsP,OAAOL,EAAmBnM,KAKlE,OAAOqM,CACX,sCCnBA,SAA0BI,EAAWlI,EAAOC,EAAIkI,EAAWjI,EAAOC,GAC9DjH,IAAMkP,EAAUrI,EAAaC,EAAOC,EAAIC,EAAOC,GAE/C,OACKF,GAAMiI,EAAYtP,KAAKS,IAAI2G,EAAQoI,EAAS,IACzCjI,GAAMgI,EAAYvP,KAAKS,IAAI6G,EAAQkI,EAAS,MAC/CnI,EAAKE,EAEd,qCCvBA,SAA0C/H,GACtC,OAAO,GAAKQ,KAAKyC,KAAKjD,GAAK,EAC/B,mCCQA,SAAwC8C,GAEpChC,IAAMmP,EAAOzP,KAAKC,IAAIqC,GAKhB4C,EAAQlF,KAAKyB,IACfzB,KAAKqK,MAAa,IAAPoF,GACXnF,EAAoB7K,OAAS,GAMjC,OAAI6C,GAAK,EACEgI,EAAoBpF,GAKpBlF,KA
AKqK,MAAyC,KAAlC,EAAIC,EAAoBpF,KAAiB,GAEpE,oCCvBA,SAA6B1F,EAAGkQ,GAC5B,GAAIlQ,EAAEC,OAAS,EACX,OAAOD,EAgBX,IAbAc,IAAMqP,EAASlO,EAAIjC,GACboQ,EAASlO,EAAIlC,GAIbqQ,EAAS,CAACF,GAIVG,GAAaF,EAASD,GAAUD,EAI7B3P,EAAI,EAAGA,EAAI2P,EAAU3P,IAC1B8P,EAAOxM,KAAKwM,EAAO,GAAKC,EAAY/P,GAOxC,OAFA8P,EAAOxM,KAAKuM,GAELC,CACX,qCC/BA,SAAgBrQ,GACZ,GAAiB,IAAbA,EAAEC,OACF,MAAM,IAAIU,MAAM,2CAKpB,IAFAT,IAAI+B,EAAMjC,EAAE,GACRkC,EAAMlC,EAAE,GACHO,EAAI,EAAGA,EAAIP,EAAEC,OAAQM,IACtBP,EAAEO,GAAK2B,IACPA,EAAMlC,EAAEO,IAERP,EAAEO,GAAK0B,IACPA,EAAMjC,EAAEO,IAGhB,MAAO,CAAC0B,EAAKC,EACjB,iBClBA,SAAsBlC,GAClB,MAAO,CAACA,EAAE,GAAIA,EAAEA,EAAEC,OAAS,GAC/B,wBCMA,SAASsQ,EAAM1P,GACX,GAAIR,OAAOmQ,UAAU3P,GACjB,OAAIA,GAAK,EAEER,OAAOC,IAGPmJ,EAAU5I,EAAI,GAO7B,KAFAA,EAEQ,EAGJ,OAAOL,KAAKqJ,IAAMrJ,KAAKiQ,IAAIjQ,KAAKqJ,IAAMhJ,GAAK0P,GAAO1P,IAGlDC,IAGM4P,EAAc7P,EAAI,EAAI,EAW5B,OAbIL,KAAKS,IAAIJ,EAAIL,KAAKmQ,EAAG9P,GAAKL,KAAKa,KAAK,EAAIb,KAAKqJ,IAAMhJ,EAAI,EAAI,KAK3D,EACA,EAAI,IAAML,KAAKS,IAAIyP,EAAa,GAChC,EAAI,MAAQlQ,KAAKS,IAAIyP,EAAa,GAClC,IAAM,OAASlQ,KAAKS,IAAIyP,EAAa,GACrC,GAAK,QAAUlQ,KAAKS,IAAIyP,EAAa,GACrC,QAAU,WAAalQ,KAAKS,IAAIyP,EAAa,GAC7C,MAAQ,YAAclQ,KAAKS,IAAIyP,EAAa,GAIxD,Y3B3BA,SAAiB7P,GAEb,GAAIA,GAAK,EACL,OAAOR,OAAOuQ,kBAIlB/P,IAKA,IAFAX,IAAI6B,EAAI4H,EAAa,GAEZpJ,EAAI,EAAGA,EAAI,GAAIA,IACpBwB,GAAK4H,EAAapJ,IAAMM,EAAIN,GAGhCO,IAAMyC,EAAMsN,UAAUhQ,EAGtB,OAAO+I,EAAapJ,KAAKuC,IAAIhB,GAAKwB,GAAO1C,EAAI,IAAOL,KAAKuC,IAAIQ,EACjE,kB4BbA,SAAuBvD,GACnB,GAAiB,IAAbA,EAAEC,OACF,MAAM,IAAIU,MAAM,kDAMpB,IAFAT,IAAIuB,EAAQ,EAEHlB,EAAI,EAAGA,EAAIP,EAAEC,OAAQM,IAAK,CAE/B,GAAIP,EAAEO,GAAK,EACP,MAAM,IAAII,MACN,6DAKRc,GAASzB,EAAEO,EACd,CAED,OAAOC,KAAKS,IAAIQ,EAAO,EAAIzB,EAAEC,OACjC,iBCrCA,SAAsBD,GAClB,GAAiB,IAAbA,EAAEC,OACF,MAAM,IAAIU,MAAM,iDAKpB,IAFAT,IAAI4Q,EAAgB,EAEXvQ,EAAI,EAAGA,EAAIP,EAAEC,OAAQM,IAAK,CAE/B,GAAIP,EAAEO,IAAM,EACR,MAAM,IAAII,MACN,wDAIRmQ,GAAiB,EAAI9Q,EAAEO,EAC1B,CAGD,OAAOP,EAAEC,OAAS6Q,CACtB,kECnBA,SAAexI,EAAM4H,GACjB,GAAIA,EAAW5H,EAAKrI,OAChB,OAAO,KAUXa,IAAMiQ,ECvBV,SAAuBzI,EAAM4H,GAMzBpP,IAGIP,EACA8C,EAJE2N,EAAmB,GACnBC,EAAuB,GAKzB/P,EAAW,EAGf,IAAKX,EAAI,EAAGA,EAAI+H,EAAKrI,OAAS,EAAGM,IAAK,CAClCO,IAAMoQ,EAAO,GACPC,EAAO,GAIb,IAAK9N,EAAI,EAAGA,EAAI6M,EAAW,EAAG7M,IAC1B6N,EAAKrN,KAAK,GACVsN,EAAKtN,KAAK,GAEdmN,EAAiBnN,KAAKqN,GACtBD,EAAqBpN,KAAKsN,EAC7B,CAED,IAAK5Q,EAAI,EAAGA,EAAI2P,EAAW,EAAG3P,IAK1B,IAJAyQ,EAAiB,GAAGzQ,GAAK,EACzB0Q,EAAqB,GAAG1Q,GAAK,EAGxB8C,EAAI,EAAGA,EAAIiF,EAAKrI,OAAS,EAAGoD,IAC7B4N,EAAqB5N,GAAG9C,GAAKF,OAAOuQ,kBAI5C,IAAK1Q,IAAIiE,EAAI,EAAGA,EAAImE,EAAKrI,OAAS,EAAGkE,IAAK,CAetC,IAZAjE,IAAIH,EAAM,EAGNqR,EAAa,EAEbC,EAAI,EAEJC,EAAK,EAKAzO,EAAI,EAAGA,EAAIsB,EAAI,EAAGtB,IAAK,CAE5B/B,IAAMyQ,EAAkBpN,EAAItB,EAAI,EAC1B2O,EAAMlJ,EAAKiJ,EAAkB,GAkBnC,GAJArQ,GALAkQ,GAAcI,EAAMA,IADpBzR,GAAOyR,GAMwBzR,IAT/BsR,EAaW,IAFXC,EAAKC,EAAkB,GAGnB,IAAKlO,EAAI,EAAGA,EAAI6M,EAAW,EAAG7M,IAMtB4N,EAAqB9M,GAAGd,IACxBnC,EAAW+P,EAAqBK,GAAIjO,EAAI,KAExC2N,EAAiB7M,GAAGd,GAAKkO,EACzBN,EAAqB9M,GAAGd,GACpBnC,EAAW+P,EAAqBK,GAAIjO,EAAI,GAI3D,CAED2N,EAAiB7M,GAAG,GAAK,EACzB8M,EAAqB9M,GAAG,GAAKjD,CAChC,CAKD,MAAO,CACH8P,iBAAkBA,EAClBC,qBAAsBA,EAE9B,CDjFqBQ,CALjBnJ,EAAOA,EAAKzG,QAAQC,MAAK,SAAUC,EAAGC,GAClC,OAAOD,EAAIC,CACnB,IAGyCkO,GAKrC,OE5BJ,SAAqB5H,EAAM0I,EAAkBd,GACzChQ,IAAIwC,EAAI4F,EAAKrI,OACPyR,EAAS,GACXC,EAAWzB,EAQf,IAJAwB,EAAOxB,GAAY5H,EAAKA,EAAKrI,OAAS,GAI/B0R,EAAW,GACdD,EAAOC,EAAW,GAAKrJ,EAAK0I,EAAiBtO,GAAGiP,GAAY,GAC5DjP,EAAIsO,EAAiBtO,GAAGiP,GAAY,EACpCA,IAGJ,OAAOD,CACX,CFUWE,CAAYtJ,EAHMyI,EAASC,iBAGSd,EAC/C,kBtBfA,SAAuB1E,EAAQQ,EAAYzG,QAAY,IAAAA,IAAAA,EAAG/E,KAAKgF,QAK3D,IAJAtF,IAAI2R,EAAe,KACfC,EAAelM,EAAO4F,EAAQQ,EAAYzG,GAC1CwG,EAAS,KACTgG,EAAS1R,
OAAOuL,UACF,IAAXmG,GAEHF,EAAeC,EAEfC,EAASvF,GADTsF,EAAehG,GAAmBN,EAFlCO,EAASR,GAAYC,EAAQsG,GAEqB9F,GACX6F,GAE3C,MAAO,CACH9F,OAAQA,EACRN,UAAWqG,EAEnB,yDyBzBA,SAA0BxJ,GACtBpI,IAAI2C,EACAb,EAIEgQ,EAAa1J,EAAKrI,OAIxB,GAAmB,IAAf+R,EACAnP,EAAI,EACJb,EAAIsG,EAAK,GAAG,OACT,CAmBH,IAhBApI,IAOImM,EACArM,EACAoH,EATA6K,EAAO,EACPC,EAAO,EACPC,EAAQ,EACRC,EAAQ,EAaH7R,EAAI,EAAGA,EAAIyR,EAAYzR,IAK5B0R,GAHAjS,GADAqM,EAAQ/D,EAAK/H,IACH,GAIV2R,GAHA9K,EAAIiF,EAAM,GAKV8F,GAASnS,EAAIA,EACboS,GAASpS,EAAIoH,EASjBpF,EAAIkQ,EAAOF,GALXnP,GACKmP,EAAaI,EAAQH,EAAOC,IAC5BF,EAAaG,EAAQF,EAAOA,IAGJA,EAAQD,CACxC,CAGD,MAAO,CACHnP,EAAGA,EACHb,EAAGA,EAEX,yBCrDA,SAA8BqQ,GAI1B,OAAO,SAAUrS,GACb,OAAOqS,EAAGrQ,EAAIqQ,EAAGxP,EAAI7C,CAC7B,CACA,eCbA,SAAoBA,GAChB,GAAiB,IAAbA,EAAEC,OACF,MAAM,IAAIU,MAAM,+CAIpB,IADAT,IAAIuB,EAAQ,EACHlB,EAAI,EAAGA,EAAIP,EAAEC,OAAQM,IAAK,CAC/B,GAAIP,EAAEO,GAAK,EACP,MAAM,IAAII,MACN,0DAGRc,GAASjB,KAAKuC,IAAI/C,EAAEO,GACvB,CAED,OAAOC,KAAKyC,IAAIxB,EAAQzB,EAAEC,OAC9B,UCnBA,SAAeoC,GACX,GAAIA,GAAK,GAAKA,GAAK,EACf,MAAM,IAAI1B,MAAM,2CAEpB,OAAOH,KAAKuC,IAAIV,GAAK,EAAIA,GAC7B,8BCHA,SAAmBrC,GACf,OAAOA,EAAEA,EAAEC,OAAS,EACxB,gFCMA,SAAsBsB,GAClB,OAAOa,EAAeb,EAAQ,GAClC,sBCVA,SAAmBvB,GACf,OAAOA,EAAE,EACb,SCQA,SAAcA,GAIV,OAAOsB,EAAWM,EAAY5B,GAClC,aCAA,SAAkBA,GAUd,IANAc,IAGIwR,EAHE5M,EAAQ,IAAI6M,IAIdC,EAAY,EAEPjS,EAAI,EAAGA,EAAIP,EAAEC,OAAQM,IAAK,CAC/BL,IAAIuS,EAAW/M,EAAMgN,IAAI1S,EAAEO,SACVoI,IAAb8J,EACAA,EAAW,EAEXA,IAEAA,EAAWD,IACXF,EAAOtS,EAAEO,GACTiS,EAAYC,GAEhB/M,EAAMiN,IAAI3S,EAAEO,GAAIkS,EACnB,CAED,GAAkB,IAAdD,EACA,MAAM,IAAI7R,MAAM,wCAGpB,OAAO2R,CACX,kEC7BA,SAAyBM,EAASC,EAASC,EAAapQ,EAAG6C,GAQvD,QANUoD,IAANjG,IACAA,EAAI,UAEYiG,IAAhBmK,IACAA,EAAc,YAGE,aAAhBA,GACgB,YAAhBA,GACgB,SAAhBA,EAEA,MAAM,IAAInS,MACN,kEAmBR,IAdAG,IAKMiS,EALQrS,EAAKkS,GACLlS,EAAKmS,GAObG,EAAc,IAAItP,MAAMhB,GAGxBuQ,EAAUL,EAAQ/C,OAAOgD,GACzBK,EAAW1S,KAAK2C,MAAM8P,EAAQhT,OAAS,GAEpCM,EAAI,EAAGA,EAAImC,EAAGnC,IAAK,CAExB+E,EAAe2N,EAAS1N,GACxBzE,IAAMqS,EAAWF,EAAQpR,MAAM,EAAGqR,GAC5BE,EAAYH,EAAQpR,MAAMqR,EAAUD,EAAQhT,QAG5CoT,EAAoB3S,EAAKyS,GAAYzS,EAAK0S,GAGhDJ,EAAYzS,GAAK8S,CACpB,CAKDnT,IAAIoT,EAAmB,EACvB,GAAoB,aAAhBR,EACA,IAAK5S,IAAIK,EAAI,EAAGA,GAAKmC,EAAGnC,IAChBC,KAAKC,IAAIuS,EAAYzS,KAAOC,KAAKC,IAAIsS,KACrCO,GAAoB,QAGzB,GAAoB,YAAhBR,EACP,IAAK5S,IAAIK,EAAI,EAAGA,GAAKmC,EAAGnC,IAChByS,EAAYzS,IAAMwS,IAClBO,GAAoB,QAK5B,IAAKpT,IAAIK,EAAI,EAAGA,GAAKmC,EAAGnC,IAEhByS,EAAYzS,IAAMwS,IAClBO,GAAoB,GAMhC,OAAOA,EAAmB5Q,CAC9B,qBC5FA,SAA0B6Q,GAItB,IAHAzS,IAAM0S,EAAU,IAAI9P,MAAM6P,EAAStT,QAC7BwT,EAAe,CAACF,EAAS1R,SAEtBtB,EAAI,EAAGA,EAAIgT,EAAStT,OAAQM,IACjCiT,EAAQjT,GAAK,EAGjB,IAAKL,IAAIK,EAAI,EAAGA,EAAIgT,EAAStT,QACzB,GAAIuT,EAAQjT,GAAKA,EAAG,CAGhBL,IAAIwT,EAAW,EACXnT,EAAI,GAAM,IACVmT,EAAWF,EAAQjT,IAKvBO,IAAM6S,EAAOJ,EAASG,GACtBH,EAASG,GAAYH,EAAShT,GAC9BgT,EAAShT,GAAKoT,EAEdF,EAAa5P,KAAK0P,EAAS1R,SAC3B2R,EAAQjT,KACRA,EAAI,CAChB,MACYiT,EAAQjT,GAAK,EACbA,IAIR,OAAOkT,CACX,wBCzBA,SAA6BG,GAEzB,KAAIA,GAAU,GAAd,CAKA1T,IAAIF,EAAI,EAGJ6N,EAAwB,EAEtBC,EAAQ,GACV+F,EAAa,EAKjB,GAEI/F,EAAM9N,GAAMQ,KAAKyC,KAAK2Q,GAAUpT,KAAKS,IAAI2S,EAAQ5T,GAAM6T,EACvDhG,GAAyBC,EAAM9N,GAE/B6T,KADA7T,QAIK6N,EAAwB,OAEjC,OAAOC,CAxBN,CAyBL,WC5BA,SAAgBzL,GAMZ,OALU,IAANA,EACAA,EAAImH,EACGnH,GAAK,IACZA,EAAI,OAED7B,KAAKa,KAAK,GAAK4J,EAAqB,EAAI5I,EAAI,EACvD,YCZA,SAAiBrC,GAEb,IADAE,IAAIuB,EAAQ,EACHlB,EAAI,EAAGA,EAAIP,EAAEC,OAAQM,IAC1BkB,GAASzB,EAAEO,GAEf,OAAOkB,CACX,8BCAA,SAAsBzB,EAAGyB,GAIrB,OAAO+C,EAFY5C,EAAY5B,GAEOyB,EAC1C,uECRA,SAAkBzB,EAAGgO,GACjB,GAAIhO,EAAEC,OAAS,EACX,OAAO,EAOX,IADAC,IAAIH,EAAM,EACDQ,EAAI,EAAGA,EAAIP,EAAEC,OAAQM,IAC1BR,GAAOC,EAAEO,GAAG,GAQhB,IANAO,IAAMgT,EAAU/T,EAAMC,EAAEC,OAKpBiI,EAAe,EA
CV7E,EAAI,EAAGA,EAAIrD,EAAEC,OAAQoD,IAC1B6E,GAAgB1H,KAAKS,IAAI6S,EAAU9T,EAAEqD,GAAG,GAAI,GAOhD,IADAnD,IAAI6T,EAAM,EACDrR,EAAI,EAAGA,EAAI1C,EAAEC,OAAQyC,IAC1BqR,GAAOvT,KAAKS,IAAIjB,EAAE0C,GAAG,GAAKsL,EAAKhO,EAAE0C,GAAG,IAAK,GAM7C,OAAO,EAAIqR,EAAM7L,CACrB,uHC/BA,SAAwBlI,GACpBc,IAAMD,EAAIb,EAAEC,OAEZ,GAAIY,EAAI,EACJ,MAAM,IAAIF,MAAM,qDAQpB,IALAG,IACIC,EADEC,EAAYN,EAAKV,GAEnBgU,EAAsB,EACtBC,EAAsB,EAEjB1T,EAAI,EAAGA,EAAIM,EAAGN,IAEnByT,IADAjT,EAAYf,EAAEO,GAAKS,GACgBD,EACnCkT,GAAuBlT,EAAYA,EAAYA,EAAYA,EAG/D,OACMF,EAAI,KAAOA,EAAI,IAAMA,EAAI,KACzBA,GAAKA,EAAI,GAAKoT,GACXD,EAAsBA,GACvB,GAAKnT,EAAI,GAErB,0BC/BA,SAA+Bb,EAAGoH,GAe9B,IAdAtG,IAAMoT,EAAWlU,EACZ0L,KAAG,SAAEjK,EAAOiE,GAAU,MAAA,CAACjE,EAAOiE,MAC9B5D,MAAK,SAACC,EAAGC,GAAM,OAAAD,EAAE,GAAKC,EAAE,MACxB0J,KAAI,SAACyI,GAAI,OAAKA,EAAK,EAAE,IACpBC,EAAWhN,EACZsE,KAAG,SAAEjK,EAAOiE,GAAU,MAAA,CAACjE,EAAOiE,MAC9B5D,MAAK,SAACC,EAAGC,GAAM,OAAAD,EAAE,GAAKC,EAAE,MACxB0J,KAAI,SAACyI,GAAI,OAAKA,EAAK,EAAE,IAKpBE,EAAS3Q,MAAMwQ,EAASjU,QACxBqU,EAAS5Q,MAAMwQ,EAASjU,QACrBM,EAAI,EAAGA,EAAI2T,EAASjU,OAAQM,IACjC8T,EAAOH,EAAS3T,IAAMA,EACtB+T,EAAOF,EAAS7T,IAAMA,EAG1B,OAAOmH,EAAkB2M,EAAQC,EACrC,mBCZA,SAAwBtU,GACpB,GAAIA,EAAEC,OAAS,EACX,MAAM,IAAIU,MAAM,sDAQpB,IALAG,IACIC,EADEC,EAAYN,EAAKV,GAEnBuU,EAAuB,EACvBC,EAAqB,EAEhBjU,EAAI,EAAGA,EAAIP,EAAEC,OAAQM,IAE1BgU,IADAxT,EAAYf,EAAEO,GAAKS,GACiBD,EACpCyT,GAAsBzT,EAAYA,EAAYA,EAMlDD,IAAM2T,EAAoBzU,EAAEC,OAAS,EAG/ByU,EAA6BlU,KAAKa,KACpCkT,EAAuBE,GAGrB5T,EAAIb,EAAEC,OAGZ,OAAQY,EAAI2T,IAAwB3T,EAAI,IAAMA,EAAI,GAFnCL,KAAKS,IAAIyT,EAA4B,GAGxD,yECpCA,SAA+B1U,EAAGa,EAAG0E,GACjC,GAAiB,IAAbvF,EAAEC,OACF,MAAO,GAMXsF,EAAeA,GAAgB/E,KAAKgF,OAKpC,IAHA1E,IAAMb,EAASD,EAAEC,OACX2F,EAAS,GAENrF,EAAI,EAAGA,EAAIM,EAAGN,IAAK,CACxBO,IAAM4E,EAAQlF,KAAK2C,MAAMoC,IAAiBtF,GAE1C2F,EAAO/B,KAAK7D,EAAE0F,GACjB,CAED,OAAOE,CACX,6ECjBA,SAA0B4F,EAAQO,GAE9B,OAAO7J,EADQwK,GAAWlB,EAAQO,GAEtC,mECJA,SAA0BrL,EAAMG,EAAGY,GAC/B,OAAQf,EAAOG,EAAIY,IAAUZ,EAAI,EACrC,0DCGA,SAAeb,EAAG2U,GAWd,OATmBjU,EAAKV,GASH2U,IANVxT,EAAkBnB,GAGfQ,KAAKa,KAAKrB,EAAEC,QAI9B,mBCFA,SAAwB2S,EAASC,EAAS+B,GACtC9T,IAAMD,EAAI+R,EAAQ3S,OACZ4C,EAAIgQ,EAAQ5S,OAIlB,IAAKY,IAAMgC,EACP,OAAO,KAIN+R,IACDA,EAAa,GAGjB9T,IAAM+T,EAAQnU,EAAKkS,GACbkC,EAAQpU,EAAKmS,GACbpL,EAAkBF,EAAeqL,GACjCmC,EAAkBxN,EAAesL,GAEvC,GACqB,iBAAVgC,GACU,iBAAVC,GACoB,iBAApBrN,GACoB,iBAApBsN,EACT,CACEjU,IAAMkU,IACAnU,EAAI,GAAK4G,GAAmB5E,EAAI,GAAKkS,IACtClU,EAAIgC,EAAI,GAEb,OACKgS,EAAQC,EAAQF,GACjBpU,KAAKa,KAAK2T,GAAoB,EAAInU,EAAI,EAAIgC,GAEjD,CACL,uDC/CA,SAAyB+P,EAASC,GAC9B,IAAKD,EAAQ3S,SAAW4S,EAAQ5S,OAC5B,MAAM,IAAIU,MAAM,+BAQpB,IALAG,IAAMmU,EAAgBrC,EACjBlH,KAAI,SAAC1L,GAAC,OAAQqJ,MAAO,IAAK5H,MAAOzB,MACjC6P,OAAOgD,EAAQnH,KAAI,SAACtE,GAAM,MAAA,CAAGiC,MAAO,IAAK5H,MAAO2F,EAAI,KACpDtF,MAAI,SAAEC,EAAGC,GAAC,OAAKD,EAAEN,MAAQO,EAAEP,KAAA,IAEvByT,EAAO,EAAGA,EAAOD,EAAchV,OAAQiV,IAC5CD,EAAcC,GAAMA,KAAOA,EAI/B,IADAhV,IAAIiV,EAAY,CAACF,EAAc,GAAGC,MACzB3U,EAAI,EAAGA,EAAI0U,EAAchV,OAAQM,IAClC0U,EAAc1U,GAAGkB,QAAUwT,EAAc1U,EAAI,GAAGkB,OAChD0T,EAAUtR,KAAKoR,EAAc1U,GAAG2U,MAC5B3U,IAAM0U,EAAchV,OAAS,GAC7BmV,EAAoBH,EAAeE,IAEhCA,EAAUlV,OAAS,EAC1BmV,EAAoBH,EAAeE,GAEnCA,EAAY,CAACF,EAAc1U,GAAG2U,MAItC,SAASE,EAAoBH,EAAeE,GAExC,IADArU,IAAMgT,GAAWqB,EAAU,GAAKA,EAAUA,EAAUlV,OAAS,IAAM,EAC1DM,EAAI,EAAGA,EAAI4U,EAAUlV,OAAQM,IAClC0U,EAAcE,EAAU5U,IAAI2U,KAAOpB,CAE1C,CAID,IAFA5T,IAAImV,EAAU,EAEL9U,EAAI,EAAGA,EAAI0U,EAAchV,OAAQM,IAAK,CAC3CO,IAAM8E,EAASqP,EAAc1U,GACR,MAAjBqF,EAAOyD,QACPgM,GAAWzP,EAAOsP,KAAO,EAEhC,CAED,OAAOG,CACX,WCxCA,SAAgBrV,EAAGU,EAAMS,GACrB,OAAQnB,EAAIU,GAAQS,CACxB"}