/*
 * Encog(tm) Core v0.1 - Javascript Version
 * http://www.heatonresearch.com/encog/
 * http://code.google.com/p/encog-java/
 *
 * Copyright 2008-2012 Heaton Research, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * For more information on Heaton Research copyrights, licenses
 * and trademarks visit:
 * http://www.heatonresearch.com/copyright
 */

/**
 * The main Encog namespace. This is the only global property created by Encog.
 * @type {*}
 */
var ENCOG = ENCOG || {

    /**
     * The version of Encog that this is.
     * @property VERSION
     * @type String
     * @final
     */
    VERSION : '1.0',

    /**
     * The Encog platform being used.
     * @property PLATFORM
     * @type String
     * @final
     */
    PLATFORM : 'javascript',

    /**
     * The precision that Encog uses (tolerance for treating a value as zero).
     * @property precision
     * @type Number
     * @final
     */
    precision : 1e-10,

    /**
     * A newline character.
     * @property NEWLINE
     * @type String
     * @final
     */
    NEWLINE : '\n',

    /**
     * The Encog type for activation functions.
     * @property ENCOG_TYPE_ACTIVATION
     * @type String
     * @final
     */
    ENCOG_TYPE_ACTIVATION : 'ActivationFunction',

    /**
     * The Encog type for RBF functions.
     * @property ENCOG_TYPE_RBF
     * @type String
     * @final
     */
    ENCOG_TYPE_RBF : 'RBF'
};

/**
 * The namespace function, used to define new namespaces.
 * @param namespaceString The namespace that is to be defined.
 * @method namespace
 * @return {Object} The newly created namespace, or existing one.
 */
ENCOG.namespace = function (namespaceString) {
    'use strict';
    // Fall back to globalThis so the library also loads outside a
    // browser (e.g. Node); in a browser window === globalThis.
    var parts = namespaceString.split('.'),
        parent = (typeof window !== 'undefined') ? window : globalThis,
        currentPart = '',
        i, length;

    for (i = 0, length = parts.length; i < length; i += 1) {
        currentPart = parts[i];
        parent[currentPart] = parent[currentPart] || {};
        parent = parent[currentPart];
    }
    return parent;
};

ENCOG.namespace('ENCOG.ActivationSigmoid');
ENCOG.namespace('ENCOG.ActivationTANH');
ENCOG.namespace('ENCOG.ActivationLinear');
ENCOG.namespace('ENCOG.ActivationElliott');
ENCOG.namespace('ENCOG.ActivationElliottSymmetric');
ENCOG.namespace('ENCOG.RadialGaussian');
ENCOG.namespace('ENCOG.RadialMexicanHat');
ENCOG.namespace('ENCOG.Util');
ENCOG.namespace('ENCOG.MathUtil');
ENCOG.namespace('ENCOG.ArrayUtil');
ENCOG.namespace('ENCOG.BasicLayer');
ENCOG.namespace('ENCOG.BasicNetwork');
ENCOG.namespace('ENCOG.PropagationTrainer');
ENCOG.namespace('ENCOG.LinearErrorFunction');
ENCOG.namespace('ENCOG.Swarm');
ENCOG.namespace('ENCOG.Anneal');
ENCOG.namespace('ENCOG.Genetic');

ENCOG.namespace('ENCOG.SOM');
ENCOG.namespace('ENCOG.TrainSOM');
ENCOG.namespace('ENCOG.ReadCSV');
ENCOG.namespace('ENCOG.EGFILE');

/////////////////////////////////////////////////////////////////////////////
// MathUtil: The following code provides math utilities for Encog          //
/////////////////////////////////////////////////////////////////////////////

/**
 * The math utilities for Encog.
 * @class MathUtil
 * @constructor
 */
ENCOG.MathUtil = function () {
    'use strict';
};

/**
 * Calculate the hyperbolic tangent.
 * Unfortunately, pre-ES6 Javascript does not have this built in.
 * @method tanh
 * @param x The value to calculate for.
 * @return {Number} The result from the calculation.
 */
ENCOG.MathUtil.tanh = function (x) {
    'use strict';
    var pos, neg;
    // For large |x| the naive formula overflows: exp(x) becomes Infinity
    // and Infinity/Infinity is NaN.  tanh has saturated to +/-1 well
    // before |x| = 20, so short-circuit there.
    if (x > 20) {
        return 1;
    }
    if (x < -20) {
        return -1;
    }
    pos = Math.exp(x);
    neg = Math.exp(-x);
    return (pos - neg) / (pos + neg);
};

/**
 * Calculate the sign of a number: 0 for (near) zero, 1 for positive,
 * -1 for negative.  Values within ENCOG.precision of zero count as zero.
 * @method sign
 * @param x The value to calculate for.
 * @return {Number} The result.
 */
ENCOG.MathUtil.sign = function (x) {
    'use strict';
    if (Math.abs(x) < ENCOG.precision) {
        return 0;
    }
    return (x > 0) ? 1 : -1;
};

/**
 * Calculate the Euclidean distance between a1 and a2 over the specified
 * starting index and length.
 * @method euclideanDistance
 * @param a1 The first array to consider.
 * @param a2 The second array to consider.
 * @param startIndex The starting index.
 * @param len The length of the range to compare.
 * @return {Number} The Euclidean distance.
 */
ENCOG.MathUtil.euclideanDistance = function (a1, a2, startIndex, len) {
    'use strict';
    var result = 0, i, diff;
    for (i = startIndex; i < (startIndex + len); i += 1) {
        diff = a1[i] - a2[i];
        result += diff * diff;
    }
    return Math.sqrt(result);
};

/**
 * Determine which elements of lst are the k nearest (by Euclidean
 * distance) to a1.  The element a1 itself (compared by identity) is
 * skipped, as is any element at distance >= maxDist.
 * @method kNearest
 * @param a1 A single-dimension array that is searched for in lst.
 * @param lst A 2d array that contains arrays with the same length as a1.
 * @param k The number of neighbors to find.
 * @param maxDist The maximum distance to consider.
 * @param startIndex The starting index.
 * @param len The length.
 * @return {Array} Up to k elements from lst that were the closest to a1.
 */
ENCOG.MathUtil.kNearest = function (a1, lst, k, maxDist, startIndex, len) {
    'use strict';
    var result = [], tempDist = [], idx = 0, worstIdx = -1, dist, agent;

    while (idx < lst.length) {
        agent = lst[idx];
        if (a1 !== agent) {
            dist = ENCOG.MathUtil.euclideanDistance(a1, agent, startIndex, len);
            if (dist < maxDist) {
                if (result.length < k) {
                    result.push(agent);
                    tempDist.push(dist);
                    worstIdx = ENCOG.ArrayUtil.arrayMaxIndex(tempDist);
                } else if (dist < tempDist[worstIdx]) {
                    // Replace the current farthest of the kept neighbors.
                    tempDist[worstIdx] = dist;
                    result[worstIdx] = agent;
                    worstIdx = ENCOG.ArrayUtil.arrayMaxIndex(tempDist);
                }
            }
        }
        idx += 1;
    }
    return result;
};

/**
 * Generate a random floating point number in the range [low, high).
 * @method randomFloat
 * @param low The low end of the range (inclusive).
 * @param high The high end of the range (exclusive).
 * @return {Number} The random number.
 */
ENCOG.MathUtil.randomFloat = function (low, high) {
    'use strict';
    // Math.random must be *called*; the previous code multiplied the
    // function object itself, which always produced NaN.
    return (Math.random() * (high - low)) + low;
};

/////////////////////////////////////////////////////////////////////////////
// ArrayUtil: The following code provides array utilities for Encog        //
/////////////////////////////////////////////////////////////////////////////

/**
 * The Encog array utilities.
 * @class ArrayUtil
 * @constructor
 */
ENCOG.ArrayUtil = function () {
    'use strict';
};

/**
 * Fill part of an array with a specific value.
 * @method fillArray
 * @param arr The array to fill.
 * @param start The starting index (inclusive).
 * @param stop The stopping index (exclusive).
 * @param v The value to fill with.
 */
ENCOG.ArrayUtil.fillArray = function (arr, start, stop, v) {
    'use strict';
    var i;
    for (i = start; i < stop; i += 1) {
        arr[i] = v;
    }
};

/**
 * Create a new floating point array, initialized to zeros.
 * @method newFloatArray
 * @param sz The size of the array to create.
 * @return {Array} The new array.
 */
ENCOG.ArrayUtil.newFloatArray = function (sz) {
    'use strict';
    var result = [];
    while (sz > 0) {
        result.push(0.0);
        sz -= 1;
    }
    return result;
};

/**
 * Create a new int array, initialized to zeros.
 * @method newIntArray
 * @param sz The size of the array to create.
 * @return {Array} The new array.
 */
ENCOG.ArrayUtil.newIntArray = function (sz) {
    'use strict';
    var result = [];
    // Decrement *after* pushing: the previous `while ((sz -= 1) > 0)`
    // loop produced only sz-1 elements, inconsistent with newFloatArray.
    while (sz > 0) {
        result.push(0);
        sz -= 1;
    }
    return result;
};

/**
 * Randomize part of an array within the given value range.
 * @method randomizeArray
 * @param arr The array to randomize.
 * @param start The starting index in the array (inclusive).
 * @param stop The stopping index in the array (exclusive).
 * @param low The low-end of the random range.
 * @param high The high-end of the random range.
 */
ENCOG.ArrayUtil.randomizeArray = function (arr, start, stop, low, high) {
    'use strict';
    var i;
    for (i = start; i < stop; i += 1) {
        arr[i] = ENCOG.MathUtil.randomFloat(low, high);
    }
};

/**
 * Fill a 2D array with a value.
 * @method fillArray2D
 * @param arr The 2D array to fill.
 * @param v The value to fill the array with.
 */
ENCOG.ArrayUtil.fillArray2D = function (arr, v) {
    'use strict';
    var i, j, row;
    for (i = 0; i < arr.length; i += 1) {
        row = arr[i];
        for (j = 0; j < row.length; j += 1) {
            row[j] = v;
        }
    }
};

/**
 * Randomize every element of a 2D array within the given value range.
 * @method randomizeArray2D
 * @param arr The 2D array to randomize.
 * @param low The low-end of the random range.
 * @param high The high-end of the random range.
 */
ENCOG.ArrayUtil.randomizeArray2D = function (arr, low, high) {
    'use strict';
    var i, j, row;
    for (i = 0; i < arr.length; i += 1) {
        row = arr[i];
        for (j = 0; j < row.length; j += 1) {
            row[j] = ENCOG.MathUtil.randomFloat(low, high);
        }
    }
};

/**
 * Allocate a 1D array of zeros of the specified size.
 * @method allocate1D
 * @param x The size of the array.
 * @return {Array} The allocated array.
 */
ENCOG.ArrayUtil.allocate1D = function (x) {
    'use strict';
    var i, result = [];
    for (i = 0; i < x; i += 1) {
        result[i] = 0;
    }
    return result;
};

/**
 * Allocate a 2D array of booleans, all initialized to false.
 * @method allocateBoolean2D
 * @param rows The number of rows.
 * @param cols The number of columns.
 * @return {Array} The allocated array.
 */
ENCOG.ArrayUtil.allocateBoolean2D = function (rows, cols) {
    'use strict';
    var result = [ [] ], row, col, temp;
    for (row = 0; row < rows; row += 1) {
        temp = [];
        for (col = 0; col < cols; col += 1) {
            temp[col] = false;
        }
        result[row] = temp;
    }
    return result;
};

/**
 * Copy a range of one array into another.
 * @method arrayCopy
 * @param source The source array.
 * @param sourceStart The index to start at in the source.
 * @param target The target array.
 * @param targetStart The target starting index.
 * @param count The count of values to copy.
 */
ENCOG.ArrayUtil.arrayCopy = function (source, sourceStart, target, targetStart, count) {
    'use strict';
    var i;
    for (i = 0; i < count; i += 1) {
        target[i + targetStart] = source[i + sourceStart];
    }
};

/**
 * Calculate the mean of one column (second dimension) of a 2D array.
 * @method arrayMean
 * @param a1 A 2D array.
 * @param idx The second dimension index in a1 to calculate the mean of.
 * @return {Number} The mean of a1[i][idx] over all rows i.
 */
ENCOG.ArrayUtil.arrayMean = function (a1, idx) {
    'use strict';
    var result = 0, i;
    for (i = 0; i < a1.length; i += 1) {
        result += a1[i][idx];
    }
    result /= a1.length;
    return result;
};

/**
 * Generate benchmark data: a random training set with values in [-1, 1).
 * @method generateBenchmarkData
 * @param rowCount The number of rows to generate.
 * @param colCount The number of columns to generate.
 * @return {Array} The resulting 2D array.
 */
ENCOG.ArrayUtil.generateBenchmarkData = function (rowCount, colCount) {
    'use strict';
    var result = [ [] ], item, row, col;
    for (row = 0; row < rowCount; row += 1) {
        item = [];
        for (col = 0; col < colCount; col += 1) {
            item[col] = (Math.random() * 2) - 1;
        }
        result[row] = item;
    }
    return result;
};

/**
 * Determine the index of the minimum value in an array.
 * @method arrayMinIndex
 * @param a1 A 1D array.
 * @return {Number} Index of the minimum value, or -1 for an empty array.
 */
ENCOG.ArrayUtil.arrayMinIndex = function (a1) {
    'use strict';
    var result = Number.MAX_VALUE, resultIndex = -1, i;
    for (i = 0; i < a1.length; i += 1) {
        if (a1[i] < result) {
            result = a1[i];
            resultIndex = i;
        }
    }
    return resultIndex;
};

/**
 * Determine the index of the maximum value in an array.
 * @method arrayMaxIndex
 * @param a1 A 1D array.
 * @return {Number} Index of the maximum value, or -1 for an empty array.
 */
ENCOG.ArrayUtil.arrayMaxIndex = function (a1) {
    'use strict';
    // Seed with -Infinity, not Number.MIN_VALUE: MIN_VALUE is the smallest
    // *positive* double, so the old code returned -1 for all-negative arrays.
    var result = -Infinity, resultIndex = -1, i;
    for (i = 0; i < a1.length; i += 1) {
        if (a1[i] > result) {
            result = a1[i];
            resultIndex = i;
        }
    }
    return resultIndex;
};

/////////////////////////////////////////////////////////////////////////////
// Util: The following code provides general utilities for Encog           //
/////////////////////////////////////////////////////////////////////////////

/**
 * Strip leading, and ending, quotes from a string, if present.
 * @method stripQuotes
 * @param s The string to process.
 * @return {String} The string, with stripped quotes.
 */
ENCOG.Util.stripQuotes = function (s) {
    'use strict';
    var l = s.length;
    if (s[0] === '\"' || s[0] === '\'') {
        s = s.substr(1);
        l -= 1;
    }

    if (s[l - 1] === '\"' || s[l - 1] === '\'') {
        s = s.substr(0, l - 1);
    }

    return s;
};

/////////////////////////////////////////////////////////////////////////////
// Activation Functions: activation functions used by Encog                //
/////////////////////////////////////////////////////////////////////////////

/**
 * The sigmoid activation function takes on a sigmoidal shape.  Only
 * positive numbers are generated.  Do not use this activation function if
 * negative number output is desired.
 * @constructor
 * @class ActivationSigmoid
 */
ENCOG.ActivationSigmoid = function () {
    'use strict';
};

ENCOG.ActivationSigmoid.prototype = {
    /**
     * The name of this object.
     * @property NAME
     * @type String
     * @final
     */
    NAME : "ActivationSigmoid",

    /**
     * The Encog type of this object.
     * @property encogType
     * @type String
     * @final
     */
    encogType : ENCOG.ENCOG_TYPE_ACTIVATION,

    /**
     * Calculate the activation function for the specified range, in place.
     * @method activationFunction
     * @param x An array to calculate the values for.
     * @param start The starting point in the array to calculate.
     * @param size The size to calculate.
     */
    activationFunction : function (x, start, size) {
        'use strict';
        var i;
        for (i = start; i < start + size; i += 1) {
            x[i] = 1.0 / (1.0 + Math.exp(-1 * x[i]));
        }
    },

    /**
     * Calculate the derivative.  Many activation derivatives can be more
     * efficiently calculated using the value after the regular activation
     * is calculated, so both values are passed in.
     * @method derivativeFunction
     * @param b The value before the regular activation was calculated.
     * @param a The value after the regular activation was calculated.
     * @return {Number} The result.
     */
    derivativeFunction : function (b, a) {
        'use strict';
        return a * (1.0 - a);
    }
};

/**
 * Create a Sigmoid activation function.
 * @method create
 * @return {ENCOG.ActivationSigmoid} The newly created activation function.
 */
ENCOG.ActivationSigmoid.create = function () {
    'use strict';
    return new ENCOG.ActivationSigmoid();
};

/**
 * The hyperbolic tangent activation function takes the curved shape of
 * the hyperbolic tangent.  This activation function produces both
 * positive and negative output.  Use this activation function if both
 * negative and positive output is desired.
 * @constructor
 * @class ActivationTANH
 */
ENCOG.ActivationTANH = function () {
    'use strict';
};

ENCOG.ActivationTANH.prototype = {
    /**
     * The name of this object.
     * @property NAME
     * @type String
     * @final
     */
    NAME : "ActivationTANH",

    /**
     * The Encog type of this object.
     * @property encogType
     * @type String
     * @final
     */
    encogType : ENCOG.ENCOG_TYPE_ACTIVATION,

    /**
     * Calculate the activation function for the specified range, in place.
     * @method activationFunction
     * @param x An array to calculate the values for.
     * @param start The starting point in the array to calculate.
     * @param size The size to calculate.
     */
    activationFunction : function (x, start, size) {
        'use strict';
        var i;
        for (i = start; i < start + size; i += 1) {
            x[i] = ENCOG.MathUtil.tanh(x[i]);
        }
    },

    /**
     * Calculate the derivative from the post-activation value.
     * @method derivativeFunction
     * @param b The value before the regular activation was calculated.
     * @param a The value after the regular activation was calculated.
     * @return {Number} The result.
     */
    derivativeFunction : function (b, a) {
        'use strict';
        return (1.0 - a * a);
    }
};

/**
 * Create a TANH activation function.
 * @method create
 * @return {ENCOG.ActivationTANH} The newly created activation function.
 */
ENCOG.ActivationTANH.create = function () {
    'use strict';
    return new ENCOG.ActivationTANH();
};

/**
 * The Linear layer is really not an activation function at all.  The
 * input is simply passed on, unmodified, to the output.  This activation
 * function is primarily theoretical and of little actual use.  Usually an
 * activation function that scales between 0 and 1 or -1 and 1 should be
 * used.
 * @constructor
 * @class ActivationLinear
 */
ENCOG.ActivationLinear = function () {
    'use strict';
};

ENCOG.ActivationLinear.prototype = {
    /**
     * The name of this object.
     * @property NAME
     * @type String
     * @final
     */
    NAME : "ActivationLinear",

    /**
     * The Encog type of this object.
     * @property encogType
     * @type String
     * @final
     */
    encogType : ENCOG.ENCOG_TYPE_ACTIVATION,

    /**
     * The identity activation: leaves the array unmodified.
     * @method activationFunction
     */
    activationFunction : function () {
        'use strict';
    },

    /**
     * Calculate the derivative (always 1 for the identity function).
     * @method derivativeFunction
     * @return {Number} The result.
     */
    derivativeFunction : function () {
        'use strict';
        return 1.0;
    }
};

/**
 * Create a Linear activation function.
 * @method create
 * @return {ENCOG.ActivationLinear} The newly created activation function.
 */
ENCOG.ActivationLinear.create = function () {
    'use strict';
    return new ENCOG.ActivationLinear();
};

/**
 * Computationally efficient alternative to ActivationSigmoid.  Its output
 * is in the range [0, 1], and it is derivable.  It will approach 0 and 1
 * more slowly than Sigmoid, so it might be more suitable to
 * classification tasks than prediction tasks.
 *
 * Elliott, D.L. "A better activation function for artificial neural
 * networks", 1993
 * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.46.7204&rep=rep1&type=pdf
 * @constructor
 * @class ActivationElliott
 */
ENCOG.ActivationElliott = function () {
    'use strict';
};

ENCOG.ActivationElliott.prototype = {
    /**
     * The name of this object.
     * @property NAME
     * @type String
     * @final
     */
    NAME : "ActivationElliott",

    /**
     * The Encog type of this object.
     * @property encogType
     * @type String
     * @final
     */
    encogType : ENCOG.ENCOG_TYPE_ACTIVATION,

    /**
     * The slope of the activation function (default 1).
     * @property slope
     * @type Number
     */
    slope : 1,

    /**
     * Calculate the activation function for the specified range, in place.
     * @method activationFunction
     * @param x An array to calculate the values for.
     * @param start The starting point in the array to calculate.
     * @param size The size to calculate.
     */
    activationFunction : function (x, start, size) {
        'use strict';
        var i;
        for (i = start; i < start + size; i += 1) {
            x[i] = ((x[i] * this.slope) / 2) /
                (1 + Math.abs(x[i] * this.slope)) + 0.5;
        }
    },

    /**
     * Calculate the derivative from the pre-activation value.
     * @method derivativeFunction
     * @param b The value before the regular activation was calculated.
     * @param a The value after the regular activation was calculated.
     * @return {Number} The result.
     */
    derivativeFunction : function (b, a) {
        'use strict';
        return this.slope / (2.0 * (1.0 + Math.abs(b * this.slope)) *
            (1 + Math.abs(b * this.slope)));
    }
};

/**
 * Create an Elliott activation function.
 * @method create
 * @param s The slope to use (defaults to 1).
 * @return {ENCOG.ActivationElliott} The newly created activation function.
 */
ENCOG.ActivationElliott.create = function (s) {
    'use strict';
    var result = new ENCOG.ActivationElliott();
    result.slope = s || 1;
    return result;
};

/**
 * Computationally efficient alternative to ActivationTANH.  Its output is
 * in the range [-1, 1], and it is derivable.  It will approach -1 and 1
 * more slowly than Tanh, so it might be more suitable to classification
 * tasks than prediction tasks.
 *
 * Elliott, D.L. "A better activation function for artificial neural
 * networks", 1993
 * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.46.7204&rep=rep1&type=pdf
 * @constructor
 * @class ActivationElliottSymmetric
 */
ENCOG.ActivationElliottSymmetric = function () {
    'use strict';
};

ENCOG.ActivationElliottSymmetric.prototype = {
    /**
     * The name of this object.
     * @property NAME
     * @type String
     * @final
     */
    NAME : "ActivationElliottSymmetric",

    /**
     * The Encog type of this object.
     * @property encogType
     * @type String
     * @final
     */
    encogType : ENCOG.ENCOG_TYPE_ACTIVATION,

    /**
     * The slope of the activation function (default 1).
     * @property slope
     * @type Number
     */
    slope : 1,

    /**
     * Calculate the activation function for the specified range, in place.
     * @method activationFunction
     * @param x An array to calculate the values for.
     * @param start The starting point in the array to calculate.
     * @param size The size to calculate.
     */
    activationFunction : function (x, start, size) {
        'use strict';
        var i;
        for (i = start; i < start + size; i += 1) {
            x[i] = (x[i] * this.slope) / (1 + Math.abs(x[i] * this.slope));
        }
    },

    /**
     * Calculate the derivative from the pre-activation value.
     * @method derivativeFunction
     * @param b The value before the regular activation was calculated.
     * @param a The value after the regular activation was calculated.
     * @return {Number} The result.
     */
    derivativeFunction : function (b, a) {
        'use strict';
        var d = (1.0 + Math.abs(b * this.slope));
        return this.slope / (d * d);
    }
};

/**
 * Create an Elliott Symmetric activation function.
 * @method create
 * @param s The slope to use (defaults to 1).
 * @return {ENCOG.ActivationElliottSymmetric} The newly created activation
 * function.
 */
ENCOG.ActivationElliottSymmetric.create = function (s) {
    'use strict';
    var result = new ENCOG.ActivationElliottSymmetric();
    result.slope = s || 1;
    return result;
};

/////////////////////////////////////////////////////////////////////////////
// Radial Basis Functions: RBFs used by Encog                              //
/////////////////////////////////////////////////////////////////////////////

/**
 * A Gaussian radial basis function.
 * @constructor
 * @class RadialGaussian
 */
ENCOG.RadialGaussian = function () {
    'use strict';
};

ENCOG.RadialGaussian.prototype = {
    /**
     * The name of this object.
     * @property NAME
     * @type String
     * @final
     */
    NAME : "RadialGaussian",

    /**
     * The Encog type of this object.
     * @property encogType
     * @type String
     * @final
     */
    encogType : ENCOG.ENCOG_TYPE_RBF,

    /**
     * The center of the RBF.
     * @property center
     * @type Array
     */
    center : [],

    /**
     * The width of the RBF.
     * @property width
     * @type Number
     */
    width : 1,

    /**
     * The peak of the RBF.
     * @property peak
     * @type Number
     */
    peak : 1,

    /**
     * Calculate the RBF for the specified input vector.
     * @method calculate
     * @param x An array holding the input vector.
     * @return {Number} The RBF value.
     */
    calculate : function (x) {
        'use strict';
        var value = 0, i;
        for (i = 0; i < this.center.length; i += 1) {
            value += Math.pow(x[i] - this.center[i], 2) /
                (2.0 * this.width * this.width);
        }
        return this.peak * Math.exp(-value);
    }
};

/**
 * Create a Gaussian RBF.
 * @method create
 * @param thePeak The peak (defaults to 1).
 * @param theCenters The center vector.
 * @param theWidth The width (defaults to 1).
 * @return {ENCOG.RadialGaussian} The newly created RBF.
 */
ENCOG.RadialGaussian.create = function (thePeak, theCenters, theWidth) {
    'use strict';
    var result = new ENCOG.RadialGaussian();
    result.peak = thePeak || 1;
    // Assign to `center`, the property that calculate() reads; the old
    // code assigned a `centers` property that was never used.
    result.center = theCenters;
    result.width = theWidth || 1;
    return result;
};

/**
 * A Mexican Hat (Ricker wavelet) radial basis function.
 * @constructor
 * @class RadialMexicanHat
 */
ENCOG.RadialMexicanHat = function () {
    'use strict';
};

ENCOG.RadialMexicanHat.prototype = {
    /**
     * The name of this object.
     * @property NAME
     * @type String
     * @final
     */
    NAME : "RadialMexicanHat",

    /**
     * The Encog type of this object.
     * @property encogType
     * @type String
     * @final
     */
    encogType : ENCOG.ENCOG_TYPE_RBF,

    /**
     * The center of the RBF.
     * @property center
     * @type Array
     */
    center : [],

    /**
     * The width of the RBF (unused by calculate, kept for API symmetry).
     * @property width
     * @type Number
     */
    width : 1,

    /**
     * The peak of the RBF.
     * @property peak
     * @type Number
     */
    peak : 1,

    /**
     * Calculate the RBF for the specified input vector.
     * @method calculate
     * @param x An array holding the input vector.
     * @return {Number} The RBF value.
     */
    calculate : function (x) {
        'use strict';
        // Calculate the "norm", but don't take the square root --
        // we would just be squaring it again below.
        var norm = 0, i;
        for (i = 0; i < this.center.length; i += 1) {
            norm += Math.pow(x[i] - this.center[i], 2);
        }

        // Calculate the value.
        return this.peak * (1 - norm) * Math.exp(-norm / 2);
    }
};

/**
 * Create a Mexican Hat RBF.
 * @method create
 * @param thePeak The peak (defaults to 1).
 * @param theCenters The center vector.
 * @param theWidth The width (defaults to 1).
 * @return {ENCOG.RadialMexicanHat} The newly created RBF.
 */
ENCOG.RadialMexicanHat.create = function (thePeak, theCenters, theWidth) {
    'use strict';
    var result = new ENCOG.RadialMexicanHat();
    result.peak = thePeak || 1;
    // Assign to `center`, the property that calculate() reads; the old
    // code assigned a `centers` property that was never used.
    result.center = theCenters;
    result.width = theWidth || 1;
    return result;
};

/////////////////////////////////////////////////////////////////////////////
// Error Functions: The following implements error functions used by Encog //
/////////////////////////////////////////////////////////////////////////////

/**
 * The Linear Error function is used to calculate the value that
 * propagation training seeks to minimize.  The linear function simply
 * subtracts actual from ideal values.
 * @class LinearErrorFunction
 * @constructor
 */
ENCOG.LinearErrorFunction = function () {
    'use strict';
};

ENCOG.LinearErrorFunction.prototype = {
    /**
     * Calculate the error value for the ideal and actual results.
     * @method calculateError
     * @param ideal The ideal output.
     * @param actual The actual output.
     * @param error The resulting error (written in place).
     */
    calculateError : function (ideal, actual, error) {
        'use strict';
        var i;
        for (i = 0; i < actual.length; i += 1) {
            error[i] = ideal[i] - actual[i];
        }
    }
};

/**
 * Create the linear error function.
 * @method create
 * @return {ENCOG.LinearErrorFunction} The newly created error function.
 */
ENCOG.LinearErrorFunction.create = function () {
    'use strict';
    return new ENCOG.LinearErrorFunction();
};

/////////////////////////////////////////////////////////////////////////////
// Feedforward Neural Network: feedforward neural networks for Encog       //
/////////////////////////////////////////////////////////////////////////////

/**
 * The BasicLayer class is used to specify neural networks.  Once the
 * neural network is created, this class is no longer used.
 * @class BasicLayer
 * @constructor
 */
ENCOG.BasicLayer = function () {
    'use strict';
};

ENCOG.BasicLayer.prototype = {
    /**
     * The name of this object.
     * @property NAME
     * @type String
     * @final
     */
    NAME : 'BasicLayer',

    /**
     * The activation function to use for this layer.
     * @property activation
     * @type {Object}
     */
    activation : null,

    /**
     * The neuron count for this layer.
     * @property count
     * @type {number}
     */
    count : null,

    /**
     * The activation level for the bias neuron.  Typically 1 if there are
     * bias neurons, or zero if none.
     * @property biasActivation
     * @type {number}
     */
    biasActivation : null,

    /**
     * If this layer has context fed by other layers, this property points
     * to those other layers.
     * @property contextFedBy
     * @type {Object}
     */
    contextFedBy : null,

    /**
     * Calculate the total count of neurons in this layer, including bias
     * and context neurons.
     * @method calcTotalCount
     * @return {*} The total neuron count.
     */
    calcTotalCount : function () {
        'use strict';
        if (this.contextFedBy === null) {
            return this.count + (this.hasBias() ? 1 : 0);
        }
        return this.count + (this.hasBias() ? 1 : 0) +
            this.contextFedBy.count;
    },

    /**
     * Determine if this layer has bias.
     * @method hasBias
     * @return {Boolean} True, if this layer has bias.
     */
    hasBias : function () {
        'use strict';
        return Math.abs(this.biasActivation) > ENCOG.precision;
    },

    /**
     * Calculate the count of context neurons.
     * @method calcContextCount
     * @return {*} The count of context neurons.
     */
    calcContextCount : function () {
        'use strict';
        if (this.contextFedBy === null) {
            return 0;
        }
        return this.contextFedBy.count;
    }
};

/**
 * Create a BasicLayer.
 * @method create
 * @param activation The activation function used by this layer.
 * @param count The neuron count for this layer.
 * @param biasActivation The bias activation for this layer: 0 for none,
 * or the desired activation (typically 1) to have a bias neuron.
 * @return {ENCOG.BasicLayer} The newly created layer.
 */
ENCOG.BasicLayer.create = function (activation, count, biasActivation) {
    'use strict';
    var result;

    if (activation.encogType !== ENCOG.ENCOG_TYPE_ACTIVATION) {
        throw new Error("Invalid activation function.");
    }

    result = new ENCOG.BasicLayer();
    result.activation = activation;
    result.count = count;
    result.biasActivation = biasActivation;
    result.contextFedBy = null;
    return result;
};

/**
 * Basic Network, provides neural network functionality.
 *
 * @class BasicNetwork
 * @constructor
 **/
ENCOG.BasicNetwork = function () {
    'use strict';
};

ENCOG.BasicNetwork.prototype = {
    /**
     * The name of this object.
     * @property NAME
     * @type String
     * @final
     */
    NAME : 'BasicNetwork',

    /**
     * The input neuron count.
     * @property inputCount
     * @type number
     */
    inputCount : null,

    /**
     * The output neuron count.
     * @property outputCount
     * @type number
     */
    outputCount : null,

    /**
     * The individual layer neuron counts.
     * @property layerCounts
     * @type Array
     */
    layerCounts : null,

    /**
     * The individual layer neuron context counts.
     * @property layerContextCount
     * @type Array
     */
    layerContextCount : null,

    /**
     * The weight indexes.
     * @property weightIndex
     * @type Array
     */
    weightIndex : null,

    /**
     * The individual layer indexes.
     * @property layerIndex
     * @type Array
     */
    layerIndex : null,

    /**
     * The activation functions.
     * @property activationFunctions
     * @type Array
     */
    activationFunctions : null,

    /**
     * The layer feed counts.  These are neurons that are actually fed
     * from the previous layer (i.e. not bias or context).
     * @property layerFeedCounts
     * @type Array
     */
    layerFeedCounts : null,

    /**
     * The context target offsets.
     * @property contextTargetOffset
     * @type Array
     */
    contextTargetOffset : null,

    /**
     * The context target sizes.
     * @property contextTargetSize
     * @type Array
     */
    contextTargetSize : null,

    /**
     * The activation level for bias neurons on each layer.
     * @property biasActivation
     * @type Array
     */
    biasActivation : null,

    /**
     * The layer to begin training at.
     * @property beginTraining
     * @type Number
     */
    beginTraining : null,

    /**
     * The layer to end training at.
     * @property endTraining
     * @type Number
     */
    endTraining : null,

    /**
     * The weights of the neural network.
     * @property weights
     * @type Array
     */
    weights : null,

    /**
     * The layer outputs.
     * @property layerOutput
     * @type Array
     */
    layerOutput : null,

    /**
     * The layer sums (pre-activation values).
     * @property layerSums
     * @type Array
     */
    layerSums : null,

    /**
     * The connection limit.
     * @property connectionLimit
     * @type Number
     */
    connectionLimit : ENCOG.precision,

    /**
     * Randomize all weights to values in the range [-1, 1).
     * @method randomize
     */
    randomize : function () {
        'use strict';
        var i;
        for (i = 0; i < this.weights.length; i += 1) {
            this.weights[i] = (Math.random() * 2.0) - 1.0;
        }
    },

    /**
     * Clear any context neurons: regular and context neurons are reset to
     * zero, bias neurons are reset to their bias activation.
     * @method clearContext
     */
    clearContext : function () {
        'use strict';
        var index, i, hasBias;

        index = 0;
        for (i = 0; i < this.layerIndex.length; i += 1) {
            hasBias = (this.layerContextCount[i] + this.layerFeedCounts[i])
                !== this.layerCounts[i];

            // fill in regular neurons
            ENCOG.ArrayUtil.fillArray(this.layerOutput, index,
                index + this.layerFeedCounts[i], 0);
            index += this.layerFeedCounts[i];

            // fill in the bias
            if (hasBias) {
                this.layerOutput[index] = this.biasActivation[i];
                index += 1;
            }

            // fill in context
            ENCOG.ArrayUtil.fillArray(this.layerOutput, index,
                index + this.layerContextCount[i], 0);
            index += this.layerContextCount[i];
        }
    },

    /**
     * Compute one layer of the network (layers are stored output-first,
     * so layer currentLayer feeds layer currentLayer-1).
     * @method computeLayer
     * @param currentLayer The index of the layer to compute.
     */
    computeLayer : function (currentLayer) {
        'use strict';
        var inputIndex, outputIndex, inputSize, outputSize, index,
            limitX, limitY, x, y, sum, offset;

        inputIndex = this.layerIndex[currentLayer];
        outputIndex = this.layerIndex[currentLayer - 1];
        inputSize = this.layerCounts[currentLayer];
        outputSize = this.layerFeedCounts[currentLayer - 1];

        index = this.weightIndex[currentLayer - 1];

        limitX = outputIndex + outputSize;
        limitY = inputIndex + inputSize;

        // weight values
        for (x = outputIndex; x < limitX; x += 1) {
            sum = 0;
            for (y = inputIndex; y < limitY; y += 1) {
                sum += this.weights[index] * this.layerOutput[y];
                index += 1;
            }
            this.layerSums[x] = sum;
            this.layerOutput[x] = sum;
        }

        this.activationFunctions[currentLayer - 1].activationFunction(
            this.layerOutput,
            outputIndex,
            outputSize
        );

        // update context values
        offset = this.contextTargetOffset[currentLayer];
        ENCOG.ArrayUtil.arrayCopy(this.layerOutput, outputIndex,
            this.layerOutput, offset, this.contextTargetSize[currentLayer]);
    },

    /**
     * Compute the output of the network for the given input.
     * @method compute
     * @param input The input array.
     * @param output The output array (written in place).
     */
    compute : function (input, output) {
        'use strict';
        var sourceIndex, i, offset;

        sourceIndex = this.layerOutput.length -
            this.layerCounts[this.layerCounts.length - 1];

        ENCOG.ArrayUtil.arrayCopy(input, 0, this.layerOutput,
            sourceIndex, this.inputCount);

        for (i = this.layerIndex.length - 1; i > 0; i -= 1) {
            this.computeLayer(i);
        }

        // update context values
        offset = this.contextTargetOffset[0];
        ENCOG.ArrayUtil.arrayCopy(this.layerOutput, 0, this.layerOutput,
            offset, this.contextTargetSize[0]);

        ENCOG.ArrayUtil.arrayCopy(this.layerOutput, 0, output, 0,
            this.outputCount);
    },

    /**
     * Evaluate the network over a data set and return the mean squared
     * error against the ideal outputs.
     * @method evaluate
     * @param inputData A 2D array of input vectors.
     * @param idealData A 2D array of ideal output vectors.
     * @return {Number} The mean squared error (NaN for an empty set).
     */
    evaluate : function (inputData, idealData) {
        'use strict';
        var i, j, input, ideal, output, diff, globalError, setSize;

        output = [];
        globalError = 0;
        setSize = 0;

        for (i = 0; i < inputData.length; i += 1) {
            input = inputData[i];
            ideal = idealData[i];

            this.compute(input, output);

            for (j = 0; j < ideal.length; j += 1) {
                diff = ideal[j] - output[j];
                globalError += diff * diff;
                setSize += 1;
            }
        }
        return globalError / setSize;
    }
};

/**
 * Create a BasicNetwork from an array of BasicLayer objects (input layer
 * first).  Layers are stored in the flat network output-first.
 * @method create
 * @param layers An array of ENCOG.BasicLayer objects, or null.
 * @return {ENCOG.BasicNetwork} The newly created network.
 */
ENCOG.BasicNetwork.create = function (layers) {
    'use strict';
    var layerCount, result, index, neuronCount, weightCount, i, j,
        layer, nextLayer, neuronIndex;

    result = new ENCOG.BasicNetwork();
    if (layers != null) {
        layerCount = layers.length;

        result.inputCount = layers[0].count;
        result.outputCount = layers[layerCount - 1].count;

        result.layerCounts = ENCOG.ArrayUtil.allocate1D(layerCount);
        result.layerContextCount = ENCOG.ArrayUtil.allocate1D(layerCount);
        result.weightIndex = ENCOG.ArrayUtil.allocate1D(layerCount);
        result.layerIndex = ENCOG.ArrayUtil.allocate1D(layerCount);
        result.activationFunctions = ENCOG.ArrayUtil.allocate1D(layerCount);
        result.layerFeedCounts = ENCOG.ArrayUtil.allocate1D(layerCount);
        result.contextTargetOffset = ENCOG.ArrayUtil.allocate1D(layerCount);
        result.contextTargetSize = ENCOG.ArrayUtil.allocate1D(layerCount);
        result.biasActivation = ENCOG.ArrayUtil.allocate1D(layerCount);

        index = 0;
        neuronCount = 0;
        weightCount = 0;

        // Walk the layers from output to input, building the flat arrays.
        for (i = layers.length - 1; i >= 0; i -= 1) {
            layer = layers[i];
            nextLayer = null;

            if (i > 0) {
                nextLayer = layers[i - 1];
            }

            result.biasActivation[index] = layer.biasActivation;
            result.layerCounts[index] = layer.calcTotalCount();
            result.layerFeedCounts[index] = layer.count;
            result.layerContextCount[index] = layer.calcContextCount();
            result.activationFunctions[index] = layer.activation;

            neuronCount += layer.calcTotalCount();

            if (nextLayer !== null) {
                weightCount += layer.count * nextLayer.calcTotalCount();
            }

            if (index === 0) {
                result.weightIndex[index] = 0;
                result.layerIndex[index] = 0;
            } else {
                result.weightIndex[index] = result.weightIndex[index - 1]
                    + (result.layerCounts[index]
                        * result.layerFeedCounts[index - 1]);
                result.layerIndex[index] = result.layerIndex[index - 1]
                    + result.layerCounts[index - 1];
            }

            // Locate any layers whose context is fed by this layer.
            neuronIndex = 0;
            for (j = layers.length - 1; j >= 0; j -= 1) {
                if (layers[j].contextFedBy === layer) {
                    result.hasContext = true;
                    result.contextTargetSize[index] =
                        layers[j].calcContextCount();
                    result.contextTargetOffset[index] = neuronIndex
                        + (layers[j].calcTotalCount()
                            - layers[j].calcContextCount());
                }
                neuronIndex += layers[j].calcTotalCount();
            }

            index += 1;
        }

        result.beginTraining = 0;
        result.endTraining = result.layerCounts.length - 1;

        result.weights = ENCOG.ArrayUtil.allocate1D(weightCount);
        result.layerOutput = ENCOG.ArrayUtil.allocate1D(neuronCount);
        result.layerSums = ENCOG.ArrayUtil.allocate1D(neuronCount);

        result.clearContext();
    }
    return result;
};

allocate1D(weightCount). result. neuronCount += layer. result.layers[j] .1] + result.calcTotalCount().count.calcContextCount(). result. result. } result. if (nextLayer !== null) { weightCount += layer. * * @class PropagationTrainer * @constructor **/ ENCOG.activation. } if (index === 0) { result.calcContextCount (). } else { result.layerIndex[index] = result.layerCounts[index] * result. /** * Propagation training. result. result. } neuronIndex = 0.clearContext().layerSums = ENCOG. j -= 1) { if (layers[j].layerFeedCounts[index] = layer. }. result. result.1] + (result.ArrayUtil.layerCounts[index] = layer.weightIndex[index .beginTraining = 0. } index += 1.ArrayUtil. j >= 0.1].length .layerCounts.weightIndex[index] = result.hasContext = true.calcTotalCount() .weights = ENCOG. } return result. result.contextTargetSize[index] = layers[j].biasActivation.count * nextLayer.length .layerCounts[index .ArrayUtil.biasActivation[index] = layer.PropagationTrainer = function () { .contextFedBy === layer) { result.weightIndex[index] = 0.1]).endTraining = result.calcContextCount()).activationFunctions[index] = layer.calcTotalCount().} result.calcTotalCount().allocate1D(neuronCount). for (j = layers.layerFeedCounts[index .layerIndex[index] = 0.1.layerOutput = ENCOG.contextTargetOffset[index] = neuronIndex + (layers[j]. result. result.layerContextCount[index] = layer. includes RPROP and Back Propagation.calcTotalCount(). } neuronIndex += layers[j].allocate1D(neuronCount). result.1.layerIndex[index . result.

This is the percentage by which the deltas are increased by if * the partial derivative is greater than zero. ENCOG. This is specified by the resilient propagation * algorithm. This is the percentage by which the deltas are increased by if * the partial derivative is less than zero.'use strict'. */ MAX_STEP : 50. */ DELTA_MIN : 1e-6.5. /** * The maximum amount a delta can reach. * @property NAME * @type String * @final */ NAME : 'PropagationTrainer'.2. */ NEGATIVE_ETA : 0.prototype = { /** * The name of this object. /** * The NEGATIVE ETA value. * * @property trainingInput * @type Array * @default null **/ trainingInput : null. * * @property network * @type Object * @default null **/ network : null. /** * The network that is being trained. /** * The minimum delta value for a weight matrix value. /** * The ideal results from training. * * @property trainingIdeal * @type Array * @default null **/ .PropagationTrainer. /** * The POSITIVE ETA value. */ POSITIVE_ETA : 1. }. /** * The input training data. This is specified by the resilient propagation * algorithm.

/** * The gradients. * * @property learningRate * @type number * @default null **/ learningRate : null. * * @property lastGradient * @type Array * @default null **/ lastGradient : null.Backpropagation. these are used to calculate the gradients. /** * The learning rate. * * @property type * @type String * @default null **/ type : null.Resilient propagation. /** * The type of training: * * "BPROP" . /** * The last gradients. /** * The layer detla's. * * @property momentum * @type number * @default null **/ momentum : null. /** . * "RPROP" . * * @property gradients * @type Array * @default null **/ gradients : null. /** * The momentum. * * @property layerDelta * @type Array * @default null **/ layerDelta : null.trainingIdeal : null.

index.layerIndex[currentLevel]. * * @property actual * @type number * @default null **/ actual : null. activation = this. * * @property lastDelta * @type Array * @default null **/ lastDelta : null. act ivation.flatSpot[currentLevel + 1]. sum.activationFunctions[currentLevel + 1].weightIndex[currentLevel].* The last weight deltas.create(). /** * The actual output from the neural network. currentFlatSpot = this. fromLayerSize = this. /** * The weight update values. toLayerSize. toLayerIndex = this.network. processLevel : function (currentLevel) { 'use strict'. y. fromLayerSize. yi. currentFlatSpot. index = this. toLayerSize = this.LinearErrorFunction.network. * * @property updateValues * @type number * @default null **/ updateValues : null. /** * The flat spot adjustment.network. output.network. fromLayerIndex = this.network. fromLayerIndex.network. * * @property flatSpot * @type number * @default null **/ flatSpot : null. xi. * * @property errorFunction * @type Function * @default LinearErrorFunction **/ errorFunction : ENCOG. x.layerIndex[currentLevel + 1]. var toLayerIndex. wi.layerFeedCounts[currentLevel]. /** * The error function.layerCounts[currentLevel + 1]. // handle weights .

gradients[i] * this. change = ENCOG.network. this.weights[wi] * this.MAX_STEP). . y < fromLayerSize.momentum). for (y = 0.network. thi s. weightChange.network. } }.gradients[i]. } else if (change < 0) { // if change<0.sign(this. for (x = 0.POSITIVE_ETA. learnRPROP : function () { 'use strict'. delta = Math.weights. delta = Math.lastDelta[i]. xi = toLayerIndex. and take the // sign.MathUtil. this. var i.updateValues[i] = delta. i.gradients[wi] += output * this.network.gradients[i]) * delta. We want to see if the gradient has changed its sign.layerSums[yi].DELTA_MIN). sum += this. } this. i += 1) { // multiply the current and previous gradient. var delta. this. x < toLayerSize.learningRate) + (this. learnBPROP : function () { 'use strict'.layerOutput[yi].weights.weights[i] += delta.min(delta. weightChange = 0.derivativeFunction(this.layerDelta[xi]. weightChange = ENCOG.lastDelta[i] * this.updateValues[i] * this. weightChange = -this. then the sign has changed.layerDelta[yi] = sum * (activation. xi += 1.MathUtil. y += 1) { output = this. sum = 0. wi = index + y.network.network. this.sign(this. yi += 1.network.max(delta. i += 1) { delta = (this. x += 1) { this. for (i = 0. then we increase the // delta so that it will converge faster if (change > 0) { delta = this. } }.lastGradient[i ]). this.yi = fromLayerIndex. for (i = 0.updateValues[i] = delta.layerDelta[xi]. // if the gradient has retained its sign. this. i < this.lastGradient[i] = this. i < this. and the last // delta was too big delta = this.updateValues[i] * this. wi += fromLayerSize.gradients[i] * this.lastDelta[i] = delta.length.length.layerOutput[yi]) + currentFlatSpot).NEGATIVE_ETA. delta. this. change.

this. s) { 'use strict'.trainingInput[i].network. this.setSize += 1.gradients[i]. i < this.lastDelta. . } else if (this. j += 1) { delta = this.layerDelta[i] = ((this. 0. } }.MathUtil. process : function (input. j.layerDelta[i] * s).length.fillArray(this. } else if (change === 0) { // if change==0 then there is no change to the delta delta = this. i += 1) { this.layerDelta).process(this. 1.learnRPROP(). ENCOG. this. } this.length.beginTraining. j < ideal.trainingInput.setSize = 0. this.length.length. i += 1) { this.length. this.lastDelta.gradients[i]) * delta.errorFunction.laye rOutput[i]) + this. this.actual = []. this.actual). } }. ENCOG. this.compute(input.sign(this.lastGradient[i] = this.globalError = this.calculateError(ideal. } if (this. } this.ArrayUtil.learnBPROP(). ideal. 0).activationFunctions[0] . delta.actual.globalError + (delta * delta).lastGradient[i] = 0. var i.updateValues[i]. i += 1) { this.layerSums[i]. for (i = 0.weights[i] += weightChange. this. 0.endTraining. i < this.type === "RPROP") { this.network.// set the previous gradient to zero so that there will be no // adjustment the next iteration this. iteration : function () { 'use strict'.0). } for (i = this.gradients.ArrayUtil.network.network.network.derivativeFunction(this. weightChange = ENCOG.network.processLevel(i).ideal[j]. this.network.actual. this.actual[j] . 0).flatSpot[0])) * (this.globalError = 0. i < this. var i. this.type === "BPROP") { this. this.trainingIdeal[i].fillArray(this. this.gradients. for (j = 0. for (i = 0.

0.newFloatArray(network.momentum = momentum. input. ENCOG. * * @class Swarm * @constructor **/ ENCOG. 0).weights.newFloatArray(network.newFloatArray(network.ArrayUtil.flatSpot = ENCOG.weights. 0. type.trainingInput = input.Swarm.newFloatArray(network.layerOutput. result.lastDelta = ENCOG. ENCOG.network = network.newFloatArray(network. return result.length). 0. result.gradients = ENCOG.length).ArrayUtil.globalError / this. ENCOG. }. var result = new ENCOG.1).trainingIdeal = ideal.type = type. ENCOG.ArrayUtil.PropagationTrainer. * @property NAME * @type String .weights.ArrayUtil.outputCount).newFloatArray(network.} this. 0. result.lastGradient.setSize.ArrayUtil. 0). result. result. result.layerDelta = ENCOG.ArrayUtil.prototype = { /** * The name of this object.newFloatArray(network.updateValues. result.error = this. result.length . } }.weights. ENCOG.Swarm = function () { 'use strict'.layerOutput.ArrayUtil. result.length ). result.learningRate = learningRate.lastGradient.flatSpot. ideal.updateValues.fillArray(result.length).fillArray(result.length .ArrayUtil.ArrayUtil.create = function (network.length). result.PropagationTrainer(). result.weights. momentum) { 'use strict'. result.fillArray(result. }. //////////////////////////////////////////////////////////////////////////////// ///////////////////////////////// /// Swarm: The following code implements Encog swarm // //////////////////////////////////////////////////////////////////////////////// //////////////////////////////// /** * Swarm algorithm.updateValues = ENCOG. network.length. learnin gRate.ArrayUtil.length). result.lastGradient = ENCOG. result.actual = ENCOG.

neighbors = ENCOG. dx.agents. separation . meanX. * @property agents * @type Array */ agents : null. neighbors. /** * A call back that is called with the neighbors of each agent. /** * The degree to which cohesion is applied to steering the agent.agents. var i.agents. this. i < this.length.agents[i]. 2). 10 . i += 1) { /////////////////////////////////////////////////////////////// // Begin implementation of three very basic laws of flocking.01. /** * The degree to which alignment is applied to steering the agent. meanY.neighbors).* @final */ NAME : 'Swarm'. turnAmount. * @property NAME * @type function * @final */ callbackNeighbors : null. this. * @property constAlignment * @type number */ constAlignment : 0. /** * The degree to which separation is applied to steering the agent. /** * An array of agents.agents[i]. /////////////////////////////////////////////////////////////// targetAngle = 0.MAX_VALUE.5. Number.kNearest(this. * Cohesion is the desire to move towards groups of other agents. * Separation is the desire to not be too close to another particle.kNearest(this. nearest = ENCOG. 5. alignment. nearest. iteration : function () { 'use strict'. // loop over all particles. * @property constCohesion * @type number */ constCohesion : 0. * @property constSeparation * @type number */ constSeparation : 0. * Alignment is the desire to keep all particles moving in the same directio n. 5. for (i = 0.MathUtil. 0.25. * Format: callbackNeighbors(currentIndex.MathUtil. cohesion. dy. targetAngle.

this.agents = agents. /////////////////////////////////////////////////////////////// // End implementation of three very basic laws of flocking.Swarm(). .agents[i] [2].ArrayUtil.this. cohesion = (Math.steer towards average position of neighbors (long r ange attraction) cohesion = 0.PI) . return result.steer towards average heading of neighbors alignment = 0.length > 0) { alignment = ENCOG. 0). separation += 180.arrayMean(this.agent s[i][2].atan2(dx. meanY = ENCOG. if (neighbors.this. Alignment .agents[i][1]. 0).agents[i][1].ArrayUtil. dx = meanX . if (neighbors.avoid crowding neighbors (short range repulsion) separation = 0. var result = new ENCOG. } // 2.ArrayUtil.this.agents[i][0].arrayMean(nearest.con stAlignment) + (separation * this. // The three default ratios that I provide work well. separation = (Math.ArrayUtil.this. ENCOG. 0.create = function (agents) { 'use strict'.agents[i][0].callbackNeighbors(i. dy) * 180 / Math. meanY = ENCOG. turnAmount = (cohesion * this.ArrayUtil. 1). 2) . dx = meanX . /////////////////////////////////////////////////////////////// } } }. } // perform the turn // The degree to which each of the three laws is applied is configur able..agents[i][2] += turnAmount.callbackNeighbors !== null) { this.PI) . 2).constCohesion) + (alignment * this. 1).atan2(dx. } if (this. if (nearest.arrayMean(this.Swarm. Separation .arrayMean(nearest. } // 3. result. dy = meanY .this.agents[ i][2].length > 0) { meanX = ENCOG.agents. // 1.this. Cohesion . dy = meanY . this. dy) * 180 / Math.agents.constSeparation).arrayMean(neighbors. neighbors).length > 0) { meanX = ENCOG.

/** * The starting temperature for each iteration. /** * The current solution. * * @property scoreSolution * @type Function * @default null **/ scoreSolution : null. /** * Randomize a solution according to the specified temperature. .prototype = { /** * The name of this object. The score funct ion * must accept an array of doubles and return a score. * @property NAME * @type String * @final */ NAME : 'Anneal'. * * @property scoreSolution * @type Function * @default null **/ randomize : null. * @property solution * @type Array */ solution : null.Anneal = function () { 'use strict'. //////////////////////////////////////////////////////////////////////////////// ///////////////////////////////// /// Anneal: The following code implements Simulated Annealing // //////////////////////////////////////////////////////////////////////////////// //////////////////////////////// /** * Simulated Annealing algorithm. }. * * @class Anneal * @constructor **/ ENCOG.}. The higher the * temperature the more randomness.Anneal. ENCOG. /** * A function that is used to score the potential solutions.

* * @property constCycles * @type number * @default 10.solution).Anneal(). i. temperature = this. return result.solution.0 **/ constCycles : 10. result. bestArray = this.solution = bestArray.randomize(this. temperature *= Math.0 **/ constStopTemp : 2. } this.slice().exp(Math.log(this. var result = new ENCOG. bestScore.solution = solution.solution. /** * The stopping temperature for each iteration.solution.0. if (curScore < bestScore) { bestArray = this.create = function (solution) { 'use strict'. /** * The number of cycles to go from the starting temperature to the stopping. bestScore = curScore.Anneal. curScore = this.solution). var bestArray.constCycles . iteration : function () { 'use strict'.* * @property constStartTemp * @type number * @default 10.slice(). . i < this. curScore. temperature). ENCOG.1)).constStopTemp / this. temperature.0 **/ constStartTemp : 10. for (i = 0.constStartTemp.constStartTemp) / (this.scoreSolution(this. } } }. * * @property constStopTemp * @type number * @default 2. }. i += 1) { this. bestScore = this.0.scoreSolution(this.slice().constCycles.

Genetic = function () { 'use strict'. ENCOG. * * @property mutate * @type Function * @default null **/ mutate : null. /** * A function that is used to score the potential solutions. /** * A function that will mutate the specified solution. The score funct ion * must accept an array of doubles and return a score. * * @property scoreSolution * @type Function * @default null **/ scoreSolution : null. }.Genetic. * * @class Genetic * @constructor **/ ENCOG. /** * Perform a crossover and return two offspring.prototype = { /** * The name of this object. * @property solution * @type Array */ population : null. * @property NAME * @type String * @final */ NAME : 'Genetic'.//////////////////////////////////////////////////////////////////////////////// ///////////////////////////////// /// Genetic Algorithm: The following code implements a Genetic Algorithm // //////////////////////////////////////////////////////////////////////////////// //////////////////////////////// /** * Genetic learning algorithm. crossover should be called as follows: . The mutation method must * access an array of doubles that will be mutated. /** * The current population.

matingPopulationSize.data). } .1.1 **/ constMutationPercent : 0.floor(this.child1.floor(Math.5.24.floor(this. countToMate = Math. iteration : function () { 'use strict'. this. this.data.population[motherID].length * this. motherID < countToMate.data. /** * The percent of the population that will mate.data. /** * The percent of offspring that will be mutated.population[offspringIndex + 1]. motherID++) { fatherID = Math. * * @property crossover * @type Function * @default null **/ crossover : null.* * crossover(mother.24 **/ constMatePercent : 0.population[offspringIndex].data).crossover( this. /** * The percent of the population that can be chosen for mating. offspringIndex.constMatePercent) .constMat ingPopulationPercent). if needed if (Math. offspringCount = countToMate * 2. fatherID.father.random() > this.population[offspringIndex]. matingPopulationSize = Math. this. * @property constMatingPopulationPercent * @type number * @default 0. // mate and form the next generation for (motherID = 0. m otherID. * @property constMatePercent * @type number * @default 0.length .population. * @property constMutationPercent * @type number * @default 0. // mutate. var countToMate. this.population[fatherID].offspringCount.5 **/ constMatingPopulationPercent : 0.mutate(this.constMutationPercent) { this. offspringCount.length * this.population.child2).random() * matingPopulationSize).population. offspringIndex = this.

sortPopulation().scoreSolution(d).data).population[0]. 'score' : l }.score = this. // move to the next one offspringIndex += 2.population[i] = { 'data' : d. this. l. getSolution : function () { return this. }.sort(function (a.score }). population[offspringIndex + 1]. var i.score = this. createPopulation : function (size.Genetic(). } this. } // score the two new offspring this. }. sortPopulation : function () { this.scoreSolution(this.scoreSolution(this. b) { return a.population = [].mutate(this. } }.population. i++) { d = generate(). ENCOG. this.create = function () { return new ENCOG.data). //////////////////////////////////////////////////////////////////////////////// ///////////////////////////////// /// SOM: The following code implements a Self Organizing Map // //////////////////////////////////////////////////////////////////////////////// //////////////////////////////// /** * A self organizing map (SOM). * * @class SOM . i < size. }.score .data).constMutationPercent) { this.popu lation[offspringIndex].data. for (i = 0.sortPopulation().population[offspringIndex + 1]. generate) { 'use strict'. d.b. l = this.random() > this. } this. this.population[offspringIndex]. }.population[offspringIndex].Genetic.if (Math.

SOM. * @method classify * @param inputData The input data.* @constructor **/ ENCOG. } minDist = Number.inputCount) { throw new Error( "Can't classify SOM with input size of " + this.inputCount + " with input data of count " + inputData.length > this. result. /** * The input neuron count for the SOM * * @property inputCount * @type int * @default 0 **/ inputCount : 0.length). i. */ classify : function (inputData) { 'use strict'. /** * The output neuron count for the SOM * * @property outputCount * @type int * @default 0 **/ outputCount : 0. . ENCOG. * * @property weights * @type Array * @default null **/ weights : null. var minDist. dist.POSITIVE_INFINITY. if (inputData.SOM = function () { 'use strict'. /** * Determine which output neuron the input matches with best.prototype = { /** * The name of this object. * @property NAME * @type String * @final */ NAME : "SOM". /** * Holds the weights for the SOM. }.

weights = ENCOG. } } return result. correctionMatrix : null. this. theOutputCount) { 'use strict'. /** * Create a SOM network. som : null. result = i.SOM(). worstDistance : 0. * @method iteration */ iteration : function () { .ArrayUtil. /** * Perform a training iteration. // train SOM ENCOG. ENCOG.euclideanDistance(inputData. }.inputCount = theInputCount. * @property NAME * @type String * @final */ NAME : "SOM".prototype = { /** * The name of this object. } }.outputCount.result = -1. weights : null.5.MathUtil. i += 1) { dist = ENCOG. 0.inputCount). */ ENCOG. }. result. learningRate : 0. theInputC ount). i < this.SOM. for (i = 0. this. result. result. if (dist < minDist) { minDist = dist.TrainSOM.create = function (theInputCount.ActivationElliottSymmetric} The newly created activation funct ion.TrainSOM = function () { 'use strict'. * @method create * @return {ENCOG.weights[i].outputCount = theOutputCount. var result = new ENCOG.allocateBoolean2D(theOutputCount. return result. trainingInput : null.

}.randomizeArray2D(this. i < this. calculateBMU : function (input) { var result.worstDistance) { this. bmu. train : function (bmu.fillArray2D(this.length). this is the error for the entire network. } // Track the lowest distance so far.length > this. distance.outputCount. if (distance < lowestDistance) { lowestDistance = distance. -1.inputCount + " with input data of count " + input. if (lowestDistance > this.applyCorrection(). 0. i++) { input = this.getWorstDistance() / 100.som.'use strict'.weights.MathUtil. result = i. this is the BMU. i++) { distance = ENCOG.worstDistance = lowestDistance. ENCOG. input) { }.weights[i]. // Track the lowest distance. 1). lowestDistance. }.MathUtil. } } // Track the worst distance.som. for (i = 0.length).train(bmu.som.calculateBMU(input). i < this. // Reset the correction matrix for this synapse and iteration. input. this. result = 0. lowestDistance = Number.bmuUtil. if (input. this.0).length. reset : function () { ENCOG.euclideanDistance(this. var i.inputCount) { throw new Error( "Can't train SOM with input size of " + this.correctionMatrix. for (i = 0. .trainingInput. // Determine the BMU for each training element.som.POSITIVE_INFINITY. }. this.trainingInput[i]. input).weights[i]. 0). } return result. bmu = this.ArrayUtil. } // update the error //setError(this. i. inp ut.

}. /** * Holds the ideal data parsed from the CSV.ArrayUtil. //////////////////////////////////////////////////////////////////////////////// ///////////////////////////////// /// Read CSV: The following is used to read CSV files // //////////////////////////////////////////////////////////////////////////////// //////////////////////////////// /** * Read data that is in CSV format. * * @property inputData * @type Array * @default null **/ inputData : null.prototype = { /** * Holds the regular expression for parsing. * @method create * @return {ENCOG. * . }.outputC ount. result. */ ENCOG. result.ActivationElliottSymmetric} The newly created activation funct ion.learningRate = theLearningRate.inputCount). ENCOG.som.ReadCSV.TrainSOM. /** * Create trainer for a SOM. * * @property regStr * @type String * @default null **/ regStr : null.correctionMatrix = ENCOG. this.som. return result. * * @class ReadCSV * @constructor **/ ENCOG.ReadCSV = function () { 'use strict'. result.allocateBoolean2D(this.TrainSOM(). /** * The input data parsed from the CSV.som = theSom.create = function (theSom. var result = new ENCOG. theLearningRate) { 'use strict'.applyCorrection : function () { } }.

readCSV : function (csv.push([]).inputData.delimiter)) { this.inputData = [ [] ].regStr. theInputCount. // allocate input and ideal arrays this. * * @property inputCount * @type int * @default null **/ inputCount : 0.inputCount = theInputCount.* @property idealData * @type Array * @default null **/ idealData : null. this. theIdealCount) { var currentIndex.push([]).length && (d != this. value. "gi"). regex = new RegExp(this.idealCount = theIdealCount. regex. /** * Holds the number of columns that make up the input data.idealData. this. currentIndex = 0. d. this.exec(csv)) { // obtain delimiter d = matches[ 1 ]. matches.' **/ delimiter : '. while (matches = regex.idealData = [ [] ]. this. } . currentIndex = 0. * * @property idealCount * @type int * @default null **/ idealCount : 0. /** * Holds the number of columns that make up the ideal data. /** * Holds the regular expression for parsing * @property delimiter * @type String * @default '. // new row if (d.'.

ReadCSV.push(value).push(parseInt(parts[i]. parts.length.inputData.10)). } // add value to either input or ideal if (currentIndex < this. ENCOG. result.length . result = [].delimiter = (theDelimiter || ".split('.'). i+=1) { result. var result.ReadCSV. }.ReadCSV. parts = str. result.idealData. var result = new ENCOG.idealData[ this. "g"). parts. } else { value = matches[ 3 ].create = function (theDelimiter) { 'use strict'.fromCommaListInt = function (str) { 'use strict'. for (i = 0.regStr = // Delimiters "(\\" + result. } else { this.push(value). i.'). result = []. ENCOG. parts = str.replace( new RegExp("\"\"". return result.delimiter + "\\r\\n]*))".split('.length .fromCommaListFloat = function (str) { 'use strict'. } currentIndex += 1. var result.ReadCSV().1 ]. }.inputData[ this. "\"" ). .").1 ]. ENCOG.inputCount) { this. i. i < parts.// do we need to remove quotes from value? if (matches[ 2 ]) { value = matches[ 2 ]. } } }. } return result.delimiter + "|\\r?\\n|\\r|^)" + // Quoted fields "(?:\"([^\"]*(?:\"\"[^\"]*)*)\"|" + // Standard fields "([^\"\\" + result.

EGFILE = function () { 'use strict'.ReadCSV.BasicNetwork. ENCOG.1. now = (new Date()). } result += arr[i].for (i = 0. }.3. .' + now + ENCOG . i < parts. } return result.NEWLINE. result += 'contextTargetOffset=' + ENCOG.NEWLINE.contextTarg etOffset) + ENCOG.endTraining + ENCOG. }.' + ENCOG. i+=1) { result. result += 'endTraining=' + obj.length. } return result. result += 'connectionLimit=' + obj.contextTarget Size) + ENCOG. i+=1) { if (i > 0) { result += '. i < arr. result = ''.getTime().push(parseFloat(parts[i])). result += '[BASIC]' + ENCOG.NEWLINE.connectionLimit + ENCOG.beginTraining + ENCOG. result += 'beginTraining=' + obj.length.1. result += 'contextTargetSize=' + ENCOG.NEWLINE. result += 'encog.NEWLINE. }.PLATFORM + '.toCommaList(obj.save = function (obj) { 'use strict'. //////////////////////////////////////////////////////////////////////////////// ///////////////////////////////// /// EGFILE: The following code is used for the processing of EG files // //////////////////////////////////////////////////////////////////////////////// //////////////////////////////// /** * Read data stored in the Encog EG format.0. var result.NEWLINE.EGFILE. now. result += '[BASIC:PARAMS]' + ENCOG. * * @class EGFILE * @constructor **/ ENCOG. ENCOG. var result = "".toCommaList = function (arr) { 'use strict'. af.'.toCommaList(obj.NEWLINE. i.ReadCSV. for (i = 0.NEWLINE. result += '[BASIC:NETWORK]' + ENCOG.NEWLINE. i.ReadCSV.

ReadCSV.loadBasicNetwork(str). while (lines[currentLine].length) { line = lines[currentLine].toCommaList(obj. result += 'biasActivation=' + ENCOG.length.EGFILE.NAME.NEWLINE.length === 0) { currentLine+=1.ReadCSV.trim().NEWLINE.NEWLINE. result += 'output=' + ENCOG.activationFunctions[i]. line.EGFILE. .NEWLINE.EGFILE. result) { var idx. currentLine. if (parts[0] !== 'encog') { throw new Error("Not a valid Encog EG file.NEWLINE. result += af. result += '\"' + ENCOG.layerContextC ount) + ENCOG.result += 'hasContext=' + (obj.hasContext ? 't' : 'f') + ENCOG.NEWLINE.split('. result += 'layerCounts=' + ENCOG.layerIndex) + ENCOG. var lines.load = function (str) { 'use strict'. result += 'weightIndex=' + ENCOG.toCommaList(obj.*([\n\r]+|$)/gm). result += 'outputCount=' + obj.NEWLINE. currentLine.toCommaList(obj.outputCount + ENCOG. } parts = lines[currentLine]. value.").NEWLINE. currentLine = 0. i+=1) { af = obj. result += 'weights=' + ENCOG.ReadCSV. while (currentLine < lines.ReadCSV.layerOutput) + ENCOG.toCommaList(obj.layerFeedCounts ) + ENCOG. parts. i < obj.layerCounts) + ENCO G. result += 'inputCount=' + obj. result += '\"'.'). for (i = 0. ENCOG._loadNetwork = function (lines.toCommaList(obj. result += 'layerIndex=' + ENCOG. } if (parts[1] === 'BasicNetwork') { return ENCOG.activationFunctions.trim().NEWLINE.toCommaList(obj. } }.ReadCSV.match(/^. name. } return result.biasActivation) + ENCOG.toCommaList(obj.inputCount + ENCOG.ReadCSV.trim().ReadCSV. } else { throw new Error("Encog Javascript does not support: " + parts[1]). result += 'layerContextCount=' + ENCOG. }. result += '[BASIC:ACTIVATION]' + ENCOG.weightIndex) + ENCO G. lines = str. NEWLINE. result += 'layerFeedCounts=' + ENCOG.weights) + ENCOG.toCommaList(obj.NEW LINE.NEWLINE. ENCOG.NEWLIN E.ReadCSV.

toLowerCase() == 'f'). } else if (name == 'hascontext') { result.ReadCSV. } else if (name == 'layerindex') { result.if (line[0] == '[') { break.layerIndex = ENCOG.fromCommaListFloat(value).beginTraining = parseInt(value).trim().ReadCSV. } currentLine++.hasContext = (value.ReadCSV. } else if (name == 'connectionlimit') { result.layerCounts = ENCOG.fromCommaListInt(value).substr(0. } else if (name == 'contexttargetsize') { result.connectionLimit = parseFloat(value).weights = ENCOG.outputCount = parseInt(value). } else if (name == 'layercontextcount') { result. } else if (name == 'weightindex') { result. } else if (name == 'layerfeedcounts') { result.layerOutput = ENCOG. } else if (name == 'output') { result.ReadCSV. idx). if (idx == -1) { throw new Error("Invalid line in BasicNetwork file: " + line).layerFeedCounts = ENCOG.fromCommaListInt(value). } else if (name == 'outputcount') { result. } else if (name == 'weights') { result.ReadCSV.substr(idx + 1).fromCommaListInt(value). } else if (name == 'endtraining') { result.fromCommaListFloat(value).fromCommaListInt(value).inputCount = parseInt(value).ReadCSV.contextTargetSize = ENCOG. if (name == 'begintraining') { result. } else if (name == 'layercounts') { result.weightIndex = ENCOG.toLowerCase().ReadCSV.ReadCSV.fromCommaListInt(value).fromCommaListInt(value). idx = line. } else if (name == 'inputcount') { result. } else if (name == 'contexttargetoffset') { result.trim(). value = line.fromCommaListInt(value).contextTargetOffset = ENCOG.indexOf('=').ReadCSV.layerContextCount = ENCOG. } name = line. } else if (name == 'biasactivation') { .endTraining = parseInt(value).

} } result. if (line[0] == '[') { break. ENCOG.activationFunctions[i] = ENCOG. currentLine = 0. 0).create().activationFunctions[i] = ENCOG. currentLine.ActivationTANH.cre ate(). } else if (line == 'ActivationElliottSymmetric') { result.ActivationSigmoid. i = 0.ActivationElliottSymmetric.fromCommaListFloat(value).fillArray(result. } else if (line == 'ActivationTANH') { result.activationFunctions[i] = ENCOG.").layerSums = [].length == 0) { currentLine++.biasActivation = ENCOG. if (parts[0] != 'encog') { throw new Error("Not a valid Encog EG file.match(/^. }. result.result. result.stripQuotes(line). return currentLine.activationFunctions[i] = ENCOG.loadBasicNetwork = function (str) { var lines.layerSums. } parts = lines[currentLine++].EGFILE.*([\n\r]+|$)/gm). } else if (line == 'ActivationSigmoid') { result. } else if (line == 'ActivationElliott') { result.length) { line = lines[currentLine++].').create().Util.layerSums. } i += 1. if (line == 'ActivationLinear') { result. ENCOG. } return currentLine. parts. while (lines[currentLine].trim().ActivationElliott.trim(). line.activationFunctions = [].ActivationLinear.create(). lines = str.ReadCSV. }. currentLine.activationFunctions[i] = ENCOG. 0._loadActivation = function (lines. result. while (currentLine < lines.trim(). result) { var i. . line. } line = ENCOG.EGFILE.create().ArrayUtil.split('. ENCOG.

result). while (currentLine < lines. . } else if (line == '[BASIC:ACTIVATION]') { currentLine = this.length) { line = lines[currentLine++].BasicNetwork(). } } return result. currentLine. if (line == '[BASIC:NETWORK]') { currentLine = this."). currentLine.} if (parts[1] != 'BasicNetwork') { throw new Error("Not a BasicNetwork EG file._loadActivation(lines. } result = new ENCOG. }.trim(). result)._loadNetwork(lines.

Sign up to vote on this title
UsefulNot useful