The code that trains a network will be written in C, for speed. Then we’ll wrap Cicada functions around the C code so that we can easily call it from a script.
NN.c
#include <math.h>
#include "NN.h"
// runNetwork(): evolves a neural network to a steady state
// Takes the params:  1 - weights;  2 - neuron activities;  3 - input;  4 - step size
// (and additionally, in training mode):  5 - target output;  6 - learning rate
int runNetwork()
{
    neural_network myNN;
    double *inputs, step_size, *target_outputs, learning_rate;
    int i, numInputs, numOutputs;
    
    /* ----- set up data types, etc. ----- */
    
    // clamp the input neurons; zero the rest of the network
    for (i = 0; i < numInputs; i++)
        myNN.activity[i] = inputs[i];
    for (i = numInputs; i < myNN.numNeurons; i++)
        myNN.activity[i] = 0;
    
    if (args.num == 6) {        // i.e. if we're in training mode
        
        // 'free' phase: relax with only the inputs clamped; anti-Hebbian update
        if (getSteadyState(myNN, numInputs, step_size) != 0) return 1;
        trainNetwork(myNN, -learning_rate);
        
        // 'clamped' phase: also clamp the outputs to their targets; Hebbian update
        for (i = 0; i < numOutputs; i++)
            myNN.activity[numInputs + i] = target_outputs[i];
        if (getSteadyState(myNN, numInputs + numOutputs, step_size) != 0) return 1;
        trainNetwork(myNN, learning_rate);
    }
    else if (getSteadyState(myNN, numInputs, step_size) != 0) return 1;
    
    /* ----- save results ----- */
    
    return 0;       // no error
}
// getSteadyState() evolves a network to the self-consistent state x_i = f( sum_j W_ij x_j ),
// where f is the logistic function.
int getSteadyState(neural_network NN, int numClamped, double StepSize)
{
    const double max_mean_sq_diff = 0.001;
    const long maxIterations = 1000;
    double diff, sq_diff, input, newOutput;
    int iteration, i, j;
    
    if (numClamped == NN.numNeurons) return 0;
    
    // keep updating the network until it reaches a steady state
    for (iteration = 1; iteration <= maxIterations; iteration++) {
        sq_diff = 0;
        for (i = numClamped; i < NN.numNeurons; i++) {
            
            // sum the input to neuron i from every other neuron
            input = 0;
            for (j = 0; j < NN.numNeurons; j++) {
                if (i != j) {
                    input += NN.activity[j] * NN.weights[i*NN.numNeurons + j];
            }   }
            
            // move activity i a fraction StepSize of the way toward f(input)
            newOutput = 1./(1 + exp(-input));
            diff = newOutput - NN.activity[i];
            sq_diff += diff*diff;
            NN.activity[i] *= 1 - StepSize;
            NN.activity[i] += StepSize * newOutput;
        }
        
        // done if the mean squared difference from the fixed point is small enough
        if (sq_diff < max_mean_sq_diff * (NN.numNeurons - numClamped))
            return 0;
    }
    
    return 1;       // failed to converge
}
// trainNetwork() updates the weights using the Hebbian rule:  dW_ij ~ x_i * x_j.
void trainNetwork(neural_network NN, double learningRate)
{
    int i, j;
    
    for (i = 0; i < NN.numNeurons; i++) {
        for (j = 0; j < NN.numNeurons; j++) {
            if (i != j) {
                NN.weights[i*NN.numNeurons + j] += learningRate * NN.activity[i] * NN.activity[j];
    }   }   }
}
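Taken together, the two trainNetwork() calls in training mode implement a contrastive Hebbian update. The network first relaxes with only its inputs clamped (the free phase), and the weights are pushed away from that state's correlations; it then relaxes again with the target outputs also clamped, and the weights are pulled toward those correlations. The net change per presentation is

dW_ij = learning_rate * ( x_i x_j [clamped] - x_i x_j [free] )

so the weights stop changing once the free-running network already reproduces the clamped state on its own.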
NN.h
typedef struct {
    int numNeurons;         // 'N'
    double *weights;        // N x N array of incoming synapses (row i holds the inputs to neuron i)
    double *activity;       // length-N vector of neuron outputs
} neural_network;

extern int runNetwork( ... );
extern int getSteadyState(neural_network, int, double);
extern void trainNetwork(neural_network, double);
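Before writing the Cicada wrapper, we can sanity-check the two fully prototyped routines from a plain C driver. The sketch below is only an illustration, not part of the wrapper: the file name test_NN.c, the 3-neuron topology, and every numerical value are made up, and it assumes the elided sections of runNetwork() have been filled in so that NN.c compiles.

// test_NN.c -- hypothetical standalone harness for getSteadyState()/trainNetwork()
// Build (once NN.c is complete):  cc test_NN.c NN.c -lm
#include <stdio.h>
#include <stdlib.h>
#include "NN.h"

int main()
{
    neural_network testNN;
    int i;
    
    // a 3-neuron network: neuron 0 is clamped as input; neurons 1 and 2 float
    testNN.numNeurons = 3;
    testNN.weights = calloc(9, sizeof(double));     // zeroed N x N weight matrix
    testNN.activity = calloc(3, sizeof(double));    // zeroed activity vector
    if (testNN.weights == NULL || testNN.activity == NULL) return 1;
    
    // arbitrary symmetric weights:  0 <--> 1 excitatory,  1 <--> 2 inhibitory
    testNN.weights[1*3 + 0] = testNN.weights[0*3 + 1] = 2.;
    testNN.weights[2*3 + 1] = testNN.weights[1*3 + 2] = -1.;
    
    testNN.activity[0] = 1.;        // the clamped input
    
    if (getSteadyState(testNN, 1, 0.1) != 0) {
        printf("network did not converge\n");
        return 1;
    }
    
    for (i = 0; i < testNN.numNeurons; i++)
        printf("neuron %d: %f\n", i, testNN.activity[i]);
    
    trainNetwork(testNN, 0.01);     // one Hebbian step at the steady state
    
    free(testNN.weights);
    free(testNN.activity);
    return 0;
}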