Add custom loss functions and an R/W state matrix #936

Open: wants to merge 32 commits into master from feature/loss-function
Changes shown below are from 1 commit (the first of 32).

Commits (32):
ee5fc8d - Implement a loss function for GPU (ashenmagic, Jun 15, 2024)
6baf8c6 - Add partial support for loss functions (ashenmagic, Jun 16, 2024)
eb909c9 - Update loss.ts (ashenmagic, Jun 16, 2024)
1f2c681 - OMG OMG OMG!!!!!! ZOOOOOMIESSS <3333 (ashenmagic, Jun 16, 2024)
ba12f82 - Fixed the bug~! <3 (ashenmagic, Jun 16, 2024)
7371a23 - Generalize loss function for both CPU and GPU (ashenmagic, Jun 16, 2024)
2a7840a - Add memory function (ashenmagic, Jun 16, 2024)
a762a48 - Backup: Another thunderstorm, power outage risk (ashenmagic, Jun 16, 2024)
94fc99a - Revert "Backup: Another thunderstorm, power outage risk" (ashenmagic, Jun 16, 2024)
ba03eb3 - Add parameter `lossDelta` (ashenmagic, Jun 16, 2024)
aa337f3 - Rename memory to RAM (ashenmagic, Jun 16, 2024)
c655c52 - Add `updateRAM` (ashenmagic, Jun 17, 2024)
b45d581 - Fix bug that required `ramSize` to be defined (ashenmagic, Jun 17, 2024)
b703e4a - Prune unused code (ashenmagic, Jun 17, 2024)
fda0349 - Run `updateRAM` on both CPU and GPU nets (ashenmagic, Jun 17, 2024)
3d392f1 - Design custom loss function for autoencoders (ashenmagic, Jun 17, 2024)
ce98bf1 - Fix CI task errors (ashenmagic, Jun 17, 2024)
51b9aa9 - Fix a CI task related to type coersion (ashenmagic, Jun 17, 2024)
c5c8438 - TypeScript hates me today (ashenmagic, Jun 17, 2024)
e8384a5 - Fix all lint errors (ashenmagic, Jun 17, 2024)
a21c387 - Remove unused `@ts-expect-error` directive (ashenmagic, Jun 17, 2024)
83574f6 - Please, linter gods, pleaaaase stop hating me (ashenmagic, Jun 17, 2024)
52edc88 - Properly initialize `NeuralNetwork.ram` (ashenmagic, Jun 18, 2024)
8f8f455 - Finish updating autoencoder to use loss function (ashenmagic, Jun 18, 2024)
00b8515 - Add a CPU variant of autoencoder (ashenmagic, Jun 18, 2024)
e4e6906 - Polish autoencoders and remove debug code (ashenmagic, Jun 18, 2024)
4d7b5ef - Remove debug code (ashenmagic, Jun 18, 2024)
cd0ad75 - Export the CPU autoencoder implementation (ashenmagic, Jun 18, 2024)
693bd0b - Update tests and documentation (ashenmagic, Jun 19, 2024)
a03161f - Merge branch 'main' into feature/loss-function (ashenmagic, Nov 6, 2024)
561dda3 - Remove duplicate lines added during merge (ashenmagic, Dec 29, 2024)
42aece2 - Resolve nested tests (ashenmagic, Jan 3, 2025)
Commit ee5fc8d544ffa8e76860c251f9640c401f4710cd - Implement a loss function for GPU
ashenmagic committed Jun 15, 2024
src/neural-network-gpu.ts (8 changes: 5 additions & 3 deletions)
@@ -96,7 +96,7 @@ function weightedSumTanh(
   return Math.tanh(sum);
 }
 
-function calcErrorOutput(output: number, target: number): number {
+function loss(output: number, target: number): number {
   return target - output;
 }

@@ -400,6 +400,8 @@ export class NeuralNetworkGPU<
       );
     }
 
+    let _loss = typeof this.loss === "function" ? this.loss : loss;
+
     calcDeltas = alias(
       utils.getMinifySafeName(() => calcDeltas),
       calcDeltas
@@ -411,7 +413,7 @@
     // @ts-expect-error
     this.backwardPropagate[this.outputLayer] = this.gpu.createKernelMap(
       {
-        error: calcErrorOutput,
+        error: _loss,
       },
       function (
         this: IKernelFunctionThis,
@@ -422,7 +424,7 @@
         const target = targets[this.thread.x];
         // eslint-disable-next-line @typescript-eslint/ban-ts-comment
         // @ts-expect-error
-        return calcDeltas(calcErrorOutput(output, target), output);
+        return calcDeltas(loss(output, target), output);
       },
       {
         output: [this.sizes[this.outputLayer]],
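Aside (not part of the diff): with this change, `this.loss` replaces the default `loss` subkernel whenever it is a function. Because GPU.js transpiles the mapped function into a GPU kernel, a custom loss supplied to the GPU net has to stay within simple numeric expressions (no closures or external state). A minimal sketch of such a function; `scaledLoss` is a hypothetical name, not an identifier from this PR:

// Hypothetical custom loss with the same signature as the default
// `loss` above. Plain arithmetic like this is safe for GPU.js to
// transpile into a kernel subfunction.
function scaledLoss(output: number, target: number): number {
  return (target - output) * 0.5; // damp the raw error signal
}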
src/neural-network.ts (39 changes: 39 additions & 0 deletions)
@@ -13,6 +13,8 @@ import { mse } from './utilities/mse';
 import { randos } from './utilities/randos';
 import { zeros } from './utilities/zeros';
 
+export type LossFunction = (actual: number, expected: number) => number;
+
 type NeuralNetworkFormatter =
   | ((v: INumberHash) => Float32Array)
   | ((v: number[]) => Float32Array);
@@ -107,6 +109,7 @@ export interface INeuralNetworkTrainOptions {
   errorThresh: number;
   log: boolean | ((status: INeuralNetworkState) => void);
   logPeriod: number;
+  loss: boolean | LossFunction;
   leakyReluAlpha: number;
   learningRate: number;
   momentum: number;
@@ -126,6 +129,7 @@ export function trainDefaults(): INeuralNetworkTrainOptions {
   errorThresh: 0.005, // the acceptable error percentage from training data
   log: false, // true to use console.log, when a function is supplied it is used
   logPeriod: 10, // iterations between logging out
+  loss: false,
   leakyReluAlpha: 0.01,
   learningRate: 0.3, // multiply's against the input and the delta then adds to momentum
   momentum: 0.1, // multiply's against the specified "change" then adds to learning rate for change
@@ -188,6 +192,8 @@ export class NeuralNetwork<
     return this.calculateDeltas(output);
   };
 
+  loss: boolean | LossFunction = false;
+
   // adam
   biasChangesLow: Float32Array[] = [];
   biasChangesHigh: Float32Array[] = [];
@@ -470,6 +476,10 @@
       const val = options.logPeriod;
       return typeof val === 'number' && val > 0;
     },
+    loss: () => {
+      const val = options.loss;
+      return typeof val === 'function' || typeof val === 'boolean';
+    },
     leakyReluAlpha: () => {
       const val = options.leakyReluAlpha;
       return typeof val === 'number' && val > 0 && val < 1;
@@ -666,6 +676,8 @@
     data: Array<INeuralNetworkDatum<Partial<InputType>, Partial<OutputType>>>,
     options: Partial<INeuralNetworkTrainOptions> = {}
   ): INeuralNetworkState {
+    this.loss = options.loss ?? false;
+
     const { preparedData, status, endTime } = this.prepTraining(
       data as Array<INeuralNetworkDatum<InputType, OutputType>>,
       options
@@ -826,6 +838,33 @@
     }
   }
 
+  _calculateDeltasLoss(target: Float32Array): void {
+    for (let layer = this.outputLayer; layer >= 0; layer--) {
+      const activeSize = this.sizes[layer];
+      const activeOutput = this.outputs[layer];
+      const activeError = this.errors[layer];
+      const activeDeltas = this.deltas[layer];
+      const nextLayer = this.weights[layer + 1];
+
+      for (let node = 0; node < activeSize; node++) {
+        const output = activeOutput[node];
+
+        let error = 0;
+        if (layer === this.outputLayer) {
+          if (typeof this.loss === "function") error = this.loss(output, target[node]);
+          else error = target[node] - output;
+        } else {
+          const deltas = this.deltas[layer + 1];
+          for (let k = 0; k < deltas.length; k++) {
+            error += deltas[k] * nextLayer[k][node];
+          }
+        }
+        activeError[node] = error;
+        activeDeltas[node] = error * output * (1 - output);
+      }
+    }
+  }
+
   /**
    *
    * Changes weights of networks
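Taken together, the user-facing surface of this commit is the new `loss` train option: `false` keeps the default error term (`target - output`), while a `LossFunction` replaces it. A minimal usage sketch, assuming the existing `NeuralNetwork.train` API; the training data and the `halvedError` function are illustrative, not taken from the PR:

import { NeuralNetwork } from './neural-network';

// Hypothetical custom loss matching the added LossFunction type:
// (actual: number, expected: number) => number.
// Returning `expected - actual` would reproduce the default behavior.
const halvedError = (actual: number, expected: number): number =>
  (expected - actual) * 0.5; // soften the weight updates

const net = new NeuralNetwork();
net.train(
  [
    { input: [0, 0], output: [0] },
    { input: [0, 1], output: [1] },
  ],
  { loss: halvedError } // omit, or pass false, to keep the default
);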