neural network – Backpropagation Implementation with gradient checking in F#

So my problem in short is that for specific edge cases the gradients calculated by my backpropagation algorithm are not nearly equal to the numerically calculated gradients. I’m wondering whether my implementation is still correct.

Hope this is the right place to ask for some help to find a possible bug.

Further information

At the end you can find my whole implementation of the neural network and backpropagation, with the functions that are possibly wrong called calcDeltas2D & calcDeltasAndAdjustments (I’m working on good naming).

In calcDeltas2D just the deltas for backpropagation are calculated and then in calcDeltasAndAdjustments the results of that function is used to compute the final adjustments (without learning rate).

The code includes tests using Expecto as test library. In each test the results of running backpropagation are compared to numerical gradients.
The first 3 Tests are using random inputs & random weights in various network architecture combinations (trying to cover all cases). These tests succeed most of the time (even with an epsilon which is lower than 1e-1 🙂 ).
The last test case is using a constant for all neurons & random weights. This test is the one which fails most of the time (it appears that the random weights have an influence on the outcome here).

Code:

This can be run with .NET 5 smoothly via dotnet fsi PathToThisCodeFile.fsx.

#r "nuget: MathNet.Numerics"
#r "nuget: MathNet.Numerics.FSharp"
#r "nuget: Expecto"

open MathNet.Numerics

// Generate an array of random doubles mapped from [0, 1) into (-1, 1] via 1.0 - v * 2.0.
// NOTE(review): `Random.Random.doubles` — assumes MathNet.Numerics exposes a
// `Random.doubles : int -> float[]` under the opened namespace; confirm the exact path.
let randomDoubles =
    Random.Random.doubles
    >> Array.map (fun v -> 1.0 - v * 2.0)

// Same samples as randomDoubles, converted element-wise to decimal.
let randomDecimals = randomDoubles >> Array.map decimal

module Array2D =

    /// Pairs the elements of two equally-shaped 2D arrays position-wise.
    /// Raises if arr2 is smaller than arr1 in either dimension.
    let inline zip arr1 arr2 =
        arr1
        |> Array2D.mapi (fun i j e1 -> (e1, Array2D.get arr2 i j))

    /// Enumerates a 2D array row by row as a flat sequence.
    /// (Restored F# syntax: the scraped text had `'a (,)` and `arr.(i, j)`
    /// where `'a [,]` and `arr.[i, j]` are required.)
    let inline toSeq (arr: 'a [,]) =
        seq {
            for i = 0 to Array2D.length1 arr - 1 do
                for j = 0 to Array2D.length2 arr - 1 do
                    yield arr.[i, j]
        }

/// Logistic sigmoid: maps any numeric input into the open interval (0, 1).
let sigmaFun value =
    let x = float value
    1.0 / (1.0 + exp (-x))

[<AutoOpen>]
module NN2D =
    /// A node's incoming weights, shaped like the previous layer's activations.
    type Node = decimal [,]

    /// A layer is a 2D grid of nodes.
    type Layer = Node [,]

    /// A fully-connected network: an ordered array of layers.
    type Network2D = { layers: Layer [] }

    /// Result of a forward pass: the final activation plus the
    /// activations of every layer, in order.
    type LayerResult =
        { activation: decimal [,]
          intervalues: decimal [,] list }

    /// Address of a single node inside the network.
    type PositionNetwork = { layer: int; node: int * int }

    /// Convolutional wrapper: filter matrices feeding a dense network.
    type CNN =
        { filterMatrices: decimal [,] []
          network: Network2D }

    /// Forward pass: folds the input through every layer, applying sigmaFun
    /// to the weighted sum of each node's inputs. Returns the final
    /// activation plus all intermediate layer activations.
    let getValues2D (inputX: decimal [,]) (network: Network2D) sigmaFun =
        network.layers
        |> Array.fold
            (fun (state: LayerResult) nodes ->
                let activation = state.activation
                let ls = state.intervalues

                let res: decimal [,] =
                    nodes
                    |> Array2D.map
                        (fun weights ->
                            Array2D.zip activation weights
                            |> Array2D.map (fun (a, b) -> a * b)
                            |> Array2D.toSeq
                            // Sorting before summing fixes the summation order.
                            |> Seq.sort
                            |> Seq.sum
                            |> sigmaFun)

                { LayerResult.activation = res
                  LayerResult.intervalues = List.append ls [ res ] })
            { LayerResult.activation = inputX
              LayerResult.intervalues = [] }

    /// Backpropagation deltas for every node of every layer.
    /// Output layer: delta_j = (o_j - expected_j) * o_j * (1 - o_j)
    /// Hidden layers: delta_j = (sum over l of delta_l * w_jl) * o_j * (1 - o_j)
    /// Returns the per-layer deltas and the forward-pass result.
    let calcDeltas2D (inputX: decimal [,]) network sigmaFun (expected: decimal [,]): decimal [,] [] * LayerResult =
        let layerResult = getValues2D inputX network sigmaFun
        let lastLayerIndex = network.layers.Length - 1

        // One zero-initialised delta matrix per layer, shaped like the layer.
        let deltas: decimal [,] [] =
            [| for layer in network.layers -> Array2D.zeroCreate (Array2D.length1 layer) (Array2D.length2 layer) |]

        // Output-layer deltas from the derivative of the quadratic loss.
        layerResult.intervalues.[lastLayerIndex]
        |> Array2D.iteri
            (fun j1 j2 o_j -> deltas.[lastLayerIndex].[j1, j2] <- (o_j - expected.[j1, j2]) * (o_j * (1.0m - o_j)))

        // Walk the hidden layers backwards, propagating the upper layer's deltas.
        for layer in lastLayerIndex - 1 .. -1 .. 0 do
            layerResult.intervalues.[layer]
            |> Array2D.iteri
                (fun j1 j2 o_j ->
                    let deltaSum =
                        let upperLayer = layer + 1

                        layerResult.intervalues.[upperLayer]
                        |> Array2D.mapi
                            (fun l1 l2 _ ->
                                let delta_l = deltas.[upperLayer].[l1, l2]

                                // Weight connecting node (j1, j2) to upper node (l1, l2).
                                let w_jl =
                                    network.layers.[upperLayer].[l1, l2].[j1, j2]

                                delta_l * w_jl)
                        |> Array2D.toSeq
                        |> Seq.sort
                        |> Seq.sum

                    let delta_j = deltaSum * (o_j * (1.0m - o_j))
                    deltas.[layer].[j1, j2] <- delta_j)

        deltas, layerResult

    /// Gradient of the loss w.r.t. each weight (learning rate not applied):
    /// adjustment = delta of the receiving node * activation feeding the weight.
    let calcDeltasAndAdjustments (input: decimal [,]) (network) sigmaFun (expected: decimal [,]) =
        let deltasBeforePoolingLayer, layerResult =
            calcDeltas2D input network sigmaFun expected

        // Prepend the network input so index layerNumber yields the
        // activations entering layer layerNumber.
        let intervalues =
            input :: layerResult.intervalues |> List.toArray

        network.layers
        |> Array.take (Array.length intervalues - 1)
        |> Array.mapi
            (fun layerNumber nodes ->
                nodes
                |> Array2D.mapi
                    (fun j1 j2 weights ->
                        weights
                        |> Array2D.mapi
                            (fun i1 i2 _ ->
                                deltasBeforePoolingLayer.[layerNumber].[j1, j2]
                                // (layerNumber - 1) + 1 = layerNumber: the
                                // activations feeding this layer.
                                * intervalues.[(layerNumber - 1) + 1].[i1, i2]))),
        layerResult

    open Expecto
    open Expecto.Logging
    open Expecto.Logging.Message

    /// Expecto tests comparing analytic gradients against numerical gradients.
    let private _tests =
        // Decimal-typed sigmoid used by the tests.
        let sigmaFun (value: decimal) =
            1.0 / (1.0 + (exp (-float value))) |> decimal

        let logger = Log.create "2D NN Tests"

        testList
            "2-Dimensional NN functionality tests"
            ( // For every weight: perturb it by +/- epsilon, re-run the forward
              // pass, and compare the central-difference gradient of the
              // 0.5 * sum (expected - actual)^2 loss against `adjustments`.
              // Yields true per weight when the relative distance <= 1e-1.
              let compareGradients
                network
                input
                (expectedValues: decimal [,])
                (adjustments: decimal [,] [,] [])
                logging
                =
                  network.layers
                  |> Array.mapi
                      (fun layerNumber nodes ->
                          nodes
                          |> Array2D.mapi
                              (fun j1 j2 weights ->
                                  Array2D.zip adjustments.[layerNumber].[j1, j2] weights
                                  |> Array2D.mapi
                                      (fun i1 i2 (adjustment, weight) ->
                                          // Loss after temporarily shifting this one weight by epsilon.
                                          let calcNetworkWithWeightAdjustment epsilon =
                                              let weights = network.layers.[layerNumber].[j1, j2]
                                              let oldWeight = weight
                                              let res1 = getValues2D input network sigmaFun
                                              weights.[i1, i2] <- oldWeight + epsilon
                                              let res = getValues2D input network sigmaFun

                                              if logging then
                                                  eventX
                                                      $"weight: %A{(i1, i2)}, value: {oldWeight}, res: %A{res.activation}, {res1.activation = res.activation}"
                                                  |> logger.info

                                              // Restore the weight (the network is mutated in place).
                                              weights.[i1, i2] <- oldWeight

                                              let acc =
                                                  res.activation
                                                  |> Array2D.mapi
                                                      (fun g h activation ->
                                                          (expectedValues.[g, h] - activation)
                                                          * (expectedValues.[g, h] - activation))
                                                  |> Array2D.toSeq
                                                  |> Seq.sort
                                                  |> Seq.sum

                                              0.5m * acc

                                          // Central difference: (L(w + e) - L(w - e)) / (2 e).
                                          let numericalGradient =
                                              let epsilons = [| 99399e-5m |]
                                              if logging then printfn "%A" epsilons
                                              let avgEpsilon = epsilons |> Array.average

                                              let calcNetworkWithShiftedWeightMultipleEpsilons epsilons negate =
                                                  let transform = if negate then fun e -> -e else id

                                                  epsilons
                                                  |> Array.averageBy
                                                      (fun e ->
                                                          let res =
                                                              transform e |> calcNetworkWithWeightAdjustment

                                                          res)

                                              let res1, res2 =
                                                  calcNetworkWithShiftedWeightMultipleEpsilons epsilons false,
                                                  calcNetworkWithShiftedWeightMultipleEpsilons epsilons true

                                              // Avoid a zero numerator so the division below stays finite.
                                              let a =
                                                  match (res1 - res2) with
                                                  | 0.m -> 1e-20m
                                                  | value -> value

                                              (a) / (2.0m * avgEpsilon)

                                          // Relative distance between analytic and numerical gradients.
                                          let dist =
                                              abs (adjustment - numericalGradient)
                                              / (abs adjustment + abs numericalGradient)
                                          if logging && dist > 1.0e-1m then
                                              eventX
                                                  $"calculated gradient: {adjustment}, numerical gradient: {numericalGradient}, dist: {dist}"
                                              |> logger.info

                                          dist <= 1.0e-1m)))

              // 1x1 input through two layers of a single node each.
              test "Check that Calculated gradients are nearly equal to the numerical gradients | Single Node in all Layers" {
                  let input =
                      randomDecimals 1 |> Array.singleton |> array2D

                  let network =
                      { Network2D.layers =
                            [| array2D [| [| array2D [| randomDecimals input.Length |] |] |]
                               array2D [| [| array2D [| randomDecimals 1 |> Array.map decimal |] |] |] |] }

                  let expectedValues =
                      randomDecimals 1 |> Array.singleton |> array2D

                  let adjustments, _ =
                      calcDeltasAndAdjustments input network sigmaFun expectedValues

                  // Every single weight's gradient check should pass.
                  let expected =
                      network.layers
                      |> Array.map (Array2D.map (Array2D.map (fun _ -> true)))

                  let actual =
                      compareGradients network input expectedValues adjustments false

                  Expect.sequenceEqual actual expected "gradients"
              }

              // 1x10 input, 3x3 hidden layer, single output node.
              test "Check that Calculated gradients are nearly equal to the numerical gradients | Single Node only in last layer" {
                  let input = array2D [| randomDecimals 10 |]
                  let innerLayerSize = 3

                  let network =
                      { Network2D.layers =
                            [| Array2D.init
                                innerLayerSize
                                innerLayerSize
                                (fun _ _ -> array2D [| randomDecimals <| Array2D.length2 input |])
                               array2D [| [| array2D [| for _ in 0 .. innerLayerSize - 1 ->
                                                            randomDecimals innerLayerSize |] |] |] |] }

                  let expectedValues =
                      randomDecimals 1 |> Array.singleton |> array2D

                  let adjustments, _ =
                      calcDeltasAndAdjustments input network sigmaFun expectedValues

                  let expected =
                      network.layers
                      |> Array.map (Array2D.map (Array2D.map (fun _ -> true)))

                  let actual =
                      compareGradients network input expectedValues adjustments false

                  Expect.sequenceEqual actual expected "gradients"
              }

              // 1x10 input, 3x3 hidden layer, 1x3 output layer.
              test "Check that Calculated gradients are nearly equal to the numerical gradients | Multi Node in all Layers" {
                  let input = array2D [| randomDecimals 10 |]
                  let innerLayerSize = 3

                  let network =
                      { Network2D.layers =
                            [| Array2D.init
                                innerLayerSize
                                innerLayerSize
                                (fun _ _ -> array2D [| randomDecimals <| Array2D.length2 input |])
                               array2D [| [| for _ in 0 .. innerLayerSize - 1 ->
                                                 array2D [| for _ in 0 .. innerLayerSize - 1 ->
                                                                randomDecimals innerLayerSize |] |] |] |] }

                  let expectedValues =
                      randomDecimals innerLayerSize
                      |> Array.singleton
                      |> array2D

                  let adjustments, _ =
                      calcDeltasAndAdjustments input network sigmaFun expectedValues

                  let expected =
                      network.layers
                      |> Array.map (Array2D.map (Array2D.map (fun _ -> true)))

                  let actual =
                      compareGradients network input expectedValues adjustments false

                  Expect.sequenceEqual actual expected "gradients"
              }

              // Regression: constant input with random weights (the failing case).
              test "regression 1" {
                  // Parenthesised negative literal — `5 -0.2m` would parse as subtraction.
                  let input = Array2D.create 1 5 (-0.2m)
                  let expectedValues = array2D [ [ 1.0m ] ]

                  let network =
                      let layer1X = 5
                      let layer1Y = 1

                      let layers =
                          [| Array2D.create layer1X layer1Y
                             <| array2D (Array.init 1 (fun _ -> randomDecimals 5))
                             Array2D.create 1 1
                             <| array2D (Array.init layer1X (fun _ -> randomDecimals layer1Y)) |]

                      { layers = layers }

                  let adjustments, _ =
                      calcDeltasAndAdjustments input network sigmaFun expectedValues

                  let expected =
                      network.layers
                      |> Array.map (Array2D.map (Array2D.map (fun _ -> true)))

                  let actual =
                      compareGradients network input expectedValues adjustments true

                  Expect.sequenceEqual actual expected "gradients"
              } )
        |> runTestsWithCLIArgs [] [||]
```

Testing correctness of feedforward neural network implementation

So I’m currently reading about neuroevolution (NEAT, WANN) and trying to make my own implementation just as an exercise. Now I want to test if my feedforward implementation gives the expected output. I’m doing this manually right now and it’s starting to get tedious, specially since the resulting topology is kind of crazy.

Is there some kind of dataset online which I can cross-reference? I want to do something like: given this input and topology+weights, this should be the output.

machine learning – Designing a neural network with LSTM and feedforward NN combination

Currently, I’m designing a neural network that works with reinforcement learning. In summary, the agent takes in information about itself and nearby agents and, in conjunction with global world information, makes a decision.

I’m currently thinking of implementing this as a LSTM to take in information about itself and a variable number of nearby agents and a feedforward neural network to combine the information from the LSTM output and global world information to produce an action.

Would this approach be sufficient to produce meaningful results? I thought that another approach would be to take in the global world information and each agent at each LSTM cell, though it may use much more resources (resources during forward propagation are a main concern with this project). Also, if the second approach is used, how would I be able to link the inputs to outputs if they had different shapes (attempting to learn without a library)? How would I be able to map an input with shape (1, x, 6) to (1, 1, 4) or (1, 4).

Neural DSP Archetype Cory Wong v1.0.0 | Nulled Scripts Download

File Size : 179.92 MB

An incredibly versatile plugin, designed to offer a wide variety of options for players in search of the perfect clean and edge-of-break-up tones.

It is hard to think of many artists that have been able to achieve what Vulfpeck has. The perfect combination of insane talent, fantastic music, and branding genius has taken them from their DIY roots to amassing an internationally revered cult-like fanbase, selling out Madison Square Garden. In that, Cory Wong has become a modern guitar hero, known for his unique playing style and perfect tone.

Cory’s uncompromising standards and borderline unrealistic expectations for perfection provided the challenge we were after. The result is unlike anything we have done before: an incredibly versatile plugin, designed to offer a wide variety of options for players in search of the perfect clean and edge-of-break-up tones.

We proudly present you, Archetype: Cory Wong

What’s New:
official site does not provide any info about changes in this version.

HOMEPAGE

Download From Rapidgator

Download From Nitroflare

 

.

image processing – how to implement training for UNET implementation on Wolfram Neural Net Repository?

My question is with reference to the U-NET implementation present on the Wolfram Neural Net Repository

The construction notebook present on the page (link: http://www.wolframcloud.com/files/1737200a-b043-413c-ad37-477e208472ad?contentDisposition=attachment) contains all the necessary functions for constructing the net. However, it does not contain the procedure for training the neural net.

I am trying to implement a simple training procedure so that I can firstly train the net myself on the same dataset which the net was initially trained on (https://www.dropbox.com/sh/8dcqxlj94fyyop0/AADib7XPcVkJ1PHddD2Nm9Moa?dl=0). Thereafter, I would like to use a different dataset for training.

Please download the construction notebook before proceeding. The only code that I have added to the construction notebook is mentioned below:

(*loading the images, resizing them and augmenting them to produce the training dataset;
background labelled as 1 and cells in the foreground as 2.*)
(* Restored Wolfram Language brackets: the scraped text had f(x) and p((i))
   where f[x] and p[[i]] are required; backslashes in Windows paths doubled. *)

fnamesimages = Import["C:\\Users\\aliha\\Downloads\\dataset\\images\\"];
ordering = Ordering@Flatten@StringCases[fnamesimages, (p : DigitCharacter ..) ~~ ".tif" :> FromDigits@p];
fnamesimages = fnamesimages[[ordering]];

images = Import["C:\\Users\\aliha\\Downloads\\dataset\\images\\" <> #] & /@ fnamesimages;
images = ImageResize[#, {388, 388}] & /@ images;
masks = Import["C:\\Users\\aliha\\Downloads\\dataset\\segmentation\\" <> #] & /@ fnamesimages;
allmasks = Flatten@Table[ImageRotate[j, i], {j, masks}, {i, {0, Pi/2, Pi, 3/2 Pi}}];
allmasks = Join[allmasks, ImageReflect /@ allmasks];
maskres = ImageResize[#, {388, 388}] & /@ allmasks;

m = ArrayComponents[ImageData@#, 2, {0. -> 1, n_ /; n != 0. -> 2}] & /@ maskres;
allimages = Flatten@Table[ImageRotate[j, i], {j, images}, {i, {0, Pi/2, Pi, 3/2 Pi}}];
allimages = Join[allimages, ImageReflect /@ allimages];

(* using a small subset of images and segmented images because of GPU memory crash*)
trained = NetTrain[unet, allimages[[1 ;; 50]] -> m[[1 ;; 50]], All, BatchSize -> 5, MaxTrainingRounds -> 1, TargetDevice -> "GPU"];
trainedNet = trained["TrainedNet"];

enter image description here

In addition I am using the code in the example notebook (present on the same page) to then evaluate the trained net on a test image.

(* Evaluate the trained net on one image: pad to a valid input size, run the
   net, then trim the prediction back to the original extent.
   NOTE(review): brackets restored from the scraped parentheses; the exact
   grouping inside the final Take was ambiguous in the source — verify. *)
Clear@netevaluate;
netevaluate[img_, device_ : "CPU"] :=
  Block[{net = trainedNet, dims = ImageDimensions[img], pads, mask},
    pads = Map[{Floor[#], Ceiling[#]} &, Mod[4 - dims, 16]/2];
    mask = NetReplacePart[net,
      {"Input" ->
        NetEncoder[{"Image", Ceiling[dims - 4, 16] + 188,
          ColorSpace -> "Grayscale"}],
       "Output" ->
        NetDecoder[{"Class", Range[2], "InputDepth" -> 3}]}][
      ImagePad[ColorConvert[img, "Grayscale"], pads + 92,
        Padding -> "Reversed"],
      TargetDevice -> device
      ];
    Take[mask, {1, -1} Reverse[pads[[2]] + 1], {1, -1} (pads[[1]] + 1)]
  ];

we can now load the test image and apply the net.

(* Fixed: missing closing quote on the path; brackets restored. *)
testimg = Import["C:\\Users\\aliha\\Downloads\\dataset\\test image\\t099.tif"];
netevaluate[testimg] // Colorize

enter image description here

Unfortunately I do not get any segmentations back. I just get the background. Could someone kindly let me know where I may be having the issue? Thanks !

machine learning – Designing a neural network given hypothesis class

The class of “lower-left quadrants” in $\mathbb{R}^{2}$, which is defined as follows:
$$
\mathcal{H}_{ll}=\left\{h_{a, b} \in \{0,1\}^{\mathbb{R}^{2}} \mid a, b \in \mathbb{R}\right\}
$$

with $h_{a, b}(\mathbf{x})=h_{a, b}\left(\left(x_{1}, x_{2}\right)\right)=1$ if and only if $x_{1} \leq a$ and $x_{2} \leq b$

I want to design a neural network that could output functions from this class.

Build a neural network in python without using numpy but only the math library

So, after I am processing the data here below:

# Load a CSV file
afrom csv import reader

# Read a CSV file into a list of rows, silently dropping blank rows.
def load_csv(filename):
    rows = []
    with open(filename, 'r') as handle:
        for record in reader(handle):
            if record:
                rows.append(record)
    return rows


# Convert string values to float
def str_column_to_float(dataset, column):
    """Convert the given column of every row to float, in place.

    Fix: the scraped code used call parentheses (row(column)) where
    Python subscripts (row[column]) are required.
    """
    for row in dataset:
        row[column] = float(row[column].strip())


filename = 'gameIS.csv'
dataset = load_csv(filename)
# Convert every column to float; dataset[0] gives the column count
# (fix: was dataset(0), a call on a list).
for i in range(len(dataset[0])):
    str_column_to_float(dataset, i)


# Find the min and max values for each column

def dataset_minmax(dataset):
    """Return [(min, max), ...] for each column of the dataset.

    Fix: restored the list-comprehension brackets (the scraped code built
    a generator wrapped in parentheses) and dropped the unused local.
    """
    # zip(*dataset) transposes rows into columns.
    return [(min(column), max(column)) for column in zip(*dataset)]


# Pre-compute per-column (min, max) once for the whole dataset.
minmax = dataset_minmax(dataset)


# Rescale the dataset columns to the range 0-1
def normalize_dataset(dataset, minmax):
    """Scale every column except the last (the label) to [0, 1], in place.

    Fix: restored Python subscripts — the scraped code had row(i) and
    minmax(i)(0) where row[i] and minmax[i][0] are required.
    """
    for row in dataset:
        for i in range(len(row) - 1):
            row[i] = (row[i] - minmax[i][0]) / (minmax[i][1] - minmax[i][0])
    # return dataset


# print(minmax)
normalize_dataset(dataset, minmax)
# dataset
# Fix: the guard read '__main' (missing trailing underscores) so it never matched.
if __name__ == '__main__':
    load_csv('gameIS.csv')
I try to build a neural network without using numpy here below:


from random import random
from math import exp
import matplotlib.pyplot as plt


class Neuron(object):
    """A small feed-forward neural network trained with backpropagation.

    The network is a list of layers; each layer is a list of neurons and
    each neuron is a dict holding its 'weights' (bias stored last), its
    latest 'output' and its error term 'delta'.

    Fixes to the scraped original:
    - `def activateself(...)` renamed to `activate` (a missing space had
      fused the method name with `self`, causing the reported
      AttributeError: 'Neuron' object has no attribute 'activate').
    - `self.forward_propogate` call corrected to `forward_propagate`,
      matching the defined method.
    - Python subscripts restored (the text had x(i) instead of x[i]).
    """

    def __init__(self):
        self.network = None

    def initialize_network(self, n_inputs, n_hidden, n_outputs):
        """
        Initializes a network by taking the number of inputs, the number of neurons to have in the hidden layer and
        the number of outputs in output layer
        Network is a list a of two layers(lists): Hidden layer and Output layer
        Each layer has neurons, where each neuron is a dictionary of weights and biases
        """
        self.network = list()
        hidden_layer = [{'weights': [random() for i in range(n_inputs + 1)]} for i in range(n_hidden)]
        self.network.append(hidden_layer)
        output_layer = [{'weights': [random() for i in range(n_hidden + 1)]} for i in range(n_outputs)]
        self.network.append(output_layer)
        return self.network

    # Calculate neuron activation for an input (weighted sum plus bias).
    def activate(self, weights, inputs):
        activation = weights[-1]  # weights[-1] = Bias
        for i in range(len(weights) - 1):
            activation += weights[i] * inputs[i]
        return activation

    # Transfer neuron activation through the logistic sigmoid.
    def transfer(self, activation):
        return 1.0 / (1.0 + exp(-activation))

    # Forward propagate input to a network output.
    def forward_propagate(self, row):
        inputs = row
        for layer in self.network:
            new_inputs = []
            for neuron in layer:
                activation = self.activate(neuron['weights'], inputs)
                neuron['output'] = self.transfer(activation)
                new_inputs.append(neuron['output'])
            inputs = new_inputs
        return inputs

    # Derivative of a neuron output (sigmoid derivative scaled by lamda).
    def transfer_dervative(self, output):
        lamda = 0.8
        return output * (1.0 - output) * lamda

    def backward_propogate_error(self, expected):
        """Walk the layers backwards, storing each neuron's 'delta' term."""
        for i in reversed(range(len(self.network))):
            layer = self.network[i]
            errors = list()
            if i != len(self.network) - 1:
                # Hidden layer: error is the delta-weighted sum from the layer above.
                for j in range(len(layer)):
                    error = 0.0
                    for neuron in self.network[i + 1]:
                        error += neuron['weights'][j] * neuron['delta']
                    errors.append(error)
            else:
                # Output layer: error is the plain difference from the target.
                for j in range(len(layer)):
                    neuron = layer[j]
                    errors.append(expected[j] - neuron['output'])
            for j in range(len(layer)):
                neuron = layer[j]
                neuron['delta'] = errors[j] * self.transfer_dervative(neuron['output'])

    # Update network weights with error.
    def update_weights(self, row, l_rate):
        for i in range(len(self.network)):
            inputs = row[:-2]
            if i != 0:
                # Output layer input = output of hidden layer.
                inputs = [neuron['output'] for neuron in self.network[i - 1]]
            for neuron in self.network[i]:
                for j in range(len(inputs)):
                    # weight = weight + learning_rate * error * input
                    neuron['weights'][j] += l_rate * neuron['delta'] * inputs[j]
                neuron['weights'][-1] += l_rate * neuron['delta']  # updating bias

    def train_network(self, train, l_rate, n_epochs, n_outputs):
        """Online SGD: one forward/backward pass and weight update per row.

        Assumes the first two columns of each row are features and the last
        two are the targets -- TODO confirm against the dataset layout.
        """
        print(len(train))
        for epoch in range(n_epochs):
            Total_error = 0
            for row in train:
                sum_error_row = 0
                outputs = self.forward_propagate(row[:2])
                expected = row[-2:]
                sum_error_row += sum([(expected[i] - outputs[i]) ** 2 for i in range(len(expected))])
                self.backward_propogate_error(expected)
                self.update_weights(row, l_rate)
                Total_error += sum_error_row
            print('>epoch=%d, lrate=%.3f, error=%.3f' % (epoch, l_rate, ((Total_error) / len(train))))

# Smoke test: build a 2-4-2 network and run a single forward pass.
if __name__ == '__main__':
    neural_net = Neuron()
    neural_net.initialize_network(2, 4, 2)
    neural_net.forward_propagate((1,1))

The problem is I am getting the following error:

C:\Python3\python.exe M:/neural_net/network.py
Traceback (most recent call last):
File "M:/neural_net/network.py", line 120, in <module>
neural_net.forward_propagate((1,1))
File "M:/neural_net/network.py", line 44, in forward_propagate
activation = self.activate(neuron('weights'), inputs)
AttributeError: 'Neuron' object has no attribute 'activate'
 

The error refers to the activation function which multiplies the weights with the inputs.
Does anyone know how to remedy this particular error in the code?

In python Cross-validation in neural network with multi class labes in target variable

hey everyone can you please help me to find the solution of this problem
i want to apply cross validation in neural networks but it is showing nan values in output,
here is my codes

import pandas as pd
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation

# define baseline model
# define baseline model
def baseline_model():
    """Build and compile a 4-input, 3-class softmax classifier.

    Fix: `metrics` must be a list — the scraped `('accuracy')` is just the
    bare string 'accuracy' in grouping parentheses, not a tuple/list.
    """
    # create model
    model = Sequential()
    model.add(Dense(8, input_dim=4, activation='relu'))
    model.add(Dense(3, activation='softmax'))
    # Compile model
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model


# NOTE(review): X and dummy_y are not defined in this snippet — they must be
# the feature matrix and one-hot encoded labels prepared earlier. NaN scores
# from cross_val_score usually mean the estimator raised during fit; run with
# error_score='raise' to surface the underlying exception — TODO confirm.
estimator = KerasClassifier(build_fn=baseline_model, epochs=10, batch_size=5, verbose=1)
kfold = KFold(n_splits=10, shuffle=True)
results = cross_val_score(estimator, X, dummy_y, cv=kfold)
print("Baseline: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))

neural networks – Time of maximum difference between two exponentials

I am looking at some equations for the LIF neuron model, and the kernel function is a difference of two exponential terms:

$$K(t) = \left(\exp\left(\tfrac{-t}{\tau_m}\right) - \exp\left(\tfrac{-t}{\tau_s}\right)\right) \times V_{\mathrm{norm}}$$

Where $V_{\mathrm{norm}}$ is some normalisation constant (not important, just affects the height of the function). This function is non-monotonic – increasing then decreasing. My question is: how do I analytically determine the value of $t$ where $K(t)$ is the peak, i.e. the value of $t$ where the difference between the two exponential terms is the highest?

python – Implementing Convolutional Neural Network

Context

I was making a Convolutional Neural Network from scratch in Python. I completed making it …. It works fine … The only thing is that it takes a lot of time as the size of the input grows.

Code

import numpy as np
import math

class ConvolutionalNeuralNetwork():
    """Naive multi-filter 2D convolution over a stack of images.

    Fixes to the scraped original: Python subscripts restored (the text had
    x(i) where x[i] is required) and the activation comment corrected —
    clamping negatives to zero is ReLU, not softmax.
    """

    def __init__(self, num_of_filters, kernel_shape, stride):
        self.num_of_filters = num_of_filters
        self.kernel_shape = kernel_shape
        self.stride = stride
        self.kernels = []

        # Initialize each 3x3 kernel with uniform random weights in [-1, 1).
        for i in range(self.num_of_filters):
            self.kernel = np.random.uniform(-1, 1, size=(3,3))
            self.kernels.append(self.kernel)
        self.kernels = np.array(self.kernels)

    def ElementWiseAddition(self, images):
        """Fold equally-shaped maps into one via repeated (a + b) / 2."""
        if np.array(images).shape[0] == 1:
            return images[0]

        resultant_image = images[0]
        for image in images[1:]:
            resultant_image = np.add(image, resultant_image)
            resultant_image = resultant_image.astype(float)
            resultant_image /= 2.0

        return resultant_image

    def GetOutput(self, x):
        """Convolve every image in x with every kernel; one merged map per filter."""
        filter_maps = []
        for filter_n in range(self.num_of_filters):
            kernel_n_filter_maps = []
            for image in x:
                filter_map = []
                for i in range(0, (image.shape[0]-3)+1, self.stride):
                    row = []
                    for j in range(0, (image.shape[1]-3)+1, self.stride):
                        piece = image[i:i+3, j:j+3]
                        value = np.sum(np.multiply(self.kernels[filter_n], piece))

                        # Apply ReLU activation (zero out negative responses).
                        if value < 0.0:
                            value = 0
                        row.append(value)
                    filter_map.append(row)
                kernel_n_filter_maps.append(filter_map)
            filter_maps.append(self.ElementWiseAddition(kernel_n_filter_maps))
        return np.array(filter_maps)

# Demo: 512 random 4x4 "images" through 1028 filters.
# NOTE(review): the triple Python loop in GetOutput dominates the runtime at
# this scale — vectorising the convolution is the usual remedy.
input = np.random.uniform(-1, 1, size=(512, 4, 4))

ConvolutionalNN = ConvolutionalNeuralNetwork(1028, (3,3), stride=1)
output = ConvolutionalNN.GetOutput(input)
print(output.shape)

How can I make this code consume less time and make it more efficient?