Implementing AND NOT Gate using Adaline Network
Last Updated :
04 Apr, 2024
ADALINE (Adaptive Linear Neuron or later Adaptive Linear Element)
is an early single-layer artificial neural network, and also the name of the physical device that implemented it. The task here is to implement the AND-NOT function using an Adaline network. We perform 5 epochs of training and compute the total squared error after each epoch; this error decreases from epoch to epoch and eventually becomes nearly constant.
The Truth Table for AND-NOT Gate is as follows:
x1 x2 t
1 1 -1
1 -1 1
-1 1 -1
-1 -1 -1
Below is the implementation of the AND-NOT Gate using the Adaline network:
CPP
#include <iostream>
using namespace std;

// Trains an Adaline (adaptive linear neuron) on the bipolar AND-NOT
// truth table for 5 epochs, printing each sample's squared error and
// the per-epoch error total.
int main()
{
    // bipolar input patterns (x1, x2)
    int arr[4][2] = { { 1, 1 },
                      { 1, -1 },
                      { -1, 1 },
                      { -1, -1 } };
    // bipolar targets: AND-NOT is 1 only for (1, -1)
    int t[4] = { -1, 1, -1, -1 };
    // initial weights and bias; bias input is fixed at 1,
    // learning rate is the 0.2 factor in the update rule
    float yi, dif, w1 = 0.2, w2 = 0.2, b = 0.2, err[4];

    // 5 training epochs
    for (int i = 0; i < 5; i++) {
        float avg = 0; // sum of squared errors for this epoch
        cout << "EPOCH " << i + 1 << " Errors" << endl
             << endl;
        for (int j = 0; j < 4; j++) {
            // net input: weighted sum of inputs plus bias
            yi = arr[j][0] * w1 + arr[j][1] * w2 + 1 * b;
            dif = t[j] - yi; // error for this sample
            // delta-rule updates: w += lr * error * input
            w1 += 0.2 * dif * arr[j][0];
            w2 += 0.2 * dif * arr[j][1];
            b += 0.2 * dif * 1;
            err[j] = dif * dif; // squared error
            cout << err[j] << " ";
            avg += err[j];
        }
        // NOTE(review): this is the SUM of squared errors for the epoch;
        // the article labels it "Total Mean Error" in its output.
        cout << endl
             << "Total Mean Error :" << avg << endl
             << endl
             << endl;
    }
    return 0;
}
Java
// Trains an Adaline (adaptive linear neuron) on the bipolar AND-NOT
// truth table for 5 epochs, printing each sample's squared error and
// the per-epoch error total.
public class Main {
    public static void main(String[] args) {
        // bipolar input patterns (x1, x2)
        int[][] arr = { { 1, 1 },
                        { 1, -1 },
                        { -1, 1 },
                        { -1, -1 } };
        // bipolar targets: AND-NOT is 1 only for (1, -1)
        int[] t = { -1, 1, -1, -1 };
        // initial weights and bias; bias input is fixed at 1,
        // learning rate is the 0.2 factor in the update rule
        float yi, dif, w1 = 0.2f, w2 = 0.2f, b = 0.2f;
        float[] err = new float[4];
        // 5 training epochs
        for (int i = 0; i < 5; i++) {
            float avg = 0; // sum of squared errors for this epoch
            System.out.println("EPOCH " + (i + 1) + " Errors\n");
            for (int j = 0; j < 4; j++) {
                // net input: weighted sum of inputs plus bias
                yi = arr[j][0] * w1 + arr[j][1] * w2 + 1 * b;
                dif = t[j] - yi; // error for this sample
                // delta-rule updates: w += lr * error * input
                w1 += 0.2 * dif * arr[j][0];
                w2 += 0.2 * dif * arr[j][1];
                b += 0.2 * dif * 1;
                err[j] = dif * dif; // squared error
                // print errors space-separated on one line
                // (was println, which put each error on its own line)
                System.out.print(err[j] + " ");
                avg += err[j];
            }
            // NOTE(review): this is the SUM of squared errors for the
            // epoch; the article labels it "Total Mean Error".
            System.out.println("\nTotal Mean Error : " + avg + "\n\n");
        }
    }
}
Python3
# Trains an Adaline (adaptive linear neuron) on the bipolar AND-NOT
# truth table for 5 epochs, printing each sample's squared error and
# the per-epoch error total.
# (Indentation restored: the published listing lost it and would not run.)

# bipolar input patterns (x1, x2)
arr = [[1, 1], [1, -1], [-1, 1], [-1, -1]]
# bipolar targets: AND-NOT is 1 only for (1, -1)
t = [-1, 1, -1, -1]
# initial weights and bias; bias input is fixed at 1,
# learning rate is the 0.2 factor in the update rule
w1 = 0.2
w2 = 0.2
b = 0.2

# 5 training epochs
for i in range(5):
    avg = 0  # sum of squared errors for this epoch
    print("EPOCH", i + 1, "Errors\n")
    for j in range(4):
        # net input: weighted sum of inputs plus bias
        yi = arr[j][0] * w1 + arr[j][1] * w2 + 1 * b
        dif = t[j] - yi  # error for this sample
        # delta-rule updates: w += lr * error * input
        w1 += 0.2 * dif * arr[j][0]
        w2 += 0.2 * dif * arr[j][1]
        b += 0.2 * dif * 1
        err = dif * dif  # squared error
        print(err, end=" ")
        avg += err
    # NOTE(review): this is the SUM of squared errors for the epoch;
    # the article labels it "Total Mean Error".
    print("\n\nTotal Mean Error :", avg, "\n\n")
JavaScript
// Trains an Adaline (adaptive linear neuron) on the bipolar AND-NOT
// truth table for 5 epochs, printing each sample's squared error and
// the per-epoch error total.

// bipolar input patterns (x1, x2)
let arr = [[1, 1], [1, -1], [-1, 1], [-1, -1]];
// bipolar targets: AND-NOT is 1 only for (1, -1)
let t = [-1, 1, -1, -1];
// initial weights and bias; bias input is fixed at 1,
// learning rate is the 0.2 factor in the update rule
let yi, dif, w1 = 0.2, w2 = 0.2, b = 0.2, err = [];

// 5 training epochs
for (let i = 0; i < 5; i++) {
    let avg = 0; // sum of squared errors for this epoch
    console.log("EPOCH", i + 1, "Errors");
    let epochErrors = []; // collected so errors print space-separated on ONE line
    for (let j = 0; j < 4; j++) {
        // net input: weighted sum of inputs plus bias
        yi = arr[j][0] * w1 + arr[j][1] * w2 + 1 * b;
        dif = t[j] - yi; // error for this sample
        // delta-rule updates: w += lr * error * input
        w1 += 0.2 * dif * arr[j][0];
        w2 += 0.2 * dif * arr[j][1];
        b += 0.2 * dif * 1;
        err[j] = dif * dif; // squared error
        epochErrors.push(err[j]);
        avg += err[j];
    }
    // was one console.log per error (one per line); join matches the
    // documented space-separated output of the other implementations
    console.log(epochErrors.join(" "));
    // NOTE(review): this is the SUM of squared errors for the epoch;
    // the article labels it "Total Mean Error".
    console.log("Total Mean Error :", avg);
}
Output:
EPOCH 1 Errors
2.56 1.2544 0.430336 1.47088
Total Mean Error :5.71562
EPOCH 2 Errors
0.951327 0.569168 0.106353 0.803357
Total Mean Error :2.43021
EPOCH 3 Errors
0.617033 0.494715 0.369035 0.604961
Total Mean Error :2.08574
EPOCH 4 Errors
0.535726 0.496723 0.470452 0.541166
Total Mean Error :2.04407
EPOCH 5 Errors
0.515577 0.503857 0.499932 0.520188
Total Mean Error :2.03955
Share your thoughts in the comments
Please Login to comment...