Open In App

Implementing OR Gate using Adaline Network

Last Updated : 10 Mar, 2024
Improve
Improve
Like Article
Like
Save
Share
Report

Adaline stands for ADAptive LINear Element (adaptive linear neuron). It uses a linear activation function and is trained with the delta (LMS) rule, which minimizes the mean squared error between the actual output and the desired target output. The weights and bias are adjustable. Here, we perform 10 epochs of training and compute the mean squared error after each epoch; the error decreases over the first few epochs and then becomes nearly constant.

Truth table for the OR gate (bipolar encoding: -1 stands for false, 1 stands for true)

x y x or y
-1 -1 -1
-1 1 1
1 -1 1
1 1 1

Below is the implementation.

C++




#include <iostream>
#include <vector>
#include <cmath>
 
// The features for the OR model, here we have taken the possible values for combination of two inputs
// Bipolar input pairs for the OR gate: each row is one (x1, x2) sample.
const std::vector<std::vector<int>> features = {{-1, -1}, {-1, 1}, {1, -1}, {1, 1}};

// Desired bipolar output of (x1 OR x2) for the corresponding rows of `features`.
const std::vector<int> labels = {-1, 1, 1, 1};

// Adjustable Adaline parameters: initial weights, bias, learning rate, epoch count.
std::vector<double> weight = {0.5, 0.5};
double bias = 0.1;
double learning_rate = 0.2;
int epoch = 10;

// Trains an Adaline (adaptive linear neuron) on the OR truth table using the
// delta (LMS) rule, printing the per-sample error and the mean squared error
// after every epoch.
int main() {
    for (int i = 0; i < epoch; i++) {
        // One full pass over all training samples.
        std::cout << "Epoch: " << i + 1 << std::endl;

        // Squared error accumulated over all samples of this epoch.
        double sum_squared_error = 0.0;

        // Use std::size_t to avoid a signed/unsigned comparison with size().
        for (std::size_t j = 0; j < features.size(); j++) {
            // Target output for this sample.
            int actual = labels[j];

            // The two feature values of the current sample.
            int x1 = features[j][0];
            int x2 = features[j][1];

            // Net input: weighted sum of the features plus the bias (linear activation).
            double unit = (x1 * weight[0]) + (x2 * weight[1]) + bias;

            // Delta-rule error: target minus the linear output.
            double error = actual - unit;
            std::cout << "Error = " << error << std::endl;

            // Accumulate the squared error for this epoch's MSE.
            sum_squared_error += error * error;

            // LMS updates: w += lr * error * x for each weight, bias += lr * error.
            weight[0] += learning_rate * error * x1;
            weight[1] += learning_rate * error * x2;
            bias += learning_rate * error;
        }

        // Mean squared error: divide by the sample count, not a hard-coded 4.
        std::cout << "Sum of squared error = " << sum_squared_error / features.size() << "\n\n";
    }

    return 0;
}
// Note:  differences in floating-point arithmetic can cause slight variations in the
//output error values.


Java




import java.util.Arrays;
import java.util.List;
 
public class Main {
    // Bipolar input pairs for the OR gate: each inner list is one (x1, x2) sample.
    static List<List<Integer>> features = Arrays.asList(Arrays.asList(-1, -1), Arrays.asList(-1, 1), Arrays.asList(1, -1), Arrays.asList(1, 1));

    // Desired bipolar output of (x1 OR x2) for each row of `features`.
    static List<Integer> labels = Arrays.asList(-1, 1, 1, 1);

    // Adjustable Adaline parameters: initial weights, bias, learning rate, epoch count.
    static double[] weight = {0.5, 0.5};
    static double bias = 0.1;
    static double learning_rate = 0.2;
    static int epoch = 10;

    /**
     * Trains an Adaline (adaptive linear neuron) on the OR truth table using
     * the delta (LMS) rule, printing the per-sample error and the mean squared
     * error after every epoch.
     */
    public static void main(String[] args) {
        for (int i = 0; i < epoch; i++) {
            // One full pass over all training samples.
            System.out.println("Epoch: " + (i+1));

            // Squared error accumulated over all samples of this epoch.
            double sum_squared_error = 0.0;

            for (int j = 0; j < features.size(); j++) {
                // Target output for this sample.
                int actual = labels.get(j);

                // The two feature values of the current sample.
                int x1 = features.get(j).get(0);
                int x2 = features.get(j).get(1);

                // Net input: weighted sum of the features plus the bias (linear activation).
                double unit = (x1 * weight[0]) + (x2 * weight[1]) + bias;

                // Delta-rule error: target minus the linear output.
                double error = actual - unit;
                System.out.println("Error = " + error);

                // Accumulate the squared error for this epoch's MSE.
                sum_squared_error += error * error;

                // LMS updates: w += lr * error * x for each weight, bias += lr * error.
                weight[0] += learning_rate * error * x1;
                weight[1] += learning_rate * error * x2;
                bias += learning_rate * error;
            }

            // Mean squared error: divide by the sample count, not a hard-coded 4.
            System.out.println("Sum of squared error = " + sum_squared_error / features.size() + "\n");
        }
    }
}
// Note:  differences in floating-point arithmetic can cause slight variations in the
//output error values.


C#




using System;
 
class Program
{
    // Trains an Adaline (adaptive linear neuron) on the bipolar OR truth table
    // with the delta (LMS) rule, logging each sample's error and the mean
    // squared error of every epoch.
    static void Main(string[] args)
    {
        // Bipolar input pairs and their OR targets (-1 = false, 1 = true).
        int[,] samples = { { -1, -1 }, { -1, 1 }, { 1, -1 }, { 1, 1 } };
        int[] targets = { -1, 1, 1, 1 };

        // Adjustable parameters: initial weights, bias, learning rate, epoch count.
        double[] w = { 0.5, 0.5 };
        double b = 0.1;
        double lr = 0.2;
        int totalEpochs = 10;

        int sampleCount = samples.GetLength(0);

        for (int pass = 1; pass <= totalEpochs; pass++)
        {
            Console.WriteLine("Epoch: " + pass);

            // Squared error accumulated over this epoch's samples.
            double squaredErrorSum = 0.0;

            for (int s = 0; s < sampleCount; s++)
            {
                // Net input: weighted sum of the sample's features plus the bias.
                double net = samples[s, 0] * w[0] + samples[s, 1] * w[1] + b;

                // Delta-rule error: target minus the linear output.
                double err = targets[s] - net;
                Console.WriteLine("Error = " + err);

                squaredErrorSum += Math.Pow(err, 2);

                // LMS updates: w += lr * err * x for each weight, bias += lr * err.
                w[0] += lr * err * samples[s, 0];
                w[1] += lr * err * samples[s, 1];
                b += lr * err;
            }

            // Mean squared error over the epoch.
            Console.WriteLine("Sum of squared error = " + squaredErrorSum / sampleCount + "\n");
        }
    }
}
// Note:  differences in floating-point arithmetic can cause slight variations in the
//output error values.


Javascript




// The features for the OR model, here we have taken the possible values for combination of two inputs
let features = [[-1, -1], [-1, 1], [1, -1], [1, 1]];
 
// Labels for the OR model, here the output for the features is taken as an array
let labels = [-1, 1, 1, 1];
 
// Initialise weights, bias, learning rate, epoch
let weight = [0.5, 0.5];
let bias = 0.1;
let learning_rate = 0.2;
let epoch = 10;
 
for (let i = 0; i < epoch; i++) {
    // Epoch is the number of times the model is trained with the same data
    console.log("Epoch: ", i+1);
 
    // Variable to check if there is no change in previous weight and present calculated weight
    // Initial error is kept as 0
    let sum_squared_error = 0.0;
 
    // For each of the possible input given in the features
    for (let j = 0; j < features.length; j++) {
        // Actual output to be obtained
        let actual = labels[j];
 
        // The value of two features as given in the features array
        let x1 = features[j][0];
        let x2 = features[j][1];
 
        // Net unit value computation performed to obtain the sum of features multiplied with their weights
        let unit = (x1 * weight[0]) + (x2 * weight[1]) + bias;
 
        // Error is computed so as to update the weights
        let error = actual - unit;
 
        // Print statement to print the actual value, predicted value and the error
        console.log("Error = ", error);
 
        // Summation of squared error is calculated
        sum_squared_error += error * error;
 
        // Updation of weights, summing up of product of learning rate, sum of squared error and feature value
        weight[0] += learning_rate * error * x1;
        weight[1] += learning_rate * error * x2;
 
        // Updation of bias, summing up of product of learning rate and sum of squared error
        bias += learning_rate * error;
    }
 
    console.log("Sum of squared error = ", sum_squared_error/4, "\n\n");
}
// Note:  differences in floating-point arithmetic can cause slight variations in the
//output error values.


Python3




import numpy as np
 
# The features for the OR model, here we have taken the possible values for combination of two inputs
features = np.array([[-1, -1], [-1, 1], [1, -1], [1, 1]])
 
# Labels for the OR model, here the output for the features is taken as an array
labels = np.array([-1, 1, 1, 1])
 
# Initialise weights, bias, learning rate, epoch
weight = np.array([0.5, 0.5])
bias = 0.1
learning_rate = 0.2
epoch = 10
 
for i in range(epoch):
    # Epoch is the number of times the model is trained with the same data
    print("Epoch:", i+1)
 
    # Variable to check if there is no change in previous weight and present calculated weight
    # Initial error is kept as 0
    sum_squared_error = 0.0
 
    # For each of the possible input given in the features
    for j in range(len(features)):
        # Actual output to be obtained
        actual = labels[j]
 
        # The value of two features as given in the features array
        x1, x2 = features[j]
 
        # Net unit value computation performed to obtain the sum of features multiplied with their weights
        unit = np.dot(np.array([x1, x2]), weight) + bias
 
        # Error is computed so as to update the weights
        error = actual - unit
 
        # Print statement to print the actual value, predicted value and the error
        print("Error =", error)
 
        # Summation of squared error is calculated
        sum_squared_error += error ** 2
 
        # Updation of weights, summing up of product of learning rate, sum of squared error and feature value
        weight[0] += learning_rate * error * x1
        weight[1] += learning_rate * error * x2
 
        # Updation of bias, summing up of product of learning rate and sum of squared error
        bias += learning_rate * error
 
    print("Sum of squared error =", sum_squared_error / len(features), "\n")


Output:

[[-1 -1]
[-1 1]
[ 1 -1]
[ 1 1]] [-1 1 1 1]
epoch : 1
error = -0.09999999999999998
error = 0.9199999999999999
error = 1.1039999999999999
error = -0.5247999999999999
sum of squared error = 0.5876577599999998
epoch : 2
error = -0.54976
error = 0.803712
error = 0.8172543999999999
error = -0.64406528
sum of squared error = 0.5077284689412096
epoch : 3
error = -0.6729103360000002
error = 0.7483308032
error = 0.7399630438400001
error = -0.6898669486079996
sum of squared error = 0.5090672560860652
epoch : 4
error = -0.7047962935296
error = 0.72625757847552
error = 0.7201693816586239
error = -0.7061914301759491
sum of squared error = 0.5103845399996764
epoch : 5
error = -0.7124421954738586
error = 0.7182636328518943
error = 0.7154472043637898
error = -0.7117071786082882
sum of squared error = 0.5104670846209363
epoch : 6
error = -0.714060481354338
error = 0.715548426006041
error = 0.7144420989392495
error = -0.7134930727032405
sum of squared error = 0.5103479496309858
epoch : 7
error = -0.7143209120714415
error = 0.7146705871452027
error = 0.7142737539596766
error = -0.7140502797165604
sum of squared error = 0.5102658027779979
epoch : 8
error = -0.7143272889928647
error = 0.7143984993919014
error = 0.7142647152041359
error = -0.7142182126044045
sum of squared error = 0.510227607583693
epoch : 9
error = -0.7143072010372341
error = 0.7143174255259156
error = 0.7142744539151652
error = -0.7142671011374249
sum of squared error = 0.5102124122866718
epoch : 10
error = -0.7142946765305948
error = 0.7142942165270032
error = 0.7142809804050706
error = -0.7142808151475037
sum of squared error = 0.5102068786350209


Like Article
Suggest improvement
Share your thoughts in the comments

Similar Reads