Invert the Kth most significant bit of N
Last Updated :
22 Feb, 2022
Given two non-negative integers N and K, the task is to invert the Kth most significant bit of N and print the number obtained after inverting the bit.
Examples:
Input: N = 10, K = 1
Output: 2
The binary representation of 10 is 1010.
After inverting the first bit it becomes 0010
whose decimal equivalent is 2.
Input: N = 56, K = 2
Output: 40
Approach: Find the number of bits in N. If the number of bits is less than K, then N itself is the required answer; otherwise, flip the Kth most significant bit of N and print the number obtained after flipping it.
Below is the implementation of the above approach:
C++
#include <bits/stdc++.h>
using namespace std;
// Writes the binary representation of n into arr, most
// significant digit first. arr must have at least
// floor(log2(n)) + 1 slots; n is assumed to be positive
// (log2(0) is undefined).
void decBinary(int arr[], int n)
{
    // Index of the most significant digit.
    int pos = log2(n);

    // Peel off digits from the least significant end,
    // storing them right-to-left so the result reads
    // MSB-first.
    for (; n > 0; n /= 2)
        arr[pos--] = n % 2;
}
// Interprets the first n entries of arr as a binary number
// (most significant digit first) and returns its decimal
// value.
int binaryDec(int arr[], int n)
{
    int value = 0;

    // Horner-style fold: equivalent to summing
    // arr[i] * 2^(n - i - 1) over all i.
    for (int i = 0; i < n; ++i)
        value = (value << 1) + arr[i];

    return value;
}
// Returns n with its kth most significant bit inverted.
// If k exceeds the number of bits in n, n is returned
// unchanged (there is no kth bit to flip). n is assumed
// to be positive (log2(0) is undefined).
int getNum(int n, int k)
{
    // Number of bits in n.
    int l = log2(n) + 1;
    if (k > l)
        return n;

    // The kth most significant bit is bit (l - k) counting
    // from the least significant end, so a single XOR flips
    // it. This also fixes the original's `int a[l] = { 0 }`,
    // a variable-length array with an initializer, which is
    // not standard C++ and is rejected by g++.
    return n ^ (1 << (l - k));
}
// Driver code: flips the 2nd most significant bit of 56
// (111000b -> 101000b = 40).
int main()
{
    const int n = 56;
    const int k = 2;
    cout << getNum(n, k);
    return 0;
}
|
Java
class GFG
{
    // Fills arr with the binary representation of n, most
    // significant digit first. arr must have at least as
    // many slots as n has bits; n is assumed positive.
    static void decBinary(int arr[], int n)
    {
        // Index of the most significant bit. Using
        // numberOfLeadingZeros avoids the floating-point
        // precision hazards of Math.log(n) / Math.log(2),
        // which can round down for exact powers of two.
        int k = 31 - Integer.numberOfLeadingZeros(n);
        while (n > 0)
        {
            arr[k--] = n % 2;
            n /= 2;
        }
    }

    // Converts the first n entries of arr (binary digits,
    // most significant first) back to a decimal value.
    static int binaryDec(int arr[], int n)
    {
        int ans = 0;
        for (int i = 0; i < n; i++)
            ans += arr[i] << (n - i - 1);
        return ans;
    }

    // Returns n with its kth most significant bit inverted;
    // n is returned unchanged when k exceeds n's bit length.
    static int getNum(int n, int k)
    {
        // Exact integer bit length of n.
        int l = 32 - Integer.numberOfLeadingZeros(n);
        if (k > l)
            return n;
        int a[] = new int[l];
        decBinary(a, n);
        // Flip the kth most significant bit.
        a[k - 1] = (a[k - 1] == 0) ? 1 : 0;
        return binaryDec(a, l);
    }

    // Driver code
    public static void main(String[] args)
    {
        int n = 56;
        int k = 2;
        System.out.println(getNum(n, k));
    }
}
|
Python
import math
def decBinary(arr, n):
    """Fill arr with the binary digits of n, most significant first.

    arr must have at least int(math.log2(n)) + 1 slots and n must be
    positive (math.log2(0) raises ValueError).
    """
    k = int(math.log2(n))
    while n > 0:
        arr[k] = n % 2
        k -= 1
        # Fixes the garbled `n = n / / 2` in the original source,
        # which is a syntax error; floor division is intended.
        n //= 2
def binaryDec(arr, n):
    """Return the decimal value of the first n entries of arr,
    interpreted as binary digits with the most significant first."""
    result = 0
    for i, bit in enumerate(arr[:n]):
        result += bit << (n - i - 1)
    return result
def getNum(n, k):
    """Return n with its kth most significant bit inverted.

    If k exceeds the bit length of n, n is returned unchanged.
    n must be positive (math.log2(0) raises ValueError).

    Fixes the garbled `= =` comparison in the original source and
    flips the bit with a single XOR: the kth most significant bit
    is bit (l - k) counting from the least significant end.
    """
    l = int(math.log2(n)) + 1  # number of bits in n
    if k > l:
        return n
    return n ^ (1 << (l - k))
# Driver code: flip the 2nd MSB of 56 (111000b -> 101000b = 40).
number = 56
bit_pos = 2
print(getNum(number, bit_pos))
|
C#
using System;
class GFG
{
    // Returns the number of bits in the binary representation
    // of n (0 for n == 0). Pure integer arithmetic avoids the
    // floating-point precision hazards of the original
    // Math.Log(n) / Math.Log(2), which can round down for
    // exact powers of two.
    static int bitLength(int n)
    {
        int len = 0;
        while (n > 0)
        {
            len++;
            n /= 2;
        }
        return len;
    }

    // Fills arr with the binary representation of n, most
    // significant digit first. arr must have at least
    // bitLength(n) slots.
    static void decBinary(int []arr, int n)
    {
        int k = bitLength(n) - 1;
        while (n > 0)
        {
            arr[k--] = n % 2;
            n /= 2;
        }
    }

    // Converts the first n entries of arr (binary digits,
    // most significant first) back to a decimal value.
    static int binaryDec(int []arr, int n)
    {
        int ans = 0;
        for (int i = 0; i < n; i++)
            ans += arr[i] << (n - i - 1);
        return ans;
    }

    // Returns n with its kth most significant bit inverted;
    // n is returned unchanged when k exceeds n's bit length.
    static int getNum(int n, int k)
    {
        int l = bitLength(n);
        if (k > l)
            return n;
        int []a = new int[l];
        decBinary(a, n);
        // Flip the kth most significant bit.
        a[k - 1] = (a[k - 1] == 0) ? 1 : 0;
        return binaryDec(a, l);
    }

    // Driver code
    public static void Main(String[] args)
    {
        int n = 56;
        int k = 2;
        Console.WriteLine(getNum(n, k));
    }
}
|
Javascript
<script>
// Fills arr with the binary digits of n, most significant
// first. arr must have at least floor(log2(n)) + 1 slots;
// n is assumed positive.
function decBinary(arr, n)
{
    // Math.floor is the correct way to truncate a number:
    // the original parseInt(x, 10) coerces the number
    // through a string, which is meant for parsing text
    // and misbehaves on values that stringify to
    // exponential notation.
    let k = Math.floor(Math.log2(n));
    while (n > 0)
    {
        arr[k--] = n % 2;
        n = Math.floor(n / 2);
    }
}
// Interprets the first n entries of arr as a binary number
// (most significant digit first) and returns its decimal
// value.
function binaryDec(arr, n)
{
    // Horner-style fold: equivalent to summing
    // arr[i] << (n - i - 1) over all i.
    return arr.slice(0, n).reduce(
        (acc, bit) => (acc << 1) + bit, 0);
}
// Returns n with its kth most significant bit inverted.
// If k exceeds the bit length of n, n is returned
// unchanged. n is assumed positive.
function getNum(n, k)
{
    // Math.floor replaces the original's misuse of
    // parseInt for numeric truncation.
    let l = Math.floor(Math.log2(n)) + 1;
    if (k > l)
        return n;

    // The kth most significant bit is bit (l - k) counting
    // from the least significant end; a single XOR flips it
    // without the intermediate digit array.
    return n ^ (1 << (l - k));
}
// Driver code: flip the 2nd MSB of 56 (111000b -> 101000b = 40).
const num = 56;
const pos = 2;
document.write(getNum(num, pos));
</script>
|
Time Complexity: O(log N), since the number of bits in N is proportional to log N
Auxiliary Space: O(log N), for the array holding the binary digits of N
Like Article
Suggest improvement
Share your thoughts in the comments
Please Login to comment...