I am an AI developer, and my areas of expertise are Deep Learning and High Performance Computing.
import numpy as np
import matplotlib.pyplot as plt
t = np.linspace(1, 100, 1000)      # time base: 1000 samples
v = 10 * np.sin(t / (2 * np.pi))   # clean signal: a 10 V sine wave
w = v ** 2                         # instantaneous signal power in watts
w_db = 10 * np.log10(w)            # instantaneous power in dB
target_snr_db = 20
# Calculate signal power and convert to dB
sig_avg_watts = np.mean(w)
sig_avg_db = 10 * np.log10(sig_avg_watts)
# Calculate noise according to [2] then convert to watts
noise_avg_db = sig_avg_db - target_snr_db
noise_avg_watts = 10 ** (noise_avg_db / 10)
# Generate a sample of white noise
mean_noise = 0
noise_volts = np.random.normal(mean_noise, np.sqrt(noise_avg_watts), len(w))
# Noise up the original signal
y_volts = v + noise_volts
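As a quick sanity check (my addition, not part of the original recipe), the measured SNR of y_volts should land near the 20 dB target:
# SNR here is measured as mean signal power over mean noise power
measured_snr_db = 10 * np.log10(np.mean(v ** 2) / np.mean(noise_volts ** 2))
print("Target SNR: {} dB, measured: {:.2f} dB".format(target_snr_db, measured_snr_db))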
def signal_gen():
    # Synthesize a low-amplitude sine wave over a random duration
    l = np.random.randint(1, 100)
    t = np.linspace(1, l, 1000)
    v = 10 * np.sin(t / (2 * np.pi)) / 1000
    return v
def noise_gen(v):
    # Add white Gaussian noise at a 20 dB SNR, following the recipe above
    w = v ** 2
    target_snr_db = 20
    sig_avg_watts = np.mean(w)
    sig_avg_db = 10 * np.log10(sig_avg_watts)
    noise_avg_db = sig_avg_db - target_snr_db
    noise_avg_watts = 10 ** (noise_avg_db / 10)
    mean_noise = 0
    noise_volts = np.random.normal(mean_noise, np.sqrt(noise_avg_watts), len(w))
    y_volts = v + noise_volts
    return y_volts
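A quick smoke test of the two generators (my own addition): both should return 1000-sample arrays, and the empirical SNR of a generated pair should sit near 20 dB.
demo = signal_gen()
demo_noisy = noise_gen(demo)
print(demo.shape, demo_noisy.shape)  # both (1000,)
print("Empirical SNR: {:.2f} dB".format(
    10 * np.log10(np.mean(demo ** 2) / np.mean((demo_noisy - demo) ** 2))))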
v = signal_gen()
plt.subplot(2, 1, 1)
plt.title("Random Signal")
plt.plot(v)
plt.subplot(2, 1, 2)
plt.title("Random Signal with Noise")
plt.plot(noise_gen(v))
plt.tight_layout()
plt.show()
# Build a dataset of 1000 clean/noisy signal pairs
signal = []
noisy_signal = []
for i in range(1000):
    v = signal_gen()
    signal.append(v)
    noisy_signal.append(noise_gen(v))
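The training loop below consumes one pair at a time; as a sketch of an alternative (my assumption, not the original approach), the pairs could be stacked into two (1000, 1000) tensors up front for batched training:
import torch
clean_batch = torch.from_numpy(np.stack(signal)).float()
noisy_batch = torch.from_numpy(np.stack(noisy_signal)).float()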
import torch
import torch.nn as nn
class DeNoise(nn.Module):
    # A minimal denoising autoencoder: 1000 -> 800 -> 1000
    def __init__(self):
        super(DeNoise, self).__init__()
        self.lin1 = nn.Linear(1000, 800)    # encoder
        self.lin_t1 = nn.Linear(800, 1000)  # decoder
    def forward(self, x):
        x = torch.tanh(self.lin1(x))  # F.tanh is deprecated; use torch.tanh
        x = self.lin_t1(x)
        return x
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = DeNoise().to(device)
print(model)
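print(model) lists the layers; a one-liner (my addition) also reports the parameter count:
n_params = sum(p.numel() for p in model.parameters())
print("Trainable parameters:", n_params)  # 1000*800 + 800 + 800*1000 + 1000 = 1,601,800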
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
def train(n_epochs, model):
    training_loss = []
    for epoch in range(n_epochs):
        trainloss = 0.0
        for sig, noisig in zip(signal, noisy_signal):
            sig = torch.Tensor(sig).to(device)
            noisig = torch.Tensor(noisig).to(device)
            optimizer.zero_grad()
            output = model(noisig)         # reconstruct the clean signal from the noisy one
            loss = criterion(output, sig)  # MSE against the clean target
            loss.backward()
            optimizer.step()
            trainloss += loss.item()
        print("Epoch: {}, Training Loss: {}".format(epoch + 1, trainloss / len(signal)))
        training_loss.append(trainloss / len(signal))
    plt.plot(training_loss)
    plt.title("Training Loss")
    plt.show()
    print("Training Completed !!!")
train(10, model)
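To quantify the improvement (a sketch of my own; note it evaluates on the training pairs, since no held-out set was built), compare the MSE of the raw noisy signals against the denoised outputs:
with torch.no_grad():
    noisy_mse, denoised_mse = 0.0, 0.0
    for sig, noisig in zip(signal, noisy_signal):
        sig_t = torch.Tensor(sig).to(device)
        noisig_t = torch.Tensor(noisig).to(device)
        noisy_mse += criterion(noisig_t, sig_t).item()
        denoised_mse += criterion(model(noisig_t), sig_t).item()
print("Noisy MSE: {:.6f} -> Denoised MSE: {:.6f}".format(
    noisy_mse / len(signal), denoised_mse / len(signal)))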
def plot(i):
    # Denoise the i-th noisy signal and plot it against the clean and noisy versions
    with torch.no_grad():
        pred = model(torch.Tensor(noisy_signal[i]).to(device)).cpu()
    plt.subplot(3, 1, 1)
    plt.title("Original Signal")
    plt.xlabel("Time")
    plt.ylabel("Voltage")
    plt.plot(signal[i])
    plt.subplot(3, 1, 2)
    plt.title("Noisy Signal")
    plt.xlabel("Time")
    plt.ylabel("Voltage")
    plt.plot(noisy_signal[i])
    plt.subplot(3, 1, 3)
    plt.title("Predicted Signal")
    plt.xlabel("Time")
    plt.ylabel("Voltage")
    plt.plot(pred.numpy())
    plt.tight_layout()
    plt.show()
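Calling the helper on a few indices then shows a clean/noisy/denoised triple per figure:
for i in (0, 1, 2):
    plot(i)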