Skip to content

Commit

Permalink
Moving estimate_filter_length into datasets module (#90)
Browse files Browse the repository at this point in the history
  • Loading branch information
gvanhoy authored May 5, 2023
1 parent 951986e commit 7db2181
Show file tree
Hide file tree
Showing 3 changed files with 32 additions and 42 deletions.
19 changes: 19 additions & 0 deletions torchsig/datasets/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
import numpy as np


def estimate_filter_length(
    transition_bandwidth: float, attenuation_db: int = 72, sample_rate: float = 1.0
) -> int:
    """Estimate the number of taps needed for an FIR filter.

    Uses fred harris' approximation:

        N ~= (sample_rate / transition_bandwidth) * (attenuation_db / 22)

    fred harris, *Multirate Signal Processing for Communication Systems*,
    Second Edition, p. 59.

    Args:
        transition_bandwidth: Width of the filter's transition band, in the
            same units as ``sample_rate``. Must be positive.
        attenuation_db: Desired sidelobe attenuation in dB. Default is 72 dB,
            corresponding to roughly 12 bits of dynamic range.
        sample_rate: Sampling rate. Defaults to a normalized rate of 1.0.

    Returns:
        An odd filter length. Odd lengths are forced because they do not
        introduce a half-sample delay.

    Raises:
        ValueError: If ``transition_bandwidth`` is not positive.
    """
    if transition_bandwidth <= 0:
        raise ValueError("transition_bandwidth must be positive")

    filter_length = int(
        np.round((sample_rate / transition_bandwidth) * (attenuation_db / 22))
    )

    # odd-length filters are desirable because they do not introduce a
    # half-sample delay
    if filter_length % 2 == 0:
        filter_length += 1

    return filter_length
35 changes: 6 additions & 29 deletions torchsig/datasets/synthetic.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
from torchsig.utils.dataset import SignalDataset
from torchsig.utils.types import SignalData, SignalDescription
from torchsig.transforms.functional import IntParameter, FloatParameter
from torchsig.datasets import estimate_filter_length


def torchsig_convolve(
Expand Down Expand Up @@ -169,21 +170,6 @@ class DigitalModulationDataset(ConcatDataset):
"""

def estimate_filter_length(attenuation_db, sample_rate, transition_bandwidth):
    """Estimate the number of FIR filter taps via fred harris' approximation.

    Returns an odd integer length so the filter introduces no half-sample
    delay. ``transition_bandwidth`` is assumed to be positive and in the same
    units as ``sample_rate`` — TODO confirm with callers.
    """
    # estimate the length of an FIR filter using harris' approximation,
    # N ~= (sampling rate/transition bandwidth)*(sidelobe attenuation in dB / 22)
    # fred harris, Multirate Signal Processing for Communication Systems,
    # Second Edition, p.59
    filter_length = int(
        np.round((sample_rate / transition_bandwidth) * (attenuation_db / 22))
    )

    # odd-length filters are desirable because they do not introduce a half-sample delay
    if np.mod(filter_length, 2) == 0:
        filter_length += 1

    return filter_length

def __init__(
self,
modulations: Optional[Union[List, Tuple]] = ("bpsk", "2gfsk"),
Expand Down Expand Up @@ -380,11 +366,8 @@ def _generate_samples(self, item: Tuple) -> np.ndarray:
(self.iq_samples_per_symbol * len(symbols),), dtype=np.complex64
)
zero_padded[:: self.iq_samples_per_symbol] = symbols

# estimate total filter length for pulse shape
attenuation_db = 72 # sidelobe attenuation level, 72 dB -> 12 bit dynamic range
pulse_shape_filter_length = DigitalModulationDataset.estimate_filter_length(
attenuation_db, 1, signal_description.excess_bandwidth
pulse_shape_filter_length = estimate_filter_length(
signal_description.excess_bandwidth
)
pulse_shape_filter_span = int(
(pulse_shape_filter_length - 1) / 2
Expand Down Expand Up @@ -993,18 +976,12 @@ def _generate_samples(self, item: Tuple) -> np.ndarray:
# accept the cutoff-frequency of the filter as external
# parameter, randomized as part of outer framework
cutoff_frequency = bandwidth
# define the sidelobe levels of the filter (in dB)
attenuation_db = 72
# using a normalized sampling rate of 1 such that fs/2 = 1/2
sample_rate = 1
# calculate transition bandwidth. a larger cutoff frequency requires
# a smaller transition bandwidth, and a smaller cutoff frequency
# allows for a larger transition bandwidth
transition_bandwidth = (sample_rate / 2 - (cutoff_frequency)) / 4
transition_bandwidth = (1.0 / 2 - (cutoff_frequency)) / 4
# estimate number of taps needed to implement filter
num_taps = DigitalModulationDataset.estimate_filter_length(
attenuation_db, sample_rate, transition_bandwidth
)
num_taps = estimate_filter_length(transition_bandwidth)

# design the filter
taps = sp.firwin(
Expand All @@ -1013,7 +990,7 @@ def _generate_samples(self, item: Tuple) -> np.ndarray:
width=transition_bandwidth,
window=sp.get_window("blackman", num_taps),
scale=True,
fs=sample_rate,
fs=1,
)
# apply the filter
modulated = torchsig_convolve(modulated, taps, gpu=self.use_gpu)
Expand Down
20 changes: 7 additions & 13 deletions torchsig/datasets/wideband.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,7 @@
uniform_continuous_distribution,
uniform_discrete_distribution,
)
from torchsig.datasets import estimate_filter_length


class SignalBurst(SignalDescription):
Expand Down Expand Up @@ -89,9 +90,7 @@ def generate_iq(self):
center = lower + bandwidth / 2

# Filter noise
num_taps = int(
2 * np.ceil(50 * 2 * np.pi / bandwidth / 0.125 / 22)
) # fred harris rule of thumb *2
num_taps = estimate_filter_length((0.5 - 0.02 * bandwidth) / 4)
sinusoid = np.exp(2j * np.pi * center * np.linspace(0, num_taps - 1, num_taps))
taps = signal.firwin(
num_taps,
Expand Down Expand Up @@ -366,10 +365,7 @@ def generate_iq(self):
-int(self.num_iq_samples * self.duration * oversample) :
]

# Filter around center
num_taps = int(
2 * np.ceil(50 * 2 * np.pi / 0.5 / 0.125 / 22)
) # fred harris rule of thumb * 2
num_taps = estimate_filter_length((0.5 - 0.02 / oversample) / 4)

taps = signal.firwin(
num_taps,
Expand Down Expand Up @@ -472,9 +468,8 @@ def generate_iq(self):
)

# Filter around center
num_taps = int(
2 * np.ceil(50 * 2 * np.pi / 0.5 / 0.125 / 22)
) # fred harris rule of thumb * 2
num_taps = estimate_filter_length((0.5 - 0.02 * 0.5) / 4)

taps = signal.firwin(
num_taps,
0.5,
Expand Down Expand Up @@ -580,9 +575,8 @@ def generate_iq(self):
)

# Filter around center
num_taps = int(
2 * np.ceil(50 * 2 * np.pi / 0.5 / 0.125 / 22)
) # fred harris rule of thumb * 2
num_taps = estimate_filter_length((0.5 - 0.5 * 0.02) / 4)

taps = signal.firwin(
num_taps,
0.5,
Expand Down

0 comments on commit 7db2181

Please sign in to comment.