From: git
Subject: [Commit-gnuradio] [gnuradio] 03/07: digital: burst_shaper block passes meaningful QA tests
Date: Wed, 22 Apr 2015 03:12:51 +0000 (UTC)
This is an automated email from the git hooks/post-receive script.
jcorgan pushed a commit to branch master
in repository gnuradio.
commit 064b5e310eeb42af8089ae9ad612e4c7ea9a3f85
Author: Sean Nowlan <address@hidden>
Date: Sun Apr 19 13:33:27 2015 -0400
digital: burst_shaper block passes meaningful QA tests
---
.../include/gnuradio/digital/burst_shaper_XX.h.t | 2 +-
gr-digital/lib/burst_shaper_XX_impl.cc.t | 245 +++++++++++++++++----
gr-digital/lib/burst_shaper_XX_impl.h.t | 40 ++--
gr-digital/python/digital/qa_burst_shaper.py | 245 ++++++++++++++++++++-
4 files changed, 462 insertions(+), 70 deletions(-)
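
For reference, the QA tests added in this commit drive the block roughly as in the following minimal Python sketch (not part of the diff; the 'packet_len' tag key, the burst_shaper_ff keyword arguments, and the vector source/sink usage mirror qa_burst_shaper.py below, while the window and padding values are illustrative):

    # Illustrative usage sketch mirroring the QA tests below (not part of the diff).
    # Assumes GNU Radio with gr-digital installed; window/padding values are arbitrary.
    import numpy as np
    import pmt
    from gnuradio import gr, blocks, digital

    def make_length_tag(offset, length):
        # A length tag marks where a burst starts and how many samples it spans.
        return gr.python_to_tag({'offset': offset,
                                 'key': pmt.intern('packet_len'),
                                 'value': pmt.from_long(length),
                                 'srcid': pmt.intern('example')})

    length = 20
    data = np.ones(length + 10)  # a few extra samples flush the block
    window = np.concatenate((-2.0*np.ones(5), -4.0*np.ones(5)))

    tb = gr.top_block()
    src = blocks.vector_source_f(data, tags=(make_length_tag(0, length),))
    shaper = digital.burst_shaper_ff(window, pre_padding=10, post_padding=10)
    sink = blocks.vector_sink_f()
    tb.connect(src, shaper, sink)
    tb.run()
    # Output: 10 zeros, up ramp, payload, down ramp, 10 zeros, plus a new length tag.
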
diff --git a/gr-digital/include/gnuradio/digital/burst_shaper_XX.h.t
b/gr-digital/include/gnuradio/digital/burst_shaper_XX.h.t
index 6825155..43d422b 100644
--- a/gr-digital/include/gnuradio/digital/burst_shaper_XX.h.t
+++ b/gr-digital/include/gnuradio/digital/burst_shaper_XX.h.t
@@ -54,7 +54,7 @@ namespace gr {
* \li input: stream of @I_TYPE@
* \li output: stream of @O_TYPE@
*/
- class DIGITAL_API @NAME@ : virtual public gr::block
+ class DIGITAL_API @NAME@ : virtual public block
{
public:
// gr::digital::@BASE_NAME@::sptr
diff --git a/gr-digital/lib/burst_shaper_XX_impl.cc.t
b/gr-digital/lib/burst_shaper_XX_impl.cc.t
index cdbc7ce..bd92665 100644
--- a/gr-digital/lib/burst_shaper_XX_impl.cc.t
+++ b/gr-digital/lib/burst_shaper_XX_impl.cc.t
@@ -26,9 +26,18 @@
#include "config.h"
#endif
+#include <boost/format.hpp>
#include <gnuradio/io_signature.h>
+#include <volk/volk.h>
#include "@address@hidden"
+#ifndef VOLK_MULT_gr_complex
+#define VOLK_MULT_gr_complex volk_32fc_x2_multiply_32fc
+#endif
+#ifndef VOLK_MULT_float
+#define VOLK_MULT_float volk_32f_x2_multiply_32f
+#endif
+
namespace gr {
namespace digital {
@@ -50,28 +59,32 @@ namespace gr {
: gr::block("@BASE_NAME@",
gr::io_signature::make(1, 1, sizeof(@I_TYPE@)),
gr::io_signature::make(1, 1, sizeof(@O_TYPE@))),
- d_up_flank(taps.begin(), taps.begin() + taps.size()/2 + taps.size()%2),
- d_down_flank(taps.begin() + taps.size()/2, taps.end()),
+ d_up_ramp(taps.begin(), taps.begin() + taps.size()/2 + taps.size()%2),
+ d_down_ramp(taps.begin() + taps.size()/2, taps.end()),
d_nprepad(pre_padding),
d_npostpad(post_padding),
d_insert_phasing(insert_phasing),
d_length_tag_key(pmt::string_to_symbol(length_tag_name)),
- d_state(STATE_WAITING)
+ d_ncopy(0),
+ d_limit(0),
+ d_index(0),
+ d_nprocessed(0),
+ d_finished(false),
+ d_state(STATE_WAIT)
{
- assert(d_up_flank.size() == d_down_flank.size());
-
- d_up_phasing.resize(d_up_flank.size());
- d_down_phasing.resize(d_up_flank.size());
- if(d_insert_phasing) {
- @I_TYPE@ symbol;
- for(unsigned int i = 0; i < d_up_flank.size(); i++) {
- symbol = (i%2) ? @I_TYPE@(1.0f) : @I_TYPE@(-1.0f);
- d_up_phasing.push_back(symbol * d_up_flank[i]);
- d_down_phasing.push_back(symbol * d_down_flank[i]);
- }
+ assert(d_up_ramp.size() == d_down_ramp.size());
+
+ d_up_phasing.resize(d_up_ramp.size());
+ d_down_phasing.resize(d_down_ramp.size());
+
+ @I_TYPE@ symbol;
+ for(unsigned int i = 0; i < d_up_ramp.size(); i++) {
+ symbol = (i%2 == 0) ? @I_TYPE@(1.0f) : @I_TYPE@(-1.0f);
+ d_up_phasing[i] = symbol * d_up_ramp[i];
+ d_down_phasing[i] = symbol * d_down_ramp[i];
}
- set_relative_rate(1.0);
+ //set_relative_rate(1.0);
set_tag_propagation_policy(TPP_DONT);
}
@@ -80,8 +93,9 @@ namespace gr {
}
void
- @IMPL_NAME@::forecast(int noutput_items, gr_vector_int &ninput_items_required)
- {
+ @IMPL_NAME@::forecast(int noutput_items,
+ gr_vector_int &ninput_items_required) {
+ //if(d_state == STATE_COPY
ninput_items_required[0] = noutput_items;
}
@@ -91,71 +105,157 @@ namespace gr {
gr_vector_const_void_star &input_items,
gr_vector_void_star &output_items)
{
- const @I_TYPE@ *in = (const @I_TYPE@ *) input_items[0];
- @O_TYPE@ *out = (@O_TYPE@ *) output_items[0];
+ const @I_TYPE@ *in = reinterpret_cast<const @I_TYPE@ *>(input_items[0]);
+ @O_TYPE@ *out = reinterpret_cast<@O_TYPE@ *>(output_items[0]);
int nwritten = 0;
int nread = 0;
- int nstart = 0;
- int nstop = 0;
- int nremaining = 0;
- int nprocessed = 0;
+ int nspace = 0;
+ int nskip = 0;
uint64_t curr_tag_index = nitems_read(0);
- std::vector<tag_t> tags;
+ std::vector<tag_t> length_tags, tags;
+ get_tags_in_window(length_tags, 0, 0, ninput_items[0], d_length_tag_key);
get_tags_in_window(tags, 0, 0, ninput_items[0]);
- std::sort(tags.begin(), tags.end(), tag_t::offset_compare);
+ std::sort(length_tags.rbegin(), length_tags.rend(), tag_t::offset_compare);
+ std::sort(tags.rbegin(), tags.rend(), tag_t::offset_compare);
while((nwritten < noutput_items) && (nread < ninput_items[0])) {
- nremaining = noutput_items - nwritten;
+ if(d_finished) {
+ d_finished = false;
+ break;
+ }
+ nspace = noutput_items - nwritten;
switch(d_state) {
- case(STATE_WAITING):
- curr_tag_index = tags[0].offset;
- d_nremaining = pmt::to_long(tags[0].value) +
- prefix_length() + suffix_length();
- nprocessed += (int)curr_tag_index; // drop orphaned samples
- add_length_tag(nwritten);
- enter_prepad();
+ case(STATE_WAIT):
+ if(!tags.empty()) {
+ curr_tag_index = tags.back().offset;
+ d_ncopy = pmt::to_long(tags.back().value);
+ tags.pop_back();
+ nskip = (int)(curr_tag_index - d_nprocessed);
+ add_length_tag(nwritten);
+ enter_prepad();
+ }
+ else {
+ nskip = ninput_items[0] - nread;
+ }
+ if(nskip > 0) {
+ GR_LOG_WARN(d_logger,
+ boost::format("Dropping %1% samples") %
+ nskip);
+ nread += nskip;
+ d_nprocessed += nskip;
+ }
break;
case(STATE_PREPAD):
- std::memset(out, 0x00, nprocess * sizeof(@O_TYPE@));
+ write_padding(out, nwritten, nspace);
+ if(d_index == d_limit)
+ enter_rampup();
break;
case(STATE_RAMPUP):
- nprocess = std::min(
- if(d_insert_phasing) {
-
- }
+ apply_ramp(out, in, nwritten, nread, nspace);
+ if(d_index == d_limit)
+ enter_copy();
break;
case(STATE_COPY):
- std::memcpy(out, in, nprocess * sizeof(@O_TYPE@));
+ copy_items(out, in, nwritten, nread, nspace);
+ if(d_index == d_limit)
+ enter_rampdown();
break;
case(STATE_RAMPDOWN):
+ apply_ramp(out, in, nwritten, nread, nspace);
+ if(d_index == d_limit)
+ enter_postpad();
break;
case(STATE_POSTPAD):
- std::memset(out, 0x00, nprocess * sizeof(@O_TYPE@));
+ write_padding(out, nwritten, nspace);
+ if(d_index == d_limit)
+ enter_wait();
break;
default:
- throw std::runtime_error("burst_shaper: invalid state reached");
+ throw std::runtime_error("@BASE_NAME@: invalid state");
}
}
- consume_each (nconsumed);
+ consume_each(nread);
+
+ return nwritten;
+ }
+
+ int
+ @IMPL_NAME@::prefix_length() const {
+ return (d_insert_phasing) ?
+ d_nprepad + d_up_ramp.size() : d_nprepad;
+ }
+
+ int
+ @IMPL_NAME@::suffix_length() const {
+ return (d_insert_phasing) ?
+ d_npostpad + d_down_ramp.size() : d_npostpad;
+ }
+
+ void
+ @IMPL_NAME@::write_padding(@O_TYPE@ *&dst, int &nwritten, int nspace) {
+ int nprocess = std::min(d_limit - d_index, nspace);
+ std::memset(dst, 0x00, nprocess * sizeof(@O_TYPE@));
+ dst += nprocess;
+ nwritten += nprocess;
+ d_index += nprocess;
+ }
+
+ void
+ @IMPL_NAME@::copy_items(@O_TYPE@ *&dst, const @I_TYPE@ *&src, int &nwritten,
+ int &nread, int nspace) {
+ int nprocess = std::min(d_limit - d_index, nspace);
+ std::memcpy(dst, src, nprocess * sizeof(@O_TYPE@));
+ dst += nprocess;
+ nwritten += nprocess;
+ src += nprocess;
+ nread += nprocess;
+ d_index += nprocess;
+ }
+
+ void
+ @IMPL_NAME@::apply_ramp(@O_TYPE@ *&dst, const @I_TYPE@ *&src, int &nwritten,
+ int &nread, int nspace) {
+ int nprocess = std::min(d_limit - d_index, nspace);
+ @O_TYPE@ *phasing;
+ const @O_TYPE@ *ramp;
+
+ if(d_state == STATE_RAMPUP) {
+ phasing = &d_up_phasing[d_index];
+ ramp = &d_up_ramp[d_index];
+ }
+ else {
+ phasing = &d_down_phasing[d_index];
+ ramp = &d_down_ramp[d_index];
+ }
+
+ if(d_insert_phasing)
+ std::memcpy(dst, phasing, nprocess * sizeof(@O_TYPE@));
+ else {
+ address@hidden@(dst, src, ramp, nprocess);
+ src += nprocess;
+ nread += nprocess;
+ }
- // Tell runtime system how many output items we produced.
- return noutput_items;
+ dst += nprocess;
+ nwritten += nprocess;
+ d_index += nprocess;
}
void
@IMPL_NAME@::add_length_tag(int offset)
{
add_item_tag(0, nitems_written(0) + offset, d_length_tag_key,
- pmt::from_long(d_nremaining),
+ pmt::from_long(d_ncopy + prefix_length() +
+ suffix_length()),
pmt::string_to_symbol(name()));
}
@@ -170,5 +270,60 @@ namespace gr {
add_item_tag(0, new_tag);
}
}
+
+ void
+ @IMPL_NAME@::enter_wait() {
+ d_finished = true;
+ d_nprocessed += d_ncopy;
+ d_index = 0;
+ d_state = STATE_WAIT;
+ }
+
+ void
+ @IMPL_NAME@::enter_prepad() {
+ d_limit = d_nprepad;
+ d_index = 0;
+ d_state = STATE_PREPAD;
+ }
+
+ void
+ @IMPL_NAME@::enter_rampup() {
+ if(d_insert_phasing)
+ d_limit = d_up_ramp.size();
+ else
+ d_limit = std::min((size_t)(d_ncopy/2), d_up_ramp.size());
+ d_index = 0;
+ d_state = STATE_RAMPUP;
+ }
+
+ void
+ @IMPL_NAME@::enter_copy() {
+ if(d_insert_phasing)
+ d_limit = d_ncopy;
+ else
+ d_limit = d_ncopy - std::min((size_t)((d_ncopy/2)*2),
+ d_up_ramp.size() +
+ d_down_ramp.size());
+ d_index = 0;
+ d_state = STATE_COPY;
+ }
+
+ void
+ @IMPL_NAME@::enter_rampdown() {
+ if(d_insert_phasing)
+ d_limit = d_down_ramp.size();
+ else
+ d_limit = std::min((size_t)(d_ncopy/2), d_down_ramp.size());
+ d_index = 0;
+ d_state = STATE_RAMPDOWN;
+ }
+
+ void
+ @IMPL_NAME@::enter_postpad() {
+ d_limit = d_npostpad;
+ d_index = 0;
+ d_state = STATE_POSTPAD;
+ }
+
} /* namespace digital */
} /* namespace gr */
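
The per-burst bookkeeping above (prefix_length(), suffix_length(), enter_rampup(), enter_copy(), add_length_tag()) reduces to a little integer arithmetic; the plain-Python model below is offered only as a reading aid and is assumed equivalent to the C++ above, not taken from the commit:

    # Python model of the segment lengths the state machine walks through for one
    # burst (assumed equivalent to the C++ above, not part of the diff).
    # ncopy: burst length from the length tag; ramp_len: size of each half-window.
    def burst_segments(ncopy, nprepad, npostpad, ramp_len, insert_phasing):
        if insert_phasing:
            # Phasing symbols carry the ramps, so the whole payload is copied.
            up = down = ramp_len
            copy = ncopy
        else:
            # Ramps are applied over the payload itself; clipped for short bursts.
            up = min(ncopy // 2, ramp_len)
            down = min(ncopy // 2, ramp_len)
            copy = ncopy - min((ncopy // 2) * 2, 2 * ramp_len)
        prefix = nprepad + (ramp_len if insert_phasing else 0)
        suffix = npostpad + (ramp_len if insert_phasing else 0)
        total = ncopy + prefix + suffix  # value written into the output length tag
        return (nprepad, up, copy, down, npostpad, total)

    # Matches test_short_burst below: 9-sample burst, 11-tap window whose up/down
    # ramps are 6 taps each (they share the middle tap), 10/10 padding.
    print(burst_segments(9, 10, 10, 6, False))  # -> (10, 4, 1, 4, 10, 29)
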
diff --git a/gr-digital/lib/burst_shaper_XX_impl.h.t
b/gr-digital/lib/burst_shaper_XX_impl.h.t
index 66b5e24..90c7df8 100644
--- a/gr-digital/lib/burst_shaper_XX_impl.h.t
+++ b/gr-digital/lib/burst_shaper_XX_impl.h.t
@@ -33,29 +33,38 @@ namespace gr {
class @IMPL_NAME@ : public @BASE_NAME@
{
protected:
- enum state_t {STATE_WAITING, STATE_PREPAD, STATE_RAMPUP,
+ enum state_t {STATE_WAIT, STATE_PREPAD, STATE_RAMPUP,
STATE_COPY, STATE_RAMPDOWN, STATE_POSTPAD};
private:
- const std::vector<@O_TYPE@> d_up_flank;
- const std::vector<@O_TYPE@> d_down_flank;
+ const std::vector<@O_TYPE@> d_up_ramp;
+ const std::vector<@O_TYPE@> d_down_ramp;
const int d_nprepad;
const int d_npostpad;
const bool d_insert_phasing;
const pmt::pmt_t d_length_tag_key;
std::vector<@O_TYPE@> d_up_phasing;
std::vector<@O_TYPE@> d_down_phasing;
- uint64_t d_nremaining;
+ int d_ncopy;
+ int d_limit;
+ int d_index;
+ uint64_t d_nprocessed;
+ bool d_finished;
state_t d_state;
- void enter_waiting() { d_state = STATE_WAITING; }
- void enter_prepad() { d_state = STATE_PREPAD; }
- void enter_rampup() { d_state = STATE_RAMPUP; }
- void enter_copy() { d_state = STATE_COPY; }
- void enter_rampdown() { d_state = STATE_RAMPDOWN; }
- void enter_postpad() { d_state = STATE_POSTPAD; }
+ void write_padding(@O_TYPE@ *&dst, int &nwritten, int nspace);
+ void copy_items(@O_TYPE@ *&dst, const @I_TYPE@ *&src, int &nwritten,
+ int &nread, int nspace);
+ void apply_ramp(@O_TYPE@ *&dst, const @I_TYPE@ *&src, int &nwritten,
+ int &nread, int nspace);
void add_length_tag(int offset);
- void propagate_tag(tag_t &tag, int offset);
+ void propagate_tags(std::vector<tag_t> &tags, int offset);
+ void enter_wait();
+ void enter_prepad();
+ void enter_rampup();
+ void enter_copy();
+ void enter_rampdown();
+ void enter_postpad();
public:
@IMPL_NAME@(const std::vector<@O_TYPE@> &taps, int pre_padding,
@@ -63,8 +72,7 @@ namespace gr {
const std::string &length_tag_name);
address@hidden@();
- void forecast(int noutput_items,
- gr_vector_int &ninput_items_required);
+ void forecast(int noutput_items, gr_vector_int &ninput_items_required);
int general_work(int noutput_items,
gr_vector_int &ninput_items,
@@ -72,10 +80,8 @@ namespace gr {
gr_vector_void_star &output_items);
int pre_padding() const { return d_nprepad; }
int post_padding() const { return d_npostpad; }
- int prefix_length() const { return d_nprepad +
- d_up_flank.size(); }
- int suffix_length() const { return d_npostpad +
- d_down_flank.size(); }
+ int prefix_length() const;
+ int suffix_length() const;
};
} // namespace digital
diff --git a/gr-digital/python/digital/qa_burst_shaper.py
b/gr-digital/python/digital/qa_burst_shaper.py
index 6ba4ac0..d00c230 100755
--- a/gr-digital/python/digital/qa_burst_shaper.py
+++ b/gr-digital/python/digital/qa_burst_shaper.py
@@ -22,10 +22,25 @@
#
from gnuradio import gr, gr_unittest
-from gnuradio import blocks
-import digital_swig as digital
+from gnuradio import blocks, digital
+import pmt
+import numpy as np
-class qa_burst_shaper_cc (gr_unittest.TestCase):
+def make_length_tag(offset, length):
+ return gr.python_to_tag({'offset' : offset,
+ 'key' : pmt.intern('packet_len'),
+ 'value' : pmt.from_long(length),
+ 'srcid' : pmt.intern('qa_burst_shaper')})
+
+def compare_tags(a, b):
+ a = gr.tag_to_python(a)
+ b = gr.tag_to_python(b)
+ return a.key == b.key and a.offset == b.offset and \
+ a.value == b.value
+ #return a.key == b.key and a.offset == b.offset and \
+ # a.srcid == b.srcid and a.value == b.value
+
+class qa_burst_shaper (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
@@ -33,11 +48,227 @@ class qa_burst_shaper_cc (gr_unittest.TestCase):
def tearDown (self):
self.tb = None
- def test_001_t (self):
- # set up fg
+ def test_ff (self):
+ prepad = 10
+ postpad = 10
+ length = 20
+ data = np.ones(length + 10) # need 10 more to push things through
+ window = np.concatenate((-2.0*np.ones(5), -4.0*np.ones(5)))
+ tags = (make_length_tag(0, length),)
+ expected = np.concatenate((np.zeros(prepad), window[0:5],
+ np.ones(length - len(window)), window[5:10],
+ np.zeros(postpad)))
+ etag = make_length_tag(0, length + prepad + postpad)
+
+ # flowgraph
+ source = blocks.vector_source_f(data, tags=tags)
+ shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
+ post_padding=postpad)
+ sink = blocks.vector_sink_f()
+ self.tb.connect(source, shaper, sink)
+ self.tb.run ()
+
+ # checks
+ self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
+ self.assertTrue(compare_tags(sink.tags()[0], etag))
+
+ def test_cc (self):
+ prepad = 10
+ postpad = 10
+ length = 20
+ data = np.ones(length + 10,
+ dtype=complex) # need 10 more to push things through
+ window = np.concatenate((-2.0*np.ones(5, dtype=complex),
+ -4.0*np.ones(5, dtype=complex)))
+ tags = (make_length_tag(0, length),)
+ expected = np.concatenate((np.zeros(prepad, dtype=complex), window[0:5],
+ np.ones(length - len(window), dtype=complex),
+ window[5:10], np.zeros(postpad,
+ dtype=complex)))
+ etag = make_length_tag(0, length + prepad + postpad)
+
+ # flowgraph
+ source = blocks.vector_source_c(data, tags=tags)
+ shaper = digital.burst_shaper_cc(window, pre_padding=prepad,
+ post_padding=postpad)
+ sink = blocks.vector_sink_c()
+ self.tb.connect(source, shaper, sink)
+ self.tb.run ()
+
+ # checks
+ self.assertComplexTuplesAlmostEqual(sink.data(), expected, 6)
+ self.assertTrue(compare_tags(sink.tags()[0], etag))
+
+ def test_ff_with_phasing (self):
+ prepad = 10
+ postpad = 10
+ length = 20
+ data = np.ones(length + 10) # need 10 more to push things through
+ window = np.concatenate((-2.0*np.ones(5), -4.0*np.ones(5)))
+ tags = (make_length_tag(0, length),)
+ phasing = np.zeros(5)
+ for i in xrange(5):
+ phasing[i] = ((-1.0)**i)
+ expected = np.concatenate((np.zeros(prepad), phasing*window[0:5],
+ np.ones(length), phasing*window[5:10],
+ np.zeros(postpad)))
+ etag = make_length_tag(0, length + prepad + postpad + len(window))
+
+ # flowgraph
+ source = blocks.vector_source_f(data, tags=tags)
+ shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
+ post_padding=postpad,
+ insert_phasing=True)
+ sink = blocks.vector_sink_f()
+ self.tb.connect(source, shaper, sink)
+ self.tb.run ()
+
+ # checks
+ self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
+ self.assertTrue(compare_tags(sink.tags()[0], etag))
+
+ def test_cc_with_phasing (self):
+ prepad = 10
+ postpad = 10
+ length = 20
+ data = np.ones(length + 10,
+ dtype=complex) # need 10 more to push things through
+ window = np.concatenate((-2.0*np.ones(5, dtype=complex),
+ -4.0*np.ones(5, dtype=complex)))
+ tags = (make_length_tag(0, length),)
+ phasing = np.zeros(5, dtype=complex)
+ for i in xrange(5):
+ phasing[i] = complex((-1.0)**i)
+ expected = np.concatenate((np.zeros(prepad, dtype=complex),
+ phasing*window[0:5],
+ np.ones(length, dtype=complex),
+ phasing*window[5:10],
+ np.zeros(postpad, dtype=complex)))
+ etag = make_length_tag(0, length + prepad + postpad + len(window))
+
+ # flowgraph
+ source = blocks.vector_source_c(data, tags=tags)
+ shaper = digital.burst_shaper_cc(window, pre_padding=prepad,
+ post_padding=postpad,
+ insert_phasing=True)
+ sink = blocks.vector_sink_c()
+ self.tb.connect(source, shaper, sink)
+ self.tb.run ()
+
+ # checks
+ self.assertComplexTuplesAlmostEqual(sink.data(), expected, 6)
+ self.assertTrue(compare_tags(sink.tags()[0], etag))
+
+ def test_odd_window (self):
+ prepad = 10
+ postpad = 10
+ length = 20
+ data = np.ones(length + 10) # need 10 more to push things through
+ window = np.concatenate((-2.0*np.ones(5), -3.0*np.ones(1),
+ -4.0*np.ones(5)))
+ tags = (make_length_tag(0, length),)
+ expected = np.concatenate((np.zeros(prepad), window[0:6],
+ np.ones(length - len(window) - 1),
+ window[5:11], np.zeros(postpad)))
+ etag = make_length_tag(0, length + prepad + postpad)
+
+ # flowgraph
+ source = blocks.vector_source_f(data, tags=tags)
+ shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
+ post_padding=postpad)
+ sink = blocks.vector_sink_f()
+ self.tb.connect(source, shaper, sink)
+ self.tb.run ()
+
+ # checks
+ self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
+ self.assertTrue(compare_tags(sink.tags()[0], etag))
+
+ def test_short_burst (self):
+ prepad = 10
+ postpad = 10
+ length = 9
+ data = np.ones(length + 10) # need 10 more to push things through
+ window = np.concatenate((-2.0*np.ones(5), -3.0*np.ones(1),
+ -4.0*np.ones(5)))
+ tags = (make_length_tag(0, length),)
+ expected = np.concatenate((np.zeros(prepad), window[0:4],
+ np.ones(1), window[5:9],
+ np.zeros(postpad)))
+ etag = make_length_tag(0, length + prepad + postpad)
+
+ # flowgraph
+ source = blocks.vector_source_f(data, tags=tags)
+ shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
+ post_padding=postpad)
+ sink = blocks.vector_sink_f()
+ self.tb.connect(source, shaper, sink)
+ self.tb.run ()
+
+ # checks
+ self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
+ self.assertTrue(compare_tags(sink.tags()[0], etag))
+
+ def test_consecutive_bursts (self):
+ prepad = 10
+ postpad = 10
+ length1 = 15
+ length2 = 25
+ data = np.concatenate((np.ones(length1), -1.0*np.ones(length2),
+ np.zeros(10))) # need 10 more to push things through
+ window = np.concatenate((-2.0*np.ones(5), -4.0*np.ones(5)))
+ tags = (make_length_tag(0, length1), make_length_tag(length1, length2))
+ expected = np.concatenate((np.zeros(prepad), window[0:5],
+ np.ones(length1 - len(window)), window[5:10],
+ np.zeros(postpad + prepad), -1.0*window[0:5],
+ -1.0*np.ones(length2 - len(window)),
+ -1.0*window[5:10], np.zeros(postpad)))
+ etags = (make_length_tag(0, length1 + prepad + postpad),
+ make_length_tag(length1 + prepad + postpad,
+ length2 + prepad + postpad))
+
+ # flowgraph
+ source = blocks.vector_source_f(data, tags=tags)
+ shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
+ post_padding=postpad)
+ sink = blocks.vector_sink_f()
+ self.tb.connect(source, shaper, sink)
+ self.tb.run ()
+
+ # checks
+ self.assertFloatTuplesAlmostEqual(sink.data(), expected, 6)
+ for i in xrange(len(etags)):
+ self.assertTrue(compare_tags(sink.tags()[i], etags[i]))
+
+ def test_tag_gap (self):
+ prepad = 10
+ postpad = 10
+ length = 20
+ data = np.ones(2*length + 10) # need 10 more to push things through
+ window = np.concatenate((-2.0*np.ones(5), -4.0*np.ones(5)))
+ tags = (make_length_tag(0, length), make_length_tag(length + 5, length))
+ expected = np.concatenate((np.zeros(prepad), window[0:5],
+ np.ones(length - len(window)), window[5:10],
+ np.zeros(postpad)))
+ etags = (make_length_tag(0, length + prepad + postpad),
+ make_length_tag(length + prepad + postpad,
+ length + prepad + postpad))
+
+ # flowgraph
+ source = blocks.vector_source_f(data, tags=tags)
+ shaper = digital.burst_shaper_ff(window, pre_padding=prepad,
+ post_padding=postpad)
+ sink = blocks.vector_sink_f()
+ self.tb.connect(source, shaper, sink)
self.tb.run ()
- # check data
+
+ # checks
+ self.assertFloatTuplesAlmostEqual(sink.data(),
+ np.concatenate((expected, expected)),
+ 6)
+ for i in xrange(len(etags)):
+ self.assertTrue(compare_tags(sink.tags()[i], etags[i]))
if __name__ == '__main__':
- gr_unittest.run(qa_burst_shaper_cc, "qa_burst_shaper_cc.xml")
+ gr_unittest.run(qa_burst_shaper, "qa_burst_shaper.xml")
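
As a cross-check of the tag assertions above: each output length tag carries the tagged input length plus the configured prefix and suffix, and successive output bursts are laid out back to back. The sketch below restates that expectation in Python; it is derived from the tests above, not taken from the commit:

    # Expected offsets/values of the 'packet_len' tags on the shaper output,
    # derived from the tests above (illustrative sketch, not part of the diff).
    def expected_output_tags(burst_lengths, prepad, postpad,
                             window_len=0, insert_phasing=False):
        extra = window_len if insert_phasing else 0  # even-length window assumed
        tags, offset = [], 0
        for n in burst_lengths:
            value = n + prepad + postpad + extra
            tags.append((offset, value))
            offset += value  # bursts are emitted back to back on the output
        return tags

    # Matches test_consecutive_bursts: bursts of 15 and 25 samples, 10/10 padding.
    print(expected_output_tags([15, 25], 10, 10))  # -> [(0, 35), (35, 45)]
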