Skip to content

Commit

Permalink
TST: batched stimulus computation
Browse files Browse the repository at this point in the history
ENH: basic functionality for solving issue #1
  • Loading branch information
anwarnunez committed Apr 11, 2020
1 parent e579cea commit 0c2a2f3
Showing 1 changed file with 37 additions and 3 deletions.
40 changes: 37 additions & 3 deletions moten/tests/test_api.py
Original file line number Diff line number Diff line change
@@ -1,16 +1,15 @@
import numpy as np

import matplotlib
matplotlib.interactive(False)

from moten import (io,
api,
core,
utils,
)

# Load the test stimulus: a short public mp4 hosted on the project site,
# downsampled and converted to luminance frames for the motion-energy pyramid.
# NOTE(review): this is a GitHub diff render — the leading indentation of the
# `if 1:` body was stripped by the scrape, and BOTH the removed (100) and the
# added (200) `nimages` lines are shown; in the real file only `nimages = 200`
# exists (the later assignment wins here anyway). Verify against the repo.
if 1:
video_file = 'http://anwarnunez.github.io/downloads/avsnr150s24fps_tiny.mp4'
nimages = 100
# number of video frames to load for the test (bumped 100 -> 200 in this commit)
nimages = 200
small_size = (72, 128) # downsampled size (vdim, hdim) 16:9 aspect ratio
# video2luminance decodes the mp4 and returns luminance frames at `small_size`
luminance_images = io.video2luminance(video_file, size=small_size, nimages=nimages)
#luminance_images = io.video2grey(video_file, size=small_size, nimages=nimages)
Expand All @@ -27,10 +26,12 @@
# NOTE(review): this line closes a call whose opening is hidden in collapsed
# diff context above (presumably a pyramid/view constructor) — confirm there.
spatial_frequencies=(0,1,2,4))
print(pyramid_view)


# stimulus pyramid
##############################
# Build the stimulus-bound motion-energy pyramid directly from the loaded
# luminance frames. `stimulus_fps` is defined in hidden diff context above.
pyramid = api.StimulusMotionEnergy(luminance_images,
stimulus_fps,
# temporal FOV of each motion-energy filter, in frames
filter_temporal_width=10,
spatial_frequencies=(0,1,2,4))
# pyramid constructor is an attribute
print(pyramid.view)
Expand Down Expand Up @@ -61,3 +62,36 @@
# Manually reproduce the centered-filter responses from the raw sin/cos
# projections and check they match the pyramid's own output.
# NOTE(review): `hvcentered_filters`, `hvcentered_responses`, `filter_responses`
# and `DTYPE` come from diff context hidden above — verify their definitions.
hvresponses_sin, hvresponses_cos = pyramid.raw_projection(hvcentered_filters, dtype=DTYPE)
# motion energy = log-compressed quadrature magnitude of the sin/cos pair
responses_manualhv = utils.log_compress(utils.sqrt_sum_squares(hvresponses_sin, hvresponses_cos))
assert np.allclose(hvcentered_responses, responses_manualhv)

# project specific stimuli
# Projecting the full original stimulus must reproduce the stored responses.
filter_stimulus_responses = pyramid.project_stimulus(luminance_images,
dtype=DTYPE)
assert np.allclose(filter_stimulus_responses, filter_responses)


##############################
# stimulus batches
##############################

# project subset of original stimuli
first_frame, last_frame = 100, 110
filter_stimulus_responses = pyramid.project_stimulus(
luminance_images[first_frame:last_frame], dtype=DTYPE)

# these differ because of convolution edge effects
assert not np.allclose(filter_stimulus_responses,
filter_responses[first_frame:last_frame])

# we have to include a window in order to avoid edge effects.
# This window is determined by the FOV of the motion-energy filter.
# In this case, the filter is the same width as the stimulus frame
# rate.
filter_width = pyramid.view.definition.filter_temporal_width
# half the temporal FOV: frames of padding needed on each side of the batch
window = int(filter_width/2)
# re-project with `window` extra frames on both ends of the slice
windowed_filter_stimulus_responses = pyramid.project_stimulus(
luminance_images[first_frame - window:last_frame + window],
dtype=DTYPE)

# Now they are exactly the same
# (after trimming the padding frames from the windowed batch)
assert np.allclose(windowed_filter_stimulus_responses[window:-window],
filter_responses[first_frame:last_frame])


0 comments on commit 0c2a2f3

Please sign in to comment.