The following are code examples showing how to use memory-mapped file access with `numpy.memmap`. They are extracted from open source Python projects. You can vote up the examples you like or vote down the examples you don't like. You can also save this page to your account.
Example 1
def _read_from_header(self):
    """Read the file header and derive the data layout.

    Completes the header returned by ``self._get_header()`` with
    'data_offset', 'nb_channels', 'gain' and 'data_dtype' entries,
    memory-maps the data file once to count samples, and stores the
    resulting ``(n_samples_per_channel, nb_channels)`` shape on
    ``self._shape`` (total sample count on ``self.size``).

    Returns
    -------
    dict
        The completed header.
    """
    header, data_offset, nb_channels = self._get_header()
    header['data_offset'] = data_offset
    header['nb_channels'] = nb_channels
    # Gain is encoded as the first decimal number (e.g. "0.25") found in
    # the 'El' header field.  Raw string avoids the py3 invalid-escape
    # DeprecationWarning of the original "\d+\.\d+".
    header['gain'] = float(re.findall(r"\d+\.\d+", header['El'])[0])
    header['data_dtype'] = self.params['data_dtype']
    # Map the file read-only just long enough to measure it; use a local
    # instead of the original transient self.data attribute that was
    # created and immediately deleted.
    data = numpy.memmap(self.file_name, offset=header['data_offset'],
                        dtype=header['data_dtype'], mode='r')
    self.size = len(data)
    self._shape = (self.size // header['nb_channels'], header['nb_channels'])
    del data
    return header
Example 2
def test_validating(self):
    """Inject ground-truth juxta-cellular spikes, then run validation."""
    base, _ = os.path.splitext(os.path.basename(self.file_name))
    file_name, ext = os.path.splitext(self.file_name)
    file_out = os.path.join(os.path.abspath(file_name), base)
    result_name = os.path.join(file_name, 'injected')
    # Gather every spike-time array stored in the injected result file.
    result = h5py.File(os.path.join(result_name, '%s.result.hdf5' % base))
    spikes = {}
    for key in result.get('spiketimes').keys():
        spikes[key] = result.get('spiketimes/%s' % key)[:]
    # Write template 9's spike train into a fresh juxta-cellular trace.
    juxta_file = file_out + '.juxta.dat'
    f = numpy.memmap(juxta_file, shape=(self.length, 1),
                     dtype=self.parser.get('validating', 'juxta_dtype'),
                     mode='w+')
    f[spikes['temp_9']] = 100
    del f
    mpi_launch('validating', self.file_name, 2, 0, 'False')
Example 3
def _readData1(self, fd, meta, mmap=False, **kwds):
    """Read array data from the file descriptor for MetaArray v1 files.

    Parameters
    ----------
    fd : binary file object positioned just past the meta header.
    meta : dict with 'info' (axis descriptions), 'type' (dtype string)
        and 'shape' entries.
    mmap : if True, memory-map the remaining file instead of reading it
        into memory.
    readAllData : keyword; if False, only axis values/info are read and
        the method returns early without touching the array payload.
    """
    # Read in axis values for any axis that specifies a length.
    frameSize = 1
    for ax in meta['info']:
        if 'values_len' in ax:
            # np.frombuffer replaces the deprecated np.fromstring for
            # binary input; .copy() keeps the result writable, matching
            # fromstring's copying behavior.
            ax['values'] = np.frombuffer(fd.read(ax['values_len']),
                                         dtype=ax['values_type']).copy()
            frameSize *= ax['values_len']
            del ax['values_len']
            del ax['values_type']
    self._info = meta['info']
    if not kwds.get("readAllData", True):
        return
    # The remaining data is the actual array.
    if mmap:
        subarr = np.memmap(fd, dtype=meta['type'], mode='r', shape=meta['shape'])
    else:
        subarr = np.frombuffer(fd.read(), dtype=meta['type']).copy()
        subarr.shape = meta['shape']
    self._data = subarr
Example 4
def __mmap_ncs_packet_headers(self, filename):
    """
    Memory map of the Neuralynx .ncs file optimized for extraction of
    data packet headers
    Reading standard dtype improves speed, but timestamps need to be
    reconstructed
    """
    filesize = getsize(self.sessiondir + sep + filename)  # in byte
    # 16384 bytes is the plain-text file header; each data packet is
    # 261 little-endian uint32 words (4 bytes each).
    if filesize > 16384:
        # NOTE(review): the dtype string was lost when this snippet was
        # extracted; '<u4' is reconstructed from the 4-byte word
        # arithmetic below — confirm against the original source.
        data = np.memmap(self.sessiondir + sep + filename,
                         dtype='<u4',
                         # floor division: under Python 3 the original
                         # '/' produced a float, which is invalid as a
                         # memmap shape entry
                         shape=((filesize - 16384) // 4 // 261, 261),
                         mode='r', offset=16384)
        # The first two uint32 words of each packet hold the 64-bit
        # timestamp, low word first: ts = w0 + w1 * 2**32.
        ts = data[:, 0:2]
        multi = np.repeat(np.array([1, 2 ** 32], ndmin=2), len(data),
                          axis=0)
        timestamps = np.sum(ts * multi, axis=1)
        # timestamps = data[:,0] + (data[:,1] *2**32)
        header_u4 = data[:, 2:5]
        return timestamps, header_u4
    else:
        return None
Example 5
def __mmap_nev_file(self, filename):
    """ Memory map the Neuralynx .nev file """
    # NOTE(review): every dtype string in this record description was
    # lost when the snippet was extracted.  The widths below are
    # reconstructed from the standard 184-byte Neuralynx event packet
    # (9 * int16 + uint64 + 8 * int32 + 128-char string = 184 bytes) —
    # confirm against the original source.
    nev_dtype = np.dtype([
        ('reserved', '<i2'),
        ('system_id', '<i2'),
        ('data_size', '<i2'),
        ('timestamp', '<u8'),
        ('event_id', '<i2'),
        ('ttl_input', '<i2'),
        ('crc_check', '<i2'),
        ('dummy1', '<i2'),
        ('dummy2', '<i2'),
        ('extra', '<i4', (8,)),
        ('event_string', 'a128'),
    ])
    # 16384 bytes is the plain-text file header; a file no larger than
    # that contains no event records.
    if getsize(self.sessiondir + sep + filename) > 16384:
        return np.memmap(self.sessiondir + sep + filename,
                         dtype=nev_dtype, mode='r', offset=16384)
    else:
        return None
Example 6
def __read_nsx_data_variant_b(self, nsx_nb):
    """
    Extract nsx data (blocks) from a 2.2 or 2.3 .nsx file. Blocks can arise
    if the recording was paused by the user.
    """
    filename = '.'.join([self._filenames['nsx'], 'ns%i' % nsx_nb])
    channel_count = self.__nsx_basic_header[nsx_nb]['channel_count']
    data = {}
    # Map each paused-recording block separately at its own file offset.
    for block_id, block_header in self.__nsx_data_header[nsx_nb].items():
        block_shape = (block_header['nb_data_points'], channel_count)
        block_offset = block_header['offset_to_data_block']
        data[block_id] = np.memmap(filename, dtype='int16',
                                   shape=block_shape, offset=block_offset)
    return data
Example 7
def __read_nev_data(self, nev_data_masks, nev_data_types):
    """
    Extract nev data from a 2.1 or 2.2 .nev file
    """
    filename = '.'.join([self._filenames['nev'], 'nev'])
    data_size = self.__nev_basic_header['bytes_in_data_packets']
    header_size = self.__nev_basic_header['bytes_in_headers']
    # Raw packet layout: 4-byte timestamp, 2-byte packet id, and the
    # remaining (data_size - 6) bytes kept opaque for later re-viewing.
    packet_dtype = [
        ('timestamp', 'uint32'),
        ('packet_id', 'uint16'),
        ('value', 'S{0}'.format(data_size - 6))]
    raw_data = np.memmap(filename, offset=header_size, dtype=packet_dtype)
    masks = self.__nev_data_masks(raw_data['packet_id'])
    types = self.__nev_data_types(data_size)
    # Re-view each packet class with its specific dtype and select only
    # the packets matching the requested mask.
    return {
        k: raw_data.view(types[k][nev_data_types[k]])[masks[k][v]]
        for k, v in nev_data_masks.items()}
Example 8
def __get_nev_rec_times(self):
    """
    Extracts minimum and maximum time points from a nev file.
    """
    filename = '.'.join([self._filenames['nev'], 'nev'])
    packet_dtype = [('timestamp', 'uint32')]
    # Read only the very last packet: seek to file end minus one packet.
    packet_size = self.__nev_params('bytes_in_data_packets')
    offset = self.__get_file_size(filename) - packet_size
    last_packet = np.memmap(filename, offset=offset, dtype=packet_dtype)[0]
    unit = self.__nev_params('event_unit')
    n_starts = [0 * unit]
    n_stops = [last_packet['timestamp'] * unit]
    return n_starts, n_stops
Example 9
def _readData1(self, fd, meta, mmap=False, **kwds):
    """Read array data from the file descriptor for MetaArray v1 files.

    Parameters
    ----------
    fd : binary file object positioned just past the meta header.
    meta : dict with 'info' (axis descriptions), 'type' (dtype string)
        and 'shape' entries.
    mmap : if True, memory-map the remaining file instead of reading it
        into memory.
    readAllData : keyword; if False, only axis values/info are read and
        the method returns early without touching the array payload.
    """
    # Read in axis values for any axis that specifies a length.
    frameSize = 1
    for ax in meta['info']:
        if 'values_len' in ax:
            # np.frombuffer replaces the deprecated np.fromstring for
            # binary input; .copy() keeps the result writable, matching
            # fromstring's copying behavior.
            ax['values'] = np.frombuffer(fd.read(ax['values_len']),
                                         dtype=ax['values_type']).copy()
            frameSize *= ax['values_len']
            del ax['values_len']
            del ax['values_type']
    self._info = meta['info']
    if not kwds.get("readAllData", True):
        return
    # The remaining data is the actual array.
    if mmap:
        subarr = np.memmap(fd, dtype=meta['type'], mode='r', shape=meta['shape'])
    else:
        subarr = np.frombuffer(fd.read(), dtype=meta['type']).copy()
        subarr.shape = meta['shape']
    self._data = subarr
Example 10
def __mmap_ncs_packet_headers(self, filename):
"""
Memory map of the Neuralynx .ncs file optimized for extraction of
data packet headers