Skip to content

Commit

Permalink
merged ares-dev into default.
Browse files Browse the repository at this point in the history
  • Loading branch information
mirochaj committed Aug 13, 2017
2 parents 3de9b1a + 00ca9a4 commit d9c5805
Show file tree
Hide file tree
Showing 312 changed files with 3,544 additions and 3,094 deletions.
5 changes: 5 additions & 0 deletions .hgignore
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -33,3 +33,8 @@ input/bpass*
*.tar.gz
input/wmbasic
input/behroozi2013
input/inits/CosmoRec
input/starburst99/data/README
*.bak
*.backup
doc/example_*.py
Empty file modified CITATION
100644 → 100755
Empty file.
Empty file modified LICENSE
100644 → 100755
Empty file.
3 changes: 2 additions & 1 deletion README
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,8 @@ and optionally,
- `h5py <http://www.h5py.org/>`_
- `mpi4py <http://mpi4py.scipy.org>`_
- `emcee <http://dan.iel.fm/emcee/current/>`_
- `python-progressbar <https://code.google.com/p/python-progressbar/>`_
- `distpy <https://bitbucket.org/ktausch/distpy>`_
- `progressbar2 <http://progressbar-2.readthedocs.io/en/latest/>`_
- `setuptools <https://pypi.python.org/pypi/setuptools>`_
- `mpmath <http://mpmath.googlecode.com/svn-history/r1229/trunk/doc/build/setup.html>`_
- `shapely <https://pypi.python.org/pypi/Shapely>`_
Expand Down
Empty file modified ares/__init__.py
100644 → 100755
Empty file.
118 changes: 88 additions & 30 deletions ares/analysis/BlobFactory.py
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,11 @@
rank = 0
size = 1

try:
import h5py
except ImportError:
pass

def get_k(s):
    """Return the numeric index found inside square brackets in *s*.

    Searches *s* for a bracketed number (e.g. ``'blob[12]'``) and returns
    it as an ``int``. Assumes a match exists; a missing bracket raises
    ``AttributeError`` from the failed search, exactly as before.
    """
    bracketed = re.search(r"\[(\d+(\.\d*)?)\]", s)
    return int(bracketed.group(1))
Expand All @@ -57,7 +62,7 @@ def parse_attribute(blob_name, obj_base):

# Check for decimals
decimals = []
for i in range(1, len(blob_name) - 1):
for i in xrange(1, len(blob_name) - 1):
if blob_name[i-1].isdigit() and blob_name[i] == '.' \
and blob_name[i+1].isdigit():
decimals.append(i)
Expand All @@ -82,7 +87,7 @@ def parse_attribute(blob_name, obj_base):
# Nested attribute
blob_attr = None
obj_list = [obj_base]
for i in range(len(attr_split)):
for i in xrange(len(attr_split)):

# One particular chunk of the attribute name
s = attr_split[i]
Expand Down Expand Up @@ -114,11 +119,17 @@ class BlobFactory(object):
"""

def _parse_blobs(self):


hdf5_situation = False
try:
names = self.pf['blob_names']
except KeyError:
names = None
except TypeError:
hdf5_situation = True
f = h5py.File('%s.hdf5' % self.prefix, 'r')
names = f['blobs'].keys()
f.close()

if names is None:
self._blob_names = self._blob_ivars = None
Expand All @@ -131,8 +142,30 @@ def _parse_blobs(self):
assert type(names) in [list, tuple], \
"Must supply blob_names as list or tuple!"

self._blob_names = names
if 'blob_ivars' in self.pf:
if hdf5_situation:
f = h5py.File('%s.hdf5' % self.prefix, 'r')

_blob_ivars = []
_blob_ivarn = []
_blob_names = names
for name in names:
ivar = f['blobs'][name].attrs.get('ivar')

if ivar is None:
_blob_ivars.append(ivar)
else:
_blob_ivarn.append('unknown')
_blob_ivars.append(ivar.squeeze())

f.close()

# Re-organize...maybe eventually
self._blob_ivars = _blob_ivars
self._blob_ivarn = _blob_ivarn
self._blob_names = _blob_names

elif 'blob_ivars' in self.pf:
self._blob_names = names
if self.pf['blob_ivars'] is None:
self._blob_ivars = [None] * len(names)
else:
Expand All @@ -156,8 +189,9 @@ def _parse_blobs(self):
for l, pair in enumerate(element):
self._blob_ivarn[k].append(pair[0])
self._blob_ivars[k].append(pair[1])

else:
self._blob_names = names
self._blob_ivars = [None] * len(names)

self._blob_nd = []
Expand All @@ -169,7 +203,10 @@ def _parse_blobs(self):
if self._blob_ivars[i] is None:
self._blob_nd.append(0)
self._blob_dims.append(0)


if hdf5_situation:
continue

if self.pf['blob_funcs'] is None:
self._blob_funcs.append([None] * len(element))
elif self.pf['blob_funcs'][i] is None:
Expand Down Expand Up @@ -228,7 +265,7 @@ def blob_nbytes(self):

if not hasattr(self, '_blob_nbytes'):
nvalues = 0.
for i in range(self.blob_groups):
for i in xrange(self.blob_groups):
if self.blob_nd[i] == 0:
nvalues += len(self.blob_names[i])
else:
Expand All @@ -242,10 +279,16 @@ def blob_nbytes(self):
@property
def all_blob_names(self):
if not hasattr(self, '_all_blob_names'):
self._all_blob_names = []
for i in range(self.blob_groups):
self._all_blob_names.extend(self.blob_names[i])



if self.blob_groups is not None:
self._all_blob_names = []
for i in xrange(self.blob_groups):
self._all_blob_names.extend(self.blob_names[i])

else:
self._all_blob_names = self._blob_names

if len(set(self._all_blob_names)) != len(self._all_blob_names):
raise ValueError('Blobs must be unique!')

Expand All @@ -254,10 +297,17 @@ def all_blob_names(self):
@property
def blob_groups(self):
if not hasattr(self, '_blob_groups'):
if self.blob_nd is not None:
self._blob_groups = len(self.blob_nd)

nested = any(isinstance(i, list) for i in self.blob_names)

if nested:
if self.blob_nd is not None:
self._blob_groups = len(self.blob_nd)
else:
self._blob_groups = 0
else:
self._blob_groups = 0
self._blob_groups = None

return self._blob_groups

@property
Expand Down Expand Up @@ -312,7 +362,7 @@ def blobs(self):
return self._blobs

def get_ivars(self, name):
for i in range(self.blob_groups):
for i in xrange(self.blob_groups):
for j, blob in enumerate(self.blob_names[i]):
if blob == name:
break
Expand All @@ -327,7 +377,7 @@ def get_blob(self, name, ivar=None, tol=1e-2):
This is meant to recover a blob from a single simulation, i.e.,
NOT a whole slew of them from an MCMC.
"""
for i in range(self.blob_groups):
for i in xrange(self.blob_groups):
for j, blob in enumerate(self.blob_names[i]):
if blob == name:
break
Expand Down Expand Up @@ -365,7 +415,7 @@ def get_blob(self, name, ivar=None, tol=1e-2):
# Actually, we don't have to abide by that. As long as a function
# is provided we can evaluate the blob anywhere (with interp)

for n in range(2):
for n in xrange(2):
assert ivar[n] in self.blob_ivars[i][n]

k = list(self.blob_ivars[i][0]).index(ivar[0])
Expand Down Expand Up @@ -521,19 +571,27 @@ def blob_info(self, name):
index of blob group, index of element within group, dimensionality,
and exact dimensions of blob.
"""
found = False
for i, group in enumerate(self.blob_names):
for j, element in enumerate(group):

nested = any(isinstance(i, list) for i in self.blob_names)

if nested:

found = False
for i, group in enumerate(self.blob_names):
for j, element in enumerate(group):
if element == name:
found = True
break
if element == name:
found = True
break
if element == name:
break
if not found:
raise KeyError('Blob %s not found.' % name)

return i, j, self.blob_nd[i], self.blob_dims[i]
break

if not found:
raise KeyError('Blob %s not found.' % name)

return i, j, self.blob_nd[i], self.blob_dims[i]
else:
i = self.blob_names.index(name)
return None, None, self.blob_nd[i], self.blob_dims[i]

def _get_item(self, name):

Expand Down
Empty file modified ares/analysis/DerivedQuantities.py
100644 → 100755
Empty file.
7 changes: 6 additions & 1 deletion ares/analysis/GalaxyPopulation.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,9 +13,9 @@
import numpy as np
from ..util import read_lit
import matplotlib.pyplot as pl
from .MultiPlot import MultiPanel
from matplotlib.patches import Patch
from ..util.Stats import symmetrize_errors
from .MultiPlot import MultiPanel, add_master_legend

datasets_lf = ('oesch2013', 'oesch2014', 'bouwens2015', 'atek2015',
'parsa2016', 'finkelstein2015', 'vanderburg2010', 'alavi2016',
Expand Down Expand Up @@ -381,4 +381,9 @@ def annotated_legend(self, ax, loc=(0.95, 0.05), sources='all'):
pl.draw()

return ax

def add_master_legend(self, mp, **kwargs):
    """Add a single master legend to the multi-panel figure *mp*.

    Thin convenience wrapper: forwards *mp* and all keyword arguments to
    the module-level ``add_master_legend`` helper imported from
    ``.MultiPlot``, and returns its result.
    """
    return add_master_legend(mp, **kwargs)



20 changes: 19 additions & 1 deletion ares/analysis/Global21cm.py
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -268,8 +268,26 @@ def turning_points(self):

result = self.track.turning_points
result.update(fixes)

self._turning_points = result

##
# If there are multiple extrema (e.g, C and C'), fix order.
##

# Don't do this just yet. Going to break compatibility with
# some recent datasets.

#for tp in list('BCD'):
# if '%sp' % tp in self.turning_points:
# tmp_p = self.turning_points['%sp' % tp]
# tmp = self.turning_points[tp]
#
# del self.turning_points['%sp' % tp]
# del self.turning_points[tp]
#
# self.turning_points[tp] = tmp_p
# self.turning_points['%sp' % tp] = tmp

return self._turning_points

Expand Down
4 changes: 2 additions & 2 deletions ares/analysis/InlineAnalysis.py
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ def generate_blobs(self):

if not self.pf['tanh_model']:
blob_names = []
for i, pop in enumerate(range(self.pf.Npops)):
for i in xrange(self.pf.Npops):

if hasattr(self.sim, 'pops'):
pop = self.sim.pops[i]
Expand Down Expand Up @@ -166,7 +166,7 @@ def turning_points(self):

delay = self.pf['stop_delay']

for i in range(len(self.history['z'])):
for i in xrange(len(self.history['z'])):
if i < 10:
continue

Expand Down
Empty file modified ares/analysis/MetaGalacticBackground.py
100644 → 100755
Empty file.
Empty file modified ares/analysis/ModelSelection.py
100644 → 100755
Empty file.
Loading

0 comments on commit d9c5805

Please sign in to comment.