
Commit

1.5 : LONG TERM RELEASE. Early adopter release. Some parts of the plugin are not yet operational.
Fredrik Lindberg authored and committed on Aug 2, 2018
1 parent fa6695a commit d17310a
Showing 360 changed files with 16,568 additions and 16,979 deletions.
23 changes: 12 additions & 11 deletions BenchMarking/Benchmark_SUEWS.py
@@ -1,3 +1,4 @@
+from builtins import zip
 #!/usr/bin/env python
 import numpy as np
 #import pandas as pd
@@ -112,7 +113,7 @@ def load_res_all(fn_nml, plugin_dir):
     # 1. variables consisting of only NAN
     # 2. timestamp with any NAN
     res = pd.Panel({k: df.dropna(axis=1, how='all').dropna(axis=0, how='any')
-                    for k, df in res.to_frame().to_panel().iteritems()})
+                    for k, df in res.to_frame().to_panel().items()})

     # calculate benchmark metrics
     return res
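The bulk of this commit is a mechanical Python 2 to 3 port (the `from builtins import ...` lines suggest a futurize-style tool). The recurring `iteritems()` to `items()` rename reflects that Python 3 dicts expose only `items()`, which returns a view. A minimal sketch of the difference:

```python
# Minimal sketch of the dict-iteration change applied throughout this diff.
prices = {'QH': 1.2, 'QE': 0.8}

# Python 2: prices.iteritems() returned a lazy iterator; gone in Python 3.
for key, value in prices.items():  # Python 3: a lightweight view
    print(key, value)
```

One caveat: the rename is only safe on dict-like objects. On the old `pd.Panel`, `.items` was the items-axis `Index` rather than a method, so `to_panel().items()` above looks like it would raise `TypeError` at runtime (`Panel` retained `iteritems()`). That may be among the parts the commit message flags as not yet operational.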
@@ -128,7 +129,7 @@ def load_res_var(fn_nml, list_var_user, plugin_dir):
     list_var = list_var_valid.intersection(list_var_user)

     # select variables of interest
-    res = {k: v.loc[:, list_var] for k, v in res_all.iteritems()}
+    res = {k: v.loc[:, list_var] for k, v in res_all.items()}

     return res

@@ -138,7 +139,7 @@ def load_res(fn_nml, plugin_dir):
     prm = f90nml.read(fn_nml)
     prm_benchmark = prm['benchmark']
     list_var = prm_benchmark['list_var']
-    if not (isinstance(list_var, basestring) or isinstance(list_var, list)):
+    if not (isinstance(list_var, str) or isinstance(list_var, list)):
         res = load_res_all(fn_nml, plugin_dir)
     else:
         # res=kwargs.keys()
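Python 2's `basestring` was the shared base class of `str` and `unicode`; Python 3 has only `str`, so the ported check tests `str` directly (with `from builtins import str` smoothing over unicode on Python 2). A small sketch of the normalisation this check enables, using a hypothetical helper:

```python
# Sketch only; as_var_list is a hypothetical helper, not part of the plugin.
def as_var_list(list_var):
    # Accept a single variable name or a list of names, as load_res does.
    if isinstance(list_var, str):      # Python 2 would need basestring here
        return [list_var]
    if isinstance(list_var, list):
        return list_var
    raise TypeError('list_var must be a str or a list of str')

print(as_var_list('QH'))          # ['QH']
print(as_var_list(['QH', 'QE']))  # ['QH', 'QE']
```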
@@ -152,7 +153,7 @@ def plotMatMetric(res, base, func, title):

     # calculate metrics
     resPlot = pd.DataFrame([func(x, base)
-                            for k, x in res.iteritems()], index=res.keys())
+                            for k, x in res.items()], index=list(res.keys()))

     # rescale metrics values for plotting: [0,1]
     # resPlot_res = func_Norm(resPlot).dropna(axis=1) # nan will be dropped
@@ -213,7 +214,7 @@ def plotMatMetric(res, base, func, title):

 def plotMatMetricX(res_panel, func, title):
     res_comp = pd.Panel({x: res_panel[x]
-                         for x in res_panel.keys() if not x == 'base'})
+                         for x in list(res_panel.keys()) if not x == 'base'})
     fig = plotMatMetric(res_comp, res_panel['base'], func, title)
     return fig

@@ -273,16 +274,16 @@ def benchmarkSUEWS(fn_nml, plugin_dir):

     # calculate metrics based on different functions:
     res_metric = {f: pd.DataFrame([list_func[f](data[x], data['base'])
-                                   for x in data.keys() if not x == 'base'],
-                                  index=[x for x in data.keys() if not x == 'base']).dropna(axis=1)
-                  for f in list_func.keys()}
+                                   for x in list(data.keys()) if not x == 'base'],
+                                  index=[x for x in list(data.keys()) if not x == 'base']).dropna(axis=1)
+                  for f in list(list_func.keys())}
     res_metric = pd.Panel(res_metric)

     # calculate overall performance:
     # this method is very simple at the moment and needs to be refined with
     # more options
     res_score_sub = pd.DataFrame(
-        [1 - func_Norm(v.transpose().mean()) for key, v in res_metric.iteritems()])
+        [1 - func_Norm(v.transpose().mean()) for key, v in res_metric.items()])
     res_score = res_score_sub.mean(axis=0) * 100

     # plotting:
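The other recurring edit wraps `keys()` in `list(...)`. Automated porters do this defensively because Python 3's `keys()` returns a live view rather than a list; for plain iteration the wrapper is redundant, but it matters wherever the result is indexed or the dict changes underneath. A short sketch:

```python
# Sketch: Python 3 dict views versus materialised lists.
runs = {'base': 0, 'run1': 1, 'run2': 2}

view = runs.keys()            # live view: reflects later insertions
snapshot = list(runs.keys())  # frozen copy: safe to index or reuse

runs['run3'] = 3
print(len(view))      # 4 -- the view grew with the dict
print(len(snapshot))  # 3 -- the snapshot did not
print(snapshot[0])    # indexing works on the list, not on the view
```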
@@ -294,7 +295,7 @@ def benchmarkSUEWS(fn_nml, plugin_dir):
     # 2. sub-indicators
     # plot each metric in one page
     fig_metric = {name_func: plotMatMetricX(data, list_func[name_func], name_func)
-                  for name_func in list_func.keys()}
+                  for name_func in list(list_func.keys())}
     res_fig.update(fig_metric)

     return res_fig
@@ -310,7 +311,7 @@ def report_benchmark(fn_nml, plugin_dir):
     figs = benchmarkSUEWS(fn_nml, plugin_dir)
     with PdfPages(basename_output + '.pdf') as pdf:
         pdf.savefig(figs['score'], bbox_inches='tight', papertype='a4')
-        for k, x in figs.iteritems():
+        for k, x in figs.items():
             if k != 'score':
                 pdf.savefig(x, bbox_inches='tight',
                             papertype='a4', orientation='portrait')
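Beyond the 2-to-3 renames, this file leans on `pd.Panel` as its 3-D results container. Panel was deprecated in pandas 0.20 and removed in 0.25, so the ported code remains tied to older pandas releases. A minimal sketch of the usual replacement, assuming per-run DataFrames (the data below is illustrative only):

```python
# Sketch (illustrative data): replacing a pd.Panel with a MultiIndex frame.
import pandas as pd

frames = {
    'run1': pd.DataFrame({'QH': [1.0, 2.0], 'QE': [0.5, 0.4]}),
    'run2': pd.DataFrame({'QH': [1.1, 2.1], 'QE': [0.6, 0.5]}),
}

# The outer index level plays the role of the Panel's items axis.
stacked = pd.concat(frames, names=['run'])
print(stacked.loc['run1'])            # like panel['run1']
for run, df in stacked.groupby(level='run'):
    print(run, df.shape)              # like panel.iteritems()
```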
23 changes: 12 additions & 11 deletions BenchMarking/benchmarking.py
@@ -20,21 +20,22 @@
  *                                                                         *
  ***************************************************************************/
 """
-from PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplication
-from PyQt4.QtGui import QAction, QIcon, QFileDialog, QMessageBox
-# Initialize Qt resources from file resources.py
-# import resources
-# Import the code for the dialog
-from benchmarking_dialog import BenchMarkingDialog
+from __future__ import absolute_import
+from builtins import str
+from builtins import object
+from qgis.PyQt.QtCore import QSettings, QTranslator, qVersion, QCoreApplication
+from qgis.PyQt.QtWidgets import QAction, QFileDialog, QMessageBox
+from qgis.PyQt.QtGui import QIcon
+from .benchmarking_dialog import BenchMarkingDialog
 import os.path
 from ..Utilities import f90nml

-import Benchmark_SUEWS as bss
+from . import Benchmark_SUEWS as bss
 import numpy as np
 import webbrowser
 import shutil

-class BenchMarking:
+class BenchMarking(object):
     """QGIS Plugin Implementation."""

     def __init__(self, iface):
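The import rewrite is the heart of the QGIS 3 migration: `qgis.PyQt` is a shim shipped with QGIS that re-exports whichever Qt binding the running QGIS was built against, and it tracks the Qt5 module split in which widgets such as `QAction`, `QFileDialog`, and `QMessageBox` moved from `QtGui` to `QtWidgets` while `QIcon` stayed in `QtGui`. Likewise, `from __future__ import absolute_import` disables Python 2's implicit relative imports, which is why `import Benchmark_SUEWS` becomes the explicit `from . import Benchmark_SUEWS`. A small sketch of plugin code using these imports (the helper below is hypothetical, not part of this commit):

```python
# Sketch: imports via the qgis.PyQt shim, usable inside a QGIS 3 plugin.
from qgis.PyQt.QtWidgets import QAction
from qgis.PyQt.QtGui import QIcon

def make_toolbar_action(iface, icon_path, text):
    # Hypothetical helper: the common pattern for a plugin toolbar button.
    action = QAction(QIcon(icon_path), text, iface.mainWindow())
    iface.addToolBarIcon(action)
    return action
```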
@@ -157,7 +158,7 @@ def run(self):
         # Check the more unusual dependencies to prevent confusing errors later
         try:
             import pandas
-        except Exception, e:
+        except Exception as e:
             QMessageBox.critical(None, 'Error', 'The Benchmarking feature requires the pandas package to be installed. '
                                  'Please consult the FAQ in the manual for further information on how '
                                  'to install missing python packages.')
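`except Exception, e:` is the Python 2-only comma form and a syntax error under Python 3; the `as` form parses on Python 2.6+ and 3 alike. A minimal sketch, which also narrows the catch to `ImportError` since that is the failure actually being probed here:

```python
# Sketch: Python 3 exception binding, narrowed to the relevant error.
try:
    import pandas  # optional dependency, checked before the feature runs
except ImportError as err:   # 'as err' works on Python 2.6+ and Python 3
    print('pandas is missing:', err)
```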
@@ -268,7 +269,7 @@ def nml_save(self):

     def pdf_save(self):
         self.outputfile = self.fileDialogPDF.getSaveFileName(None, "Save File As:", None, "PDF (*.pdf)")
-        self.dlg.textOutputPDF.setText(self.outputfile)
+        self.dlg.textOutputPDF.setText(self.outputfile[0])

     def start_progress(self):

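In the Qt4 binding, `getSaveFileName` returned just the chosen path; the Qt5 binding returns a `(path, selected_filter)` tuple, hence the new `[0]`. Unpacking the pair tends to read better than indexing; a minimal sketch:

```python
# Sketch: Qt5's QFileDialog.getSaveFileName returns (path, selected_filter).
from qgis.PyQt.QtWidgets import QFileDialog

path, _filter = QFileDialog.getSaveFileName(
    None, "Save File As:", "", "PDF (*.pdf)")
if path:  # empty string when the user cancels the dialog
    print('saving to', path)
```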
@@ -325,6 +326,6 @@ def start_progress(self):
             shutil.copy(self.plugin_dir + '/benchmark.nml', self.dlg.textInput_NamelistOut.text())

     def help(self):
-        url = "http://umep-docs.readthedocs.io/en/latest/post_processor/Benchmark%20System.html"
+        url = "http://www.urban-climate.net/umep/UMEP_Manual#Benchmark_System"
         webbrowser.open_new_tab(url)

5 changes: 3 additions & 2 deletions BenchMarking/benchmarking_dialog.py
@@ -23,13 +23,14 @@

 import os

-from PyQt4 import QtGui, uic
+from qgis.PyQt import uic
+from qgis.PyQt.QtWidgets import QDialog

 FORM_CLASS, _ = uic.loadUiType(os.path.join(
     os.path.dirname(__file__), 'benchmarking_dialog_base.ui'))


-class BenchMarkingDialog(QtGui.QDialog, FORM_CLASS):
+class BenchMarkingDialog(QDialog, FORM_CLASS):
     def __init__(self, parent=None):
         """Constructor."""
         super(BenchMarkingDialog, self).__init__(parent)
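For context, this dialog uses the runtime-compiled UI pattern: `uic.loadUiType` parses the Qt Designer `.ui` file on the fly and returns a form class that is mixed into the dialog, whose `setupUi` builds the child widgets. A condensed sketch of the whole pattern, assuming the constructor calls `setupUi` as the standard QGIS plugin template does (that line falls outside this hunk):

```python
# Sketch: the loadUiType pattern condensed; paths/names follow the plugin.
import os
from qgis.PyQt import uic
from qgis.PyQt.QtWidgets import QDialog

FORM_CLASS, _ = uic.loadUiType(os.path.join(
    os.path.dirname(__file__), 'benchmarking_dialog_base.ui'))

class BenchMarkingDialog(QDialog, FORM_CLASS):
    def __init__(self, parent=None):
        super(BenchMarkingDialog, self).__init__(parent)
        self.setupUi(self)  # provided by FORM_CLASS; instantiates widgets
```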
2 changes: 1 addition & 1 deletion BenchMarking/resources_rc.py
@@ -7,7 +7,7 @@
 #
 # WARNING! All changes made in this file will be lost!

-from PyQt4 import QtCore
+from qgis.PyQt import QtCore

 qt_resource_data = "\
 \x00\x00\x04\x0a\
(The remaining changed files in this commit are not shown here.)

0 comments on commit d17310a
