diff --git a/BenchMarking/Benchmark_SUEWS.py b/BenchMarking/Benchmark_SUEWS.py index 87e93aa..f911f6e 100644 --- a/BenchMarking/Benchmark_SUEWS.py +++ b/BenchMarking/Benchmark_SUEWS.py @@ -1,3 +1,4 @@ +from builtins import zip #!/usr/bin/env python import numpy as np #import pandas as pd @@ -112,7 +113,7 @@ def load_res_all(fn_nml, plugin_dir): # 1. variables consisting of only NAN # 2. timestamp with any NAN res = pd.Panel({k: df.dropna(axis=1, how='all').dropna(axis=0, how='any') - for k, df in res.to_frame().to_panel().iteritems()}) + for k, df in res.to_frame().to_panel().items()}) # calculate benchmark metrics return res @@ -128,7 +129,7 @@ def load_res_var(fn_nml, list_var_user, plugin_dir): list_var = list_var_valid.intersection(list_var_user) # select variables of interest - res = {k: v.loc[:, list_var] for k, v in res_all.iteritems()} + res = {k: v.loc[:, list_var] for k, v in res_all.items()} return res @@ -138,7 +139,7 @@ def load_res(fn_nml, plugin_dir): prm = f90nml.read(fn_nml) prm_benchmark = prm['benchmark'] list_var = prm_benchmark['list_var'] - if not (isinstance(list_var, basestring) or isinstance(list_var, list)): + if not (isinstance(list_var, str) or isinstance(list_var, list)): res = load_res_all(fn_nml, plugin_dir) else: # res=kwargs.keys() @@ -152,7 +153,7 @@ def plotMatMetric(res, base, func, title): # calculate metrics resPlot = pd.DataFrame([func(x, base) - for k, x in res.iteritems()], index=res.keys()) + for k, x in res.items()], index=list(res.keys())) # rescale metrics values for plotting: [0,1] # resPlot_res = func_Norm(resPlot).dropna(axis=1) # nan will be dropped @@ -213,7 +214,7 @@ def plotMatMetric(res, base, func, title): def plotMatMetricX(res_panel, func, title): res_comp = pd.Panel({x: res_panel[x] - for x in res_panel.keys() if not x == 'base'}) + for x in list(res_panel.keys()) if not x == 'base'}) fig = plotMatMetric(res_comp, res_panel['base'], func, title) return fig @@ -273,16 +274,16 @@ def benchmarkSUEWS(fn_nml, 
plugin_dir): # calculate metrics based on different functions: res_metric = {f: pd.DataFrame([list_func[f](data[x], data['base']) - for x in data.keys() if not x == 'base'], - index=[x for x in data.keys() if not x == 'base']).dropna(axis=1) - for f in list_func.keys()} + for x in list(data.keys()) if not x == 'base'], + index=[x for x in list(data.keys()) if not x == 'base']).dropna(axis=1) + for f in list(list_func.keys())} res_metric = pd.Panel(res_metric) # calculate overall performance: # this method is very simple at the moment and needs to be refined with # more options res_score_sub = pd.DataFrame( - [1 - func_Norm(v.transpose().mean()) for key, v in res_metric.iteritems()]) + [1 - func_Norm(v.transpose().mean()) for key, v in res_metric.items()]) res_score = res_score_sub.mean(axis=0) * 100 # plotting: @@ -294,7 +295,7 @@ def benchmarkSUEWS(fn_nml, plugin_dir): # 2. sub-indicators # plot each metric in one page fig_metric = {name_func: plotMatMetricX(data, list_func[name_func], name_func) - for name_func in list_func.keys()} + for name_func in list(list_func.keys())} res_fig.update(fig_metric) return res_fig @@ -310,7 +311,7 @@ def report_benchmark(fn_nml, plugin_dir): figs = benchmarkSUEWS(fn_nml, plugin_dir) with PdfPages(basename_output + '.pdf') as pdf: pdf.savefig(figs['score'], bbox_inches='tight', papertype='a4') - for k, x in figs.iteritems(): + for k, x in figs.items(): if k != 'score': pdf.savefig(x, bbox_inches='tight', papertype='a4', orientation='portrait') diff --git a/BenchMarking/benchmarking.py b/BenchMarking/benchmarking.py index 8bb3e11..17634d9 100644 --- a/BenchMarking/benchmarking.py +++ b/BenchMarking/benchmarking.py @@ -20,21 +20,22 @@ * * ***************************************************************************/ """ -from PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplication -from PyQt4.QtGui import QAction, QIcon, QFileDialog, QMessageBox -# Initialize Qt resources from file resources.py -# import resources -# 
Import the code for the dialog -from benchmarking_dialog import BenchMarkingDialog +from __future__ import absolute_import +from builtins import str +from builtins import object +from qgis.PyQt.QtCore import QSettings, QTranslator, qVersion, QCoreApplication +from qgis.PyQt.QtWidgets import QAction, QFileDialog, QMessageBox +from qgis.PyQt.QtGui import QIcon +from .benchmarking_dialog import BenchMarkingDialog import os.path from ..Utilities import f90nml -import Benchmark_SUEWS as bss +from . import Benchmark_SUEWS as bss import numpy as np import webbrowser import shutil -class BenchMarking: +class BenchMarking(object): """QGIS Plugin Implementation.""" def __init__(self, iface): @@ -157,7 +158,7 @@ def run(self): # Check the more unusual dependencies to prevent confusing errors later try: import pandas - except Exception, e: + except Exception as e: QMessageBox.critical(None, 'Error', 'The Benchmarking feature requires the pandas package to be installed. ' 'Please consult the FAQ in the manual for further information on how' 'to install missing python packages.') @@ -268,7 +269,7 @@ def nml_save(self): def pdf_save(self): self.outputfile = self.fileDialogPDF.getSaveFileName(None, "Save File As:", None, "PDF (*.pdf)") - self.dlg.textOutputPDF.setText(self.outputfile) + self.dlg.textOutputPDF.setText(self.outputfile[0]) def start_progress(self): @@ -325,6 +326,6 @@ def start_progress(self): shutil.copy(self.plugin_dir + '/benchmark.nml', self.dlg.textInput_NamelistOut.text()) def help(self): - url = "http://umep-docs.readthedocs.io/en/latest/post_processor/Benchmark%20System.html" + url = "http://www.urban-climate.net/umep/UMEP_Manual#Benchmark_System" webbrowser.open_new_tab(url) diff --git a/BenchMarking/benchmarking_dialog.py b/BenchMarking/benchmarking_dialog.py index dd9d575..ae00844 100644 --- a/BenchMarking/benchmarking_dialog.py +++ b/BenchMarking/benchmarking_dialog.py @@ -23,13 +23,14 @@ import os -from PyQt4 import QtGui, uic +from qgis.PyQt import uic 
+from qgis.PyQt.QtWidgets import QDialog FORM_CLASS, _ = uic.loadUiType(os.path.join( os.path.dirname(__file__), 'benchmarking_dialog_base.ui')) -class BenchMarkingDialog(QtGui.QDialog, FORM_CLASS): +class BenchMarkingDialog(QDialog, FORM_CLASS): def __init__(self, parent=None): """Constructor.""" super(BenchMarkingDialog, self).__init__(parent) diff --git a/BenchMarking/resources_rc.py b/BenchMarking/resources_rc.py index 3941a4d..3df41ab 100644 --- a/BenchMarking/resources_rc.py +++ b/BenchMarking/resources_rc.py @@ -7,7 +7,7 @@ # # WARNING! All changes made in this file will be lost! -from PyQt4 import QtCore +from qgis.PyQt import QtCore qt_resource_data = "\ \x00\x00\x04\x0a\ diff --git a/DSMGenerator/dsm_generator.py b/DSMGenerator/dsm_generator.py index 8872e8d..1fa0025 100644 --- a/DSMGenerator/dsm_generator.py +++ b/DSMGenerator/dsm_generator.py @@ -20,20 +20,27 @@ * * ***************************************************************************/ """ -from PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplication, QFileInfo, QVariant -from PyQt4.QtGui import QAction, QIcon, QFileDialog, QMessageBox, QButtonGroup -from qgis.gui import QgsMapLayerComboBox, QgsMapLayerProxyModel, QgsFieldComboBox, QgsFieldProxyModel -from qgis.core import QgsVectorLayer, QgsField, QgsExpression, QgsVectorFileWriter +from __future__ import absolute_import +from future import standard_library +standard_library.install_aliases() +from builtins import str +from builtins import range +from builtins import object +from qgis.PyQt.QtCore import QSettings, QTranslator, qVersion, QCoreApplication, QFileInfo, QVariant +from qgis.PyQt.QtWidgets import QAction, QFileDialog, QMessageBox, QButtonGroup +from qgis.PyQt.QtGui import QIcon +from qgis.gui import QgsMapLayerComboBox, QgsFieldComboBox +from qgis.core import QgsVectorLayer, QgsField, QgsExpression, QgsExpressionContext, QgsExpressionContextScope, QgsVectorFileWriter, QgsMapLayerProxyModel, QgsFieldProxyModel, 
QgsRasterLayer, QgsCoordinateTransform from qgis.analysis import QgsZonalStatistics -import webbrowser, subprocess, urllib, ogr, osr, string +import webbrowser, subprocess, urllib.request, urllib.parse, urllib.error, ogr, osr, string import numpy as np from osgeo import gdal, ogr -from dsm_generator_dialog import DSMGeneratorDialog +from .dsm_generator_dialog import DSMGeneratorDialog import os.path import sys +from osgeo.gdalconst import * - -class DSMGenerator: +class DSMGenerator(object): """QGIS Plugin Implementation.""" def __init__(self, iface): @@ -171,11 +178,11 @@ def initGui(self): def savedsmfile(self): self.DSMoutputfile = self.DSMfileDialog.getSaveFileName(None, "Save File As:", None, "Raster Files (*.tif)") - self.dlg.DSMtextOutput.setText(self.DSMoutputfile) + self.dlg.DSMtextOutput.setText(self.DSMoutputfile[0]) def saveosmfile(self): self.OSMoutputfile = self.OSMfileDialog.getSaveFileName(None, "Save File As:", None, "Shapefiles (*.shp)") - self.dlg.OSMtextOutput.setText(self.OSMoutputfile) + self.dlg.OSMtextOutput.setText(self.OSMoutputfile[0]) def checkbox_canvas(self): extent = self.iface.mapCanvas().extent() @@ -195,7 +202,7 @@ def checkbox_layer(self): # Help button def help(self): - url = "http://umep-docs.readthedocs.io/en/latest/pre-processor/Spatial%20Data%20DSM%20Generator.html" + url = "http://www.urban-climate.net/umep/UMEP_Manual#Spatial_Data:_DSM_Generator" webbrowser.open_new_tab(url) def unload(self): @@ -218,13 +225,15 @@ def start_progress(self): #Check OS and dep if sys.platform == 'darwin': gdal_os_dep = '/Library/Frameworks/GDAL.framework/Versions/Current/Programs/' + #gdal_os_dep = '/Library/Frameworks/GDAL.framework/Programs:/Library/Frameworks/Python.framework/Versions/3.6/bin:' else: gdal_os_dep = '' if self.dlg.canvasButton.isChecked(): # Map Canvas extentCanvasCRS = self.iface.mapCanvas() - can_wkt = extentCanvasCRS.mapRenderer().destinationCrs().toWkt() + #can_wkt = extentCanvasCRS.mapRenderer().destinationCrs().toWkt() 
+ can_wkt = extentCanvasCRS.mapSettings().destinationCrs().toWkt() can_crs = osr.SpatialReference() can_crs.ImportFromWkt(can_wkt) # Raster Layer @@ -302,7 +311,7 @@ def start_progress(self): polygon_ln = fileInfo.baseName() polygon_field = self.layerComboManagerPolygonField.currentField() - idx = vlayer.fieldNameIndex(polygon_field) + idx = vlayer.fields().indexFromName(polygon_field) flname = vlayer.attributeDisplayName(idx) if idx == -1: @@ -368,17 +377,17 @@ def start_progress(self): # Make data queries to overpass-api urlStr = 'http://overpass-api.de/api/map?bbox=' + str(lonlatmin[0]) + ',' + str(lonlatmin[1]) + ',' + str(lonlatmax[0]) + ',' + str(lonlatmax[1]) - osmXml = urllib.urlopen(urlStr).read() + osmXml = urllib.request.urlopen(urlStr).read() #print urlStr # Make OSM building file osmPath = self.plugin_dir + '/temp/OSM_building.osm' osmFile = open(osmPath, 'w') - osmFile.write(osmXml) + osmFile.write(str(osmXml)) if os.fstat(osmFile.fileno()).st_size < 1: urlStr = 'http://api.openstreetmap.org/api/0.6/map?bbox=' + str(lonlatmin[0]) + ',' + str(lonlatmin[1]) + ',' + str(lonlatmax[0]) + ',' + str(lonlatmax[1]) - osmXml = urllib.urlopen(urlStr).read() - osmFile.write(osmXml) + osmXml = urllib.request.urlopen(urlStr).read() + osmFile.write(str(osmXml)) #print 'Open Street Map' if os.fstat(osmFile.fileno()).st_size < 1: QMessageBox.critical(None, "Error", "No OSM data available") @@ -388,13 +397,23 @@ def start_progress(self): outputshp = self.plugin_dir + '/temp/' - osmToShape = gdal_os_dep + 'ogr2ogr --config OSM_CONFIG_FILE "' + self.plugin_dir + '/osmconf.ini" -skipfailures -t_srs EPSG:' + str(rasEPSG) + ' -overwrite -nlt POLYGON -f "ESRI Shapefile" "' + outputshp + '" "' + osmPath + '"' + #osmToShape = gdal_os_dep + 'ogr2ogr --config OSM_CONFIG_FILE "' + self.plugin_dir + '/osmconf.ini" -skipfailures -t_srs EPSG:' + str(rasEPSG) + ' -overwrite -nlt POLYGON -f "ESRI Shapefile" "' + outputshp + '" "' + osmPath + '"' + osmToShape = gdal_os_dep + 'ogr2ogr 
--config OSM_CONFIG_FILE "' + self.plugin_dir + '/osmconf.ini" -skipfailures -t_srs EPSG:' + str( + rasEPSG) + ' -overwrite -nlt POLYGON -f "ESRI Shapefile" "' + outputshp + '" "' + osmPath + '"' + + #Qgs + #print(osmToShape) + print(gdal_os_dep) + #osmConf = 'export OSM_CONFIG_FILE=' + self.plugin_dir + '/osmconf.ini' + #print(osmToShape) if sys.platform == 'win32': si = subprocess.STARTUPINFO() si.dwFlags |= subprocess.STARTF_USESHOWWINDOW + #subprocess.call(osmConf, startupinfo=si) subprocess.call(osmToShape, startupinfo=si) else: + #os.system(osmConf) os.system(osmToShape) driver = ogr.GetDriverByName('ESRI Shapefile') @@ -405,6 +424,7 @@ def start_progress(self): osmPolygonPath = outputshp + 'multipolygons.shp' vlayer = QgsVectorLayer(osmPolygonPath, 'multipolygons', 'ogr') + #print(vlayer.isValid()) polygon_layer = vlayer fileInfo = QFileInfo(polygon_layer.source()) polygon_ln = fileInfo.baseName() @@ -424,10 +444,10 @@ def renameField(srcLayer, oldFieldName, newFieldName): vlayer.startEditing() vlayer.dataProvider().addAttributes([QgsField('bld_height', QVariant.Double, 'double', 3, 2)]) vlayer.updateFields() - bld_lvl = vlayer.fieldNameIndex('bld_levels') - hght = vlayer.fieldNameIndex('height') - bld_hght = vlayer.fieldNameIndex('bld_hght') - bld_height = vlayer.fieldNameIndex('bld_height') + bld_lvl = vlayer.fields().indexFromName('bld_levels') + hght = vlayer.fields().indexFromName('height') + bld_hght = vlayer.fields().indexFromName('bld_hght') + bld_height = vlayer.fields().indexFromName('bld_height') + bldLvlHght = float(self.dlg.doubleSpinBoxBldLvl.value()) illegal_chars = string.ascii_letters + "!#$%&'*+^_`|~:" + " " @@ -461,24 +481,42 @@ def renameField(srcLayer, oldFieldName, newFieldName): flname = vlayer.attributeDisplayName(bld_height) counterDiff = counter - counterNone + # Loading raster + fileInfo = QFileInfo(filepath_dem) + baseName = fileInfo.baseName() + rlayer = QgsRasterLayer(filepath_dem, baseName) + # Zonal statistics
vlayer.startEditing() - zoneStat = QgsZonalStatistics(vlayer, filepath_dem, "stat_", 1, QgsZonalStatistics.Mean) + #zoneStat = QgsZonalStatistics(vlayer, filepath_dem, "stat_", 1, QgsZonalStatistics.Mean) + zoneStat = QgsZonalStatistics(vlayer, rlayer, "stat_", 1, QgsZonalStatistics.Mean) zoneStat.calculateStatistics(None) - vlayer.dataProvider().addAttributes([QgsField('height_asl', QVariant.Double)]) + vlayer.dataProvider().addAttributes([QgsField('h_asl', QVariant.Double)]) vlayer.updateFields() - e = QgsExpression('stat_mean + ' + flname) - e.prepare(vlayer.pendingFields()) - idx = vlayer.fieldNameIndex('height_asl') + #e = QgsExpression('stat_mean + ' + flname) + #e.evaluate(vlayer.fields()) + idx = vlayer.fields().indexFromName('h_asl') + + features = [feat for feat in vlayer.getFeatures()] + + context = QgsExpressionContext() + scope = QgsExpressionContextScope() + + for feat in features: + scope.setFeature(feat) + context.appendScope(scope) + exp = QgsExpression('stat_mean + ' + flname) + feat[idx] = exp.evaluate(context) + vlayer.updateFeature(feat) - for f in vlayer.getFeatures(): - f[idx] = e.evaluate(f) - vlayer.updateFeature(f) + #for f in vlayer.getFeatures(): + # f[idx] = e.evaluate(f) + # vlayer.updateFeature(f) vlayer.commitChanges() vlayer.startEditing() - idx2 = vlayer.fieldNameIndex('stat_mean') + idx2 = vlayer.fields().indexFromName('stat_mean') vlayer.dataProvider().deleteAttributes([idx2]) vlayer.updateFields() vlayer.commitChanges() @@ -492,7 +530,7 @@ def renameField(srcLayer, oldFieldName, newFieldName): # Create the destination data source - gdalrasterize = gdal_os_dep + 'gdal_rasterize -a ' + 'height_asl' + ' -te ' + str(self.xMin) + ' ' + str(self.yMin) + ' ' + str(self.xMax) + ' ' + str(self.yMax) +\ + gdalrasterize = gdal_os_dep + 'gdal_rasterize -a ' + 'h_asl' + ' -te ' + str(self.xMin) + ' ' + str(self.yMin) + ' ' + str(self.xMax) + ' ' + str(self.yMax) +\ ' -tr ' + str(pixel_size) + ' ' + str(pixel_size) + ' -l "' + str(polygon_ln) 
+ '" "' \ + str(polygon_layer.source()) + '" "' + self.plugin_dir + '/temp/clipdsm.tif"' @@ -527,8 +565,8 @@ def renameField(srcLayer, oldFieldName, newFieldName): if self.dlg.checkBoxPolygon.isChecked(): vlayer.startEditing() - idxHght = vlayer.fieldNameIndex('height_asl') - idxBld = vlayer.fieldNameIndex('building') + idxHght = vlayer.fields().indexFromName('h_asl') + idxBld = vlayer.fields().indexFromName('building') features = vlayer.getFeatures() #for f in vlayer.getFeatures(): for f in features: @@ -548,11 +586,11 @@ def renameField(srcLayer, oldFieldName, newFieldName): #vlayer.deleteFeature(f.id()) vlayer.updateFields() vlayer.commitChanges() - QgsVectorFileWriter.writeAsVectorFormat(vlayer, str(self.OSMoutputfile), "UTF-8", None, "ESRI Shapefile") + QgsVectorFileWriter.writeAsVectorFormat(vlayer, str(self.OSMoutputfile[0]), "UTF-8", QgsCoordinateTransform(), "ESRI Shapefile") else: vlayer.startEditing() - idx3 = vlayer.fieldNameIndex('height_asl') + idx3 = vlayer.fields().indexFromName('h_asl') vlayer.dataProvider().deleteAttributes([idx3]) vlayer.updateFields() vlayer.commitChanges() @@ -565,7 +603,7 @@ def saveraster(gdal_data, filename, rows = gdal_data.RasterYSize cols = gdal_data.RasterXSize - outDs = gdal.GetDriverByName("GTiff").Create(filename, cols, rows, int(1), gdal.GDT_Float32) + outDs = gdal.GetDriverByName("GTiff").Create(filename, cols, rows, int(1), GDT_Float32) outBand = outDs.GetRasterBand(1) # write the data @@ -578,10 +616,10 @@ def saveraster(gdal_data, filename, outDs.SetGeoTransform(gdal_data.GetGeoTransform()) outDs.SetProjection(gdal_data.GetProjection()) - saveraster(dsm_raster, self.DSMoutputfile, dsm_array) + saveraster(dsm_raster, self.DSMoutputfile[0], dsm_array) # Load result into canvas - rlayer = self.iface.addRasterLayer(self.DSMoutputfile) + rlayer = self.iface.addRasterLayer(self.DSMoutputfile[0]) # Trigger a repaint if hasattr(rlayer, "setCacheImage"): diff --git a/DSMGenerator/dsm_generator_backup.py
b/DSMGenerator/dsm_generator_backup.py new file mode 100644 index 0000000..0c73ec5 --- /dev/null +++ b/DSMGenerator/dsm_generator_backup.py @@ -0,0 +1,643 @@ +# -*- coding: utf-8 -*- +""" +/*************************************************************************** + DSMGenerator + A QGIS plugin + This plugin generates a DSM from DEM and OSM or other polygon height data. + ------------------- + begin : 2017-10-26 + git sha : $Format:%H$ + copyright : (C) 2017 by Nils Wallenberg + email : nils.wallenberg@gvc.gu.se + ***************************************************************************/ + +/*************************************************************************** + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation; either version 2 of the License, or * + * (at your option) any later version. * + * * + ***************************************************************************/ +""" +from __future__ import absolute_import +from future import standard_library +standard_library.install_aliases() +from builtins import str +from builtins import range +from builtins import object +from qgis.PyQt.QtCore import QSettings, QTranslator, qVersion, QCoreApplication, QFileInfo, QVariant +from qgis.PyQt.QtWidgets import QAction, QFileDialog, QMessageBox, QButtonGroup +from qgis.PyQt.QtGui import QIcon +from qgis.gui import QgsMapLayerComboBox, QgsFieldComboBox +from qgis.core import QgsVectorLayer, QgsField, QgsExpression, QgsVectorFileWriter, QgsMapLayerProxyModel, QgsFieldProxyModel +from qgis.analysis import QgsZonalStatistics +import webbrowser, subprocess, urllib.request, urllib.parse, urllib.error, ogr, osr, string +import numpy as np +from osgeo import gdal, ogr +from .dsm_generator_dialog import DSMGeneratorDialog +import os.path +import sys + + +class DSMGenerator(object): + """QGIS Plugin Implementation.""" + + def 
__init__(self, iface): + """Constructor. + + :param iface: An interface instance that will be passed to this class + which provides the hook by which you can manipulate the QGIS + application at run time. + :type iface: QgsInterface + """ + # Save reference to the QGIS interface + self.iface = iface + # initialize plugin directory + self.plugin_dir = os.path.dirname(__file__) + # initialize locale + locale = QSettings().value('locale/userLocale')[0:2] + locale_path = os.path.join( + self.plugin_dir, + 'i18n', + 'DSMGenerator_{}.qm'.format(locale)) + + if os.path.exists(locale_path): + self.translator = QTranslator() + self.translator.load(locale_path) + + if qVersion() > '4.3.3': + QCoreApplication.installTranslator(self.translator) + + # Create the dialog (after translation) and keep reference + self.dlg = DSMGeneratorDialog() + + # Declare instance attributes + self.actions = [] + self.menu = self.tr(u'&DSM Generator') + # TODO: We are going to let the user set this up in a future iteration + # self.toolbar = self.iface.addToolBar(u'DSMGenerator') + # self.toolbar.setObjectName(u'DSMGenerator') + + # Declare variables + self.OSMoutputfile = None + self.DSMoutputfile = None + + if not (os.path.isdir(self.plugin_dir + '/temp')): + os.mkdir(self.plugin_dir + '/temp') + + # Access the raster layer + self.layerComboManagerDEM = QgsMapLayerComboBox(self.dlg.widgetRaster) + self.layerComboManagerDEM.setFilters(QgsMapLayerProxyModel.RasterLayer) + self.layerComboManagerDEM.setFixedWidth(175) + self.layerComboManagerDEM.setCurrentIndex(-1) + + # Access the vector layer and an attribute field + self.layerComboManagerPolygon = QgsMapLayerComboBox(self.dlg.widgetPolygon) + self.layerComboManagerPolygon.setCurrentIndex(-1) + self.layerComboManagerPolygon.setFilters(QgsMapLayerProxyModel.PolygonLayer) + self.layerComboManagerPolygon.setFixedWidth(175) + self.layerComboManagerPolygonField = QgsFieldComboBox(self.dlg.widgetField) + 
self.layerComboManagerPolygonField.setFilters(QgsFieldProxyModel.Numeric) + self.layerComboManagerPolygonField.setFixedWidth(150) + self.layerComboManagerPolygon.layerChanged.connect(self.layerComboManagerPolygonField.setLayer) + + # Set up of DSM file save dialog + self.DSMfileDialog = QFileDialog() + self.dlg.saveButton.clicked.connect(self.savedsmfile) + + # Set up of OSM polygon file save dialog + self.OSMfileDialog = QFileDialog() + self.dlg.savePolygon.clicked.connect(self.saveosmfile) + + # Set up for the Help button + self.dlg.helpButton.clicked.connect(self.help) + + # Set up for the Close button + self.dlg.closeButton.clicked.connect(self.resetPlugin) + + # Set up for the Run button + self.dlg.runButton.clicked.connect(self.start_progress) + + # Set up extent + self.dlg.canvasButton.toggled.connect(self.checkbox_canvas) + # self.dlg.layerButton.toggled.connect(self.checkbox_layer) + + self.layerComboManagerExtent = QgsMapLayerComboBox(self.dlg.widgetLayerExtent) + self.layerComboManagerExtent.setCurrentIndex(-1) + self.layerComboManagerExtent.layerChanged.connect(self.checkbox_layer) + self.layerComboManagerExtent.setFixedWidth(175) + + # noinspection PyMethodMayBeStatic + def tr(self, message): + return QCoreApplication.translate('DSMGenerator', message) + + def add_action( + self, + icon_path, + text, + callback, + enabled_flag=True, + add_to_menu=True, + add_to_toolbar=True, + status_tip=None, + whats_this=None, + parent=None): + + icon = QIcon(icon_path) + action = QAction(icon, text, parent) + action.triggered.connect(callback) + action.setEnabled(enabled_flag) + + if status_tip is not None: + action.setStatusTip(status_tip) + + if whats_this is not None: + action.setWhatsThis(whats_this) + + if add_to_toolbar: + self.toolbar.addAction(action) + + if add_to_menu: + self.iface.addPluginToMenu( + self.menu, + action) + + self.actions.append(action) + + return action + + def initGui(self): + """Create the menu entries and toolbar icons inside the QGIS 
GUI.""" + icon_path = ':/plugins/DSMGenerator/icon.png' + self.add_action( + icon_path, + text=self.tr(u'DSM Generator'), + callback=self.run, + parent=self.iface.mainWindow()) + + def savedsmfile(self): + self.DSMoutputfile = self.DSMfileDialog.getSaveFileName(None, "Save File As:", None, "Raster Files (*.tif)") + self.dlg.DSMtextOutput.setText(self.DSMoutputfile[0]) + + def saveosmfile(self): + self.OSMoutputfile = self.OSMfileDialog.getSaveFileName(None, "Save File As:", None, "Shapefiles (*.shp)") + self.dlg.OSMtextOutput.setText(self.OSMoutputfile[0]) + + def checkbox_canvas(self): + extent = self.iface.mapCanvas().extent() + self.dlg.lineEditNorth.setText(str(extent.yMaximum())) + self.dlg.lineEditSouth.setText(str(extent.yMinimum())) + self.dlg.lineEditWest.setText(str(extent.xMinimum())) + self.dlg.lineEditEast.setText(str(extent.xMaximum())) + + def checkbox_layer(self): + dem_layer_extent = self.layerComboManagerExtent.currentLayer() + if dem_layer_extent: + extent = dem_layer_extent.extent() + self.dlg.lineEditNorth.setText(str(extent.yMaximum())) + self.dlg.lineEditSouth.setText(str(extent.yMinimum())) + self.dlg.lineEditWest.setText(str(extent.xMinimum())) + self.dlg.lineEditEast.setText(str(extent.xMaximum())) + + # Help button + def help(self): + url = "http://www.urban-climate.net/umep/UMEP_Manual#Spatial_Data:_DSM_Generator" + webbrowser.open_new_tab(url) + + def unload(self): + """Removes the plugin menu item and icon from QGIS GUI.""" + for action in self.actions: + self.iface.removePluginMenu( + self.tr(u'&DSM Generator'), + action) + # self.iface.removeToolBarIcon(action) + # remove the toolbar + # del self.toolbar + + def run(self): + self.dlg.show() + self.dlg.exec_() + + def start_progress(self): + import datetime + start = datetime.datetime.now() + #Check OS and dep + if sys.platform == 'darwin': + gdal_os_dep = '/Library/Frameworks/GDAL.framework/Versions/Current/Programs/' + else: + gdal_os_dep = '' + + if 
self.dlg.canvasButton.isChecked(): + # Map Canvas + extentCanvasCRS = self.iface.mapCanvas() + can_wkt = extentCanvasCRS.mapRenderer().destinationCrs().toWkt() + can_crs = osr.SpatialReference() + can_crs.ImportFromWkt(can_wkt) + # Raster Layer + dem_layer = self.layerComboManagerDEM.currentLayer() + dem_prov = dem_layer.dataProvider() + dem_path = str(dem_prov.dataSourceUri()) + dem_raster = gdal.Open(dem_path) + #dem_wkt = dem_layer.exportToWkt() + dem_wkt = dem_raster.GetProjection() + dem_crs = osr.SpatialReference() + dem_crs.ImportFromWkt(dem_wkt) + #print can_crs, "-------------", dem_crs + if can_wkt != dem_crs: + extentCanvas = self.iface.mapCanvas().extent() + extentDEM = dem_layer.extent() + + transformExt = osr.CoordinateTransformation(can_crs, dem_crs) + + canminx = extentCanvas.xMinimum() + canmaxx = extentCanvas.xMaximum() + canminy = extentCanvas.yMinimum() + canmaxy = extentCanvas.yMaximum() + + canxymin = transformExt.TransformPoint(canminx, canminy) + canxymax = transformExt.TransformPoint(canmaxx, canmaxy) + + extDiffminx = canxymin[0] - extentDEM.xMinimum() # If smaller than zero = warning + extDiffminy = canxymin[1] - extentDEM.yMinimum() # If smaller than zero = warning + extDiffmaxx = canxymax[0] - extentDEM.xMaximum() # If larger than zero = warning + extDiffmaxy = canxymax[0] - extentDEM.yMaximum() # If larger than zero = warning + + if extDiffminx < 0 or extDiffminy < 0 or extDiffmaxx > 0 or extDiffmaxy > 0: + QMessageBox.warning(None, "Warning! 
Extent of map canvas is larger than raster extent.", "Change to an extent equal to or smaller than the raster extent.") + return + + # Extent + self.yMax = self.dlg.lineEditNorth.text() + self.yMin = self.dlg.lineEditSouth.text() + self.xMin = self.dlg.lineEditWest.text() + self.xMax = self.dlg.lineEditEast.text() + + if not self.DSMoutputfile: + QMessageBox.critical(None, "Error", "Specify a raster output file") + return + + if self.dlg.checkBoxPolygon.isChecked() and not self.OSMoutputfile: + QMessageBox.critical(None, "Error", "Specify an output file for OSM data") + return + + # Acquiring geodata and attributes + dem_layer = self.layerComboManagerDEM.currentLayer() + if dem_layer is None: + QMessageBox.critical(None, "Error", "No valid raster layer is selected") + return + else: + provider = dem_layer.dataProvider() + filepath_dem = str(provider.dataSourceUri()) + demRaster = gdal.Open(filepath_dem) + dem_layer_crs = osr.SpatialReference() + dem_layer_crs.ImportFromWkt(demRaster.GetProjection()) + self.dem_layer_unit = dem_layer_crs.GetAttrValue("UNIT") + posUnits = ['metre', 'US survey foot', 'meter', 'm', 'ft', 'feet', 'foot', 'ftUS', 'International foot'] # Possible units + if not self.dem_layer_unit in posUnits: + QMessageBox.critical(None, "Error", "Raster projection is not in metre or foot. 
Please reproject.") + return + + polygon_layer = self.layerComboManagerPolygon.currentLayer() + osm_layer = self.dlg.checkBoxOSM.isChecked() + if polygon_layer is None and osm_layer is False: + QMessageBox.critical(None, "Error", "No valid building height layer is selected") + return + elif polygon_layer: + vlayer = QgsVectorLayer(polygon_layer.source(), "buildings", "ogr") + fileInfo = QFileInfo(polygon_layer.source()) + polygon_ln = fileInfo.baseName() + + polygon_field = self.layerComboManagerPolygonField.currentField() + idx = vlayer.fieldNameIndex(polygon_field) + flname = vlayer.attributeDisplayName(idx) + + if idx == -1: + QMessageBox.critical(None, "Error", "An attribute with unique fields must be selected") + return + + ### main code ### + + self.dlg.progressBar.setRange(0, 5) + + self.dlg.progressBar.setValue(1) + + if self.dlg.checkBoxOSM.isChecked(): + dem_original = gdal.Open(filepath_dem) + dem_wkt = dem_original.GetProjection() + ras_crs = osr.SpatialReference() + ras_crs.ImportFromWkt(dem_wkt) + rasEPSG = ras_crs.GetAttrValue("PROJCS|AUTHORITY", 1) + if self.dlg.layerButton.isChecked(): + old_crs = ras_crs + elif self.dlg.canvasButton.isChecked(): + canvasCRS = self.iface.mapCanvas() + outputWkt = canvasCRS.mapRenderer().destinationCrs().toWkt() + old_crs = osr.SpatialReference() + old_crs.ImportFromWkt(outputWkt) + + wgs84_wkt = """ + GEOGCS["WGS 84", + DATUM["WGS_1984", + SPHEROID["WGS 84",6378137,298.257223563, + AUTHORITY["EPSG","7030"]], + AUTHORITY["EPSG","6326"]], + PRIMEM["Greenwich",0, + AUTHORITY["EPSG","8901"]], + UNIT["degree",0.01745329251994328, + AUTHORITY["EPSG","9122"]], + AUTHORITY["EPSG","4326"]]""" + + new_crs = osr.SpatialReference() + new_crs.ImportFromWkt(wgs84_wkt) + + transform = osr.CoordinateTransformation(old_crs, new_crs) + + minx = float(self.xMin) + miny = float(self.yMin) + maxx = float(self.xMax) + maxy = float(self.yMax) + lonlatmin = transform.TransformPoint(minx, miny) + lonlatmax = transform.TransformPoint(maxx, 
maxy) + + if ras_crs != old_crs: + rasTrans = osr.CoordinateTransformation(old_crs, ras_crs) + raslonlatmin = rasTrans.TransformPoint(float(self.xMin), float(self.yMin)) + raslonlatmax = rasTrans.TransformPoint(float(self.xMax), float(self.yMax)) + #else: + #raslonlatmin = [float(self.xMin), float(self.yMin)] + #raslonlatmax = [float(self.xMax), float(self.yMax)] + + self.xMin = raslonlatmin[0] + self.yMin = raslonlatmin[1] + self.xMax = raslonlatmax[0] + self.yMax = raslonlatmax[1] + + # Make data queries to overpass-api + urlStr = 'http://overpass-api.de/api/map?bbox=' + str(lonlatmin[0]) + ',' + str(lonlatmin[1]) + ',' + str(lonlatmax[0]) + ',' + str(lonlatmax[1]) + osmXml = urllib.request.urlopen(urlStr).read() + #print urlStr + + # Make OSM building file + osmPath = self.plugin_dir + '/temp/OSM_building.osm' + osmFile = open(osmPath, 'w') + osmFile.write(str(osmXml)) + if os.fstat(osmFile.fileno()).st_size < 1: + urlStr = 'http://api.openstreetmap.org/api/0.6/map?bbox=' + str(lonlatmin[0]) + ',' + str(lonlatmin[1]) + ',' + str(lonlatmax[0]) + ',' + str(lonlatmax[1]) + osmXml = urllib.request.urlopen(urlStr).read() + osmFile.write(str(osmXml)) + #print 'Open Street Map' + if os.fstat(osmFile.fileno()).st_size < 1: + QMessageBox.critical(None, "Error", "No OSM data available") + return + + osmFile.close() + + outputshp = self.plugin_dir + '/temp/' + + osmToShape = gdal_os_dep + 'ogr2ogr --config OSM_CONFIG_FILE "' + self.plugin_dir + '/osmconf.ini" -skipfailures -t_srs EPSG:' + str(rasEPSG) + ' -overwrite -nlt POLYGON -f "ESRI Shapefile" "' + outputshp + '" "' + osmPath + '"' + + if sys.platform == 'win32': + si = subprocess.STARTUPINFO() + si.dwFlags |= subprocess.STARTF_USESHOWWINDOW + subprocess.call(osmToShape, startupinfo=si) + else: + os.system(osmToShape) + + driver = ogr.GetDriverByName('ESRI Shapefile') + driver.DeleteDataSource(outputshp + 'lines.shp') + driver.DeleteDataSource(outputshp + 'multilinestrings.shp') + driver.DeleteDataSource(outputshp + 
'other_relations.shp') + driver.DeleteDataSource(outputshp + 'points.shp') + + osmPolygonPath = outputshp + 'multipolygons.shp' + vlayer = QgsVectorLayer(osmPolygonPath, 'multipolygons', 'ogr') + polygon_layer = vlayer + fileInfo = QFileInfo(polygon_layer.source()) + polygon_ln = fileInfo.baseName() + + def renameField(srcLayer, oldFieldName, newFieldName): + ds = gdal.OpenEx(srcLayer.source(), gdal.OF_VECTOR | gdal.OF_UPDATE) + ds.ExecuteSQL('ALTER TABLE {} RENAME COLUMN {} TO {}'.format(srcLayer.name(), oldFieldName, newFieldName)) + srcLayer.reload() + vlayer.startEditing() + renameField(vlayer, 'building_l', 'bld_levels') + renameField(vlayer, 'building_h', 'bld_hght') + renameField(vlayer, 'building_c', 'bld_colour') + renameField(vlayer, 'building_m', 'bld_materi') + renameField(vlayer, 'building_u', 'bld_use') + vlayer.commitChanges() + + vlayer.startEditing() + vlayer.dataProvider().addAttributes([QgsField('bld_height', QVariant.Double, 'double', 3, 2)]) + vlayer.updateFields() + bld_lvl = vlayer.fields().indexFromName('bld_levels') + hght = vlayer.fields().indexFromName('height') + bld_hght = vlayer.fields().indexFromName('bld_hght') + bld_height = vlayer.fields().indexFromName('bld_height') + + bldLvlHght = float(self.dlg.doubleSpinBoxBldLvl.value()) + illegal_chars = string.ascii_letters + "!#$%&'*+^_`|~:" + " " + counterNone = 0 + counter = 0 + #counterWeird = 0 + for feature in vlayer.getFeatures(): + if feature[hght]: + try: + #feature[bld_height] = float(re.sub("[^0-9]", ".", str(feature[hght]))) + feature[bld_height] = float(str(feature[hght]).translate(None, illegal_chars)) + except: + counterNone += 1 + elif feature[bld_hght]: + try: + #feature[bld_height] = float(re.sub("[^0-9]", ".", str(feature[bld_hght]))) + feature[bld_height] = float(str(feature[bld_hght]).translate(None, illegal_chars)) + except: + counterNone += 1 + elif feature[bld_lvl]: + try: + #feature[bld_height] = float(re.sub("[^0-9]", "", str(feature[bld_lvl])))*bldLvlHght + 
feature[bld_height] = float(str(feature[bld_lvl]).translate(None, illegal_chars)) * bldLvlHght + except: + counterNone += 1 + else: + counterNone += 1 + vlayer.updateFeature(feature) + counter += 1 + vlayer.commitChanges() + flname = vlayer.attributeDisplayName(bld_height) + counterDiff = counter - counterNone + + # Zonal statistics + vlayer.startEditing() + zoneStat = QgsZonalStatistics(vlayer, filepath_dem, "stat_", 1, QgsZonalStatistics.Mean) + zoneStat.calculateStatistics(None) + vlayer.dataProvider().addAttributes([QgsField('height_asl', QVariant.Double)]) + vlayer.updateFields() + e = QgsExpression('stat_mean + ' + flname) + e.prepare(vlayer.pendingFields()) + idx = vlayer.fields().indexFromName('height_asl') + + for f in vlayer.getFeatures(): + f[idx] = e.evaluate(f) + vlayer.updateFeature(f) + + vlayer.commitChanges() + + vlayer.startEditing() + idx2 = vlayer.fieldNameIndex('stat_mean') + vlayer.dataProvider().deleteAttributes([idx2]) + vlayer.updateFields() + vlayer.commitChanges() + + self.dlg.progressBar.setValue(2) + + # Convert polygon layer to raster + + # Define pixel_size and NoData value of new raster + pixel_size = int(self.dlg.spinBox.value()) # half picture size + + # Create the destination data source + + gdalrasterize = gdal_os_dep + 'gdal_rasterize -a ' + 'height_asl' + ' -te ' + str(self.xMin) + ' ' + str(self.yMin) + ' ' + str(self.xMax) + ' ' + str(self.yMax) +\ + ' -tr ' + str(pixel_size) + ' ' + str(pixel_size) + ' -l "' + str(polygon_ln) + '" "' \ + + str(polygon_layer.source()) + '" "' + self.plugin_dir + '/temp/clipdsm.tif"' + + gdalclipdem = gdal_os_dep + 'gdalwarp -dstnodata -9999 -q -overwrite -te ' + str(self.xMin) + ' ' + str(self.yMin) + ' ' + str(self.xMax) + ' ' + str(self.yMax) +\ + ' -tr ' + str(pixel_size) + ' ' + str(pixel_size) + \ + ' -of GTiff ' + '"' + filepath_dem + '" "' + self.plugin_dir + '/temp/clipdem.tif"' + + # Rasterize + if sys.platform == 'win32': + si = subprocess.STARTUPINFO() + si.dwFlags |= 
subprocess.STARTF_USESHOWWINDOW + subprocess.call(gdalrasterize, startupinfo=si) + subprocess.call(gdalclipdem, startupinfo=si) + else: + os.system(gdalrasterize) + os.system(gdalclipdem) + + self.dlg.progressBar.setValue(3) + + # Adding DSM to DEM + # Read DEM + dem_raster = gdal.Open(self.plugin_dir + '/temp/clipdem.tif') + dem_array = np.array(dem_raster.ReadAsArray().astype(np.float)) + dsm_raster = gdal.Open(self.plugin_dir + '/temp/clipdsm.tif') + dsm_array = np.array(dsm_raster.ReadAsArray().astype(np.float)) + + indx = dsm_array.shape + for ix in range(0, int(indx[0])): + for iy in range(0, int(indx[1])): + if int(dsm_array[ix, iy]) == 0: + dsm_array[ix, iy] = dem_array[ix, iy] + + if self.dlg.checkBoxPolygon.isChecked(): + vlayer.startEditing() + idxHght = vlayer.fields().indexFromName('height_asl') + idxBld = vlayer.fields().indexFromName('building') + features = vlayer.getFeatures() + #for f in vlayer.getFeatures(): + for f in features: + geom = f.geometry() + posUnitsMetre = ['metre', 'meter', 'm'] # Possible metre units + posUnitsFt = ['US survey foot', 'ft', 'feet', 'foot', 'ftUS', 'International foot'] # Possible foot units + if self.dem_layer_unit in posUnitsMetre: + sqUnit = 1 + elif self.dem_layer_unit in posUnitsFt: + sqUnit = 10.76 + if int(geom.area()) > 50000*sqUnit: + vlayer.deleteFeature(f.id()) + + #if not f[idxHght]: + #vlayer.deleteFeature(f.id()) + #elif not f[idxBld]: + #vlayer.deleteFeature(f.id()) + vlayer.updateFields() + vlayer.commitChanges() + QgsVectorFileWriter.writeAsVectorFormat(vlayer, str(self.OSMoutputfile), "UTF-8", None, "ESRI Shapefile") + + else: + vlayer.startEditing() + idx3 = vlayer.fields().indexFromName('height_asl') + vlayer.dataProvider().deleteAttributes([idx3]) + vlayer.updateFields() + vlayer.commitChanges() + + self.dlg.progressBar.setValue(4) + + # Save raster + def saveraster(gdal_data, filename, + raster): # gdal_data = raster extent, filename = output filename, raster = numpy array (raster to be saved) + 
rows = gdal_data.RasterYSize + cols = gdal_data.RasterXSize + + outDs = gdal.GetDriverByName("GTiff").Create(filename, cols, rows, int(1), gdal.GDT_Float32) + outBand = outDs.GetRasterBand(1) + + # write the data + outBand.WriteArray(raster, 0, 0) + # flush data to disk, set the NoData value and calculate stats + outBand.FlushCache() + outBand.SetNoDataValue(-9999) + + # georeference the image and set the projection + outDs.SetGeoTransform(gdal_data.GetGeoTransform()) + outDs.SetProjection(gdal_data.GetProjection()) + + saveraster(dsm_raster, self.DSMoutputfile, dsm_array) + + # Load result into canvas + rlayer = self.iface.addRasterLayer(self.DSMoutputfile) + + # Trigger a repaint + if hasattr(rlayer, "setCacheImage"): + rlayer.setCacheImage(None) + rlayer.triggerRepaint() + + self.dlg.progressBar.setValue(5) + + #runTime = datetime.datetime.now() - start + + if self.dlg.checkBoxOSM.isChecked(): + QMessageBox.information(self.dlg, 'DSM Generator', 'Operation successful! ' + str(counterDiff) + ' building polygons out of ' + str(counter) + ' contained height values.') + #self.iface.messageBar().pushMessage("DSM Generator. Operation successful! " + str(counterDiff) + " buildings out of " + str(counter) + " contained height values.", level=QgsMessageBar.INFO, duration=5) + else: + #self.iface.messageBar().pushMessage("DSM Generator. 
Operation successful!", level=QgsMessageBar.INFO, duration=5) + QMessageBox.information(self.dlg, 'DSM Generator', 'Operation successful!') + + self.resetPlugin() + + #print "finished run: %s\n\n" % (datetime.datetime.now() - start) + + def resetPlugin(self): # Reset plugin + self.dlg.canvasButton.setAutoExclusive(False) + self.dlg.canvasButton.setChecked(False) + self.dlg.layerButton.setAutoExclusive(False) + self.dlg.layerButton.setChecked(False) + self.dlg.checkBoxOSM.setCheckState(0) + self.dlg.checkBoxPolygon.setCheckState(0) + + # Extent + self.layerComboManagerExtent.setCurrentIndex(-1) + self.dlg.lineEditNorth.setText("") + self.dlg.lineEditSouth.setText("") + self.dlg.lineEditWest.setText("") + self.dlg.lineEditEast.setText("") + + # Output boxes + self.dlg.OSMtextOutput.setText("") + self.dlg.DSMtextOutput.setText("") + + # Input raster + self.layerComboManagerDEM.setCurrentIndex(-1) + + # Input polygon + self.layerComboManagerPolygon.setCurrentIndex(-1) + + # Progress bar + self.dlg.progressBar.setValue(0) + + # Spin boxes + self.dlg.spinBox.setValue(2) + self.dlg.doubleSpinBoxBldLvl.setValue(2.5) diff --git a/DSMGenerator/dsm_generator_dialog.py b/DSMGenerator/dsm_generator_dialog.py index 4e0327b..8b4197a 100644 --- a/DSMGenerator/dsm_generator_dialog.py +++ b/DSMGenerator/dsm_generator_dialog.py @@ -23,13 +23,14 @@ import os -from PyQt4 import QtGui, uic +from qgis.PyQt import uic +from qgis.PyQt.QtWidgets import QDialog FORM_CLASS, _ = uic.loadUiType(os.path.join( os.path.dirname(__file__), 'dsm_generator_dialog_base.ui')) -class DSMGeneratorDialog(QtGui.QDialog, FORM_CLASS): +class DSMGeneratorDialog(QDialog, FORM_CLASS): def __init__(self, parent=None): """Constructor.""" super(DSMGeneratorDialog, self).__init__(parent) diff --git a/DSMGenerator/resources.py b/DSMGenerator/resources.py index dd608f2..fef5a85 100644 --- a/DSMGenerator/resources.py +++ b/DSMGenerator/resources.py @@ -7,7 +7,7 @@ # # WARNING! 
All changes made in this file will be lost! -from PyQt4 import QtCore +from qgis.PyQt import QtCore qt_resource_data = "\ \x00\x00\x04\x0a\ diff --git a/ExtremeFinder/HeatWave/findHW.py b/ExtremeFinder/HeatWave/findHW.py index 6619cd5..b510365 100644 --- a/ExtremeFinder/HeatWave/findHW.py +++ b/ExtremeFinder/HeatWave/findHW.py @@ -1,3 +1,5 @@ +from builtins import str +from builtins import range # -*- coding: utf-8 -*- ########################################################################### # diff --git a/ExtremeFinder/HeatWave/plotHW.py b/ExtremeFinder/HeatWave/plotHW.py index 6cba55f..20e4c37 100644 --- a/ExtremeFinder/HeatWave/plotHW.py +++ b/ExtremeFinder/HeatWave/plotHW.py @@ -1,3 +1,5 @@ +from builtins import str +from builtins import range # -*- coding: utf-8 -*- ########################################### @@ -166,7 +168,7 @@ def plotHW(lat,lon,Tmax, xHW, hw_year_start, hw_year_end, labelsForPlot): dataForBoxplot.append(TmaxForBoxplot) lendataForBarchart.append(lenTmaxForBarchart) - YearList = range(hw_year_start,hw_year_end+1) + YearList = list(range(hw_year_start,hw_year_end+1)) # TdataList = [ [] for _ in range(len(YearList))] for i in range(0,len(YearList)): if not YearList[i]==YearsForBoxplot[-1]: @@ -202,7 +204,7 @@ def plotHW(lat,lon,Tmax, xHW, hw_year_start, hw_year_end, labelsForPlot): # Reverse colors and text labels to display the last value at the top. 
colors = colors[::-1] - xticks=range(1,len(YearsForBoxplot)+1,dyForTicks) + xticks=list(range(1,len(YearsForBoxplot)+1,dyForTicks)) plt.xlabel("Time (Years)") plt.ylabel("Days") plt.xticks(xticks,yearticks_lbl) diff --git a/ExtremeFinder/extreme_finder.py b/ExtremeFinder/extreme_finder.py index b18c48b..c2d4d46 100644 --- a/ExtremeFinder/extreme_finder.py +++ b/ExtremeFinder/extreme_finder.py @@ -20,15 +20,19 @@ * * ***************************************************************************/ """ -from PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplication -from PyQt4.QtGui import QAction, QIcon, QFileDialog, QMessageBox -from extreme_finder_dialog import ExtremeFinderDialog +from __future__ import absolute_import +from builtins import str +from builtins import object +from qgis.PyQt.QtCore import QSettings, QTranslator, qVersion, QCoreApplication +from qgis.PyQt.QtWidgets import QAction, QFileDialog, QMessageBox +from qgis.PyQt.QtGui import QIcon +from .extreme_finder_dialog import ExtremeFinderDialog import os.path import webbrowser from ..Utilities import f90nml -from HeatWave.findHW import * -from HeatWave.plotHW import plotHW -from PyQt4.QtCore import QDate, QObject, pyqtSignal, QThread +from .HeatWave.findHW import * +from .HeatWave.plotHW import plotHW +from qgis.PyQt.QtCore import QDate, QObject, pyqtSignal, QThread ########################################################################### # # Plugin @@ -36,7 +40,7 @@ ########################################################################### -class ExtremeFinder: +class ExtremeFinder(object): """QGIS Plugin Implementation.""" def __init__(self, iface): @@ -73,8 +77,10 @@ def __init__(self, iface): self.dlg.pushButtonSave_2.clicked.connect(self.infile) self.dlg.pushButtonSave_2.setEnabled(True) self.fileDialog = QFileDialog() - self.fileDialog.setFileMode(4) - self.fileDialog.setAcceptMode(1) + # self.fileDialog.setFileMode(4) + # self.fileDialog.setAcceptMode(1) + 
self.fileDialog.setFileMode(QFileDialog.Directory) + self.fileDialog.setOption(QFileDialog.ShowDirsOnly, True) self.folderPathRaw = 'None' self.save_file = None self.outputfile = 'None' @@ -236,20 +242,20 @@ def unload(self): def help(self): - url = "http://umep-docs.readthedocs.io/en/latest/processor/Outdoor%20Thermal%20Comfort%20ExtremeFinder.html" + url = "http://urban-climate.net/umep/UMEP_Manual#Outdoor_Thermal_Comfort:_ExtremeFinder" webbrowser.open_new_tab(url) def run(self): try: import pandas as pd - except Exception, e: + except Exception as e: QMessageBox.critical(None, 'Error', 'The Extreme Finder requires the pandas package ' 'to be installed. Please consult the manual for further information') return try: from netCDF4 import Dataset, date2num - except Exception, e: + except Exception as e: QMessageBox.critical(None, 'Error', 'The Extreme Finder requires the netCDF4 package ' 'to be installed. Please consult the manual for further information') return @@ -299,11 +305,11 @@ def error(self, exception, text): def infile(self): filename = QFileDialog.getOpenFileName() - if filename.split('.')[-1]=='nc': + if filename[0].split('.')[-1] == 'nc': # If a NetCDF file, try and get metadata to populate the dialog elements - (lat, lon, start_date, end_date) = get_ncmetadata(filename) + (lat, lon, start_date, end_date) = get_ncmetadata(filename[0]) - self.dlg.textInput.setText(filename) + self.dlg.textInput.setText(filename[0]) if (lat is not None) and (lon is not None): self.dlg.textOutput_lat.setText(str(lat)) self.dlg.textOutput_lon.setText(str(lon)) @@ -318,7 +324,7 @@ def outfile(self): # result = self.fileDialog.exec_() if not outputfile == 'None': self.outputfile = outputfile - self.dlg.textOutput.setText(self.outputfile) + self.dlg.textOutput.setText(self.outputfile[0]) def start_progress_wrapped(self): ''' @@ -326,7 +332,7 @@ def start_progress_wrapped(self): ''' try: self.start_progress() - except Exception, e: + except Exception as e: 
QMessageBox.critical(None, "Error", str(e)) self.dlg.runButton.setEnabled(True) @@ -383,7 +389,7 @@ def start_progress(self): file_name = filein else: raise ValueError('Invalid data format') - except Exception, e: + except Exception as e: raise Exception('Invalid input file data format') self.validateInputDates() @@ -391,9 +397,9 @@ def start_progress(self): if filein.split('.')[-1]=='nc': try: Tdata, unit, self.lat, self.lon, self.hw_start, self.hw_end = get_ncdata(file_name, self.hw_start.year, self.hw_end.year, var) - except KeyError,e: + except KeyError as e: raise Exception('NetCDF file must contain the variable %s in order to continue'%(e,)) - except Exception,e: + except Exception as e: raise e unit = '(' + str(unit) + ')' @@ -484,7 +490,7 @@ def start_progress(self): try: if len(result) == 0: raise ValueError('No Heat/Cold Wave Found') - except Exception, e: + except Exception as e: if mode == "HW": QMessageBox.critical(None, "Error", "No extreme high values found") if mode == "CW": diff --git a/ExtremeFinder/extreme_finder_dialog.py b/ExtremeFinder/extreme_finder_dialog.py index b3307ea..a3228df 100644 --- a/ExtremeFinder/extreme_finder_dialog.py +++ b/ExtremeFinder/extreme_finder_dialog.py @@ -23,13 +23,14 @@ import os -from PyQt4 import QtGui, uic +from qgis.PyQt import uic +from qgis.PyQt.QtWidgets import QDialog FORM_CLASS, _ = uic.loadUiType(os.path.join( os.path.dirname(__file__), 'extreme_finder_dialog_base.ui')) -class ExtremeFinderDialog(QtGui.QDialog, FORM_CLASS): +class ExtremeFinderDialog(QDialog, FORM_CLASS): def __init__(self, parent=None): """Constructor.""" super(ExtremeFinderDialog, self).__init__(parent) diff --git a/ExtremeFinder/resources.py b/ExtremeFinder/resources.py index c3a3790..1ba78c3 100644 --- a/ExtremeFinder/resources.py +++ b/ExtremeFinder/resources.py @@ -6,7 +6,7 @@ # # WARNING! All changes made in this file will be lost! 
-from PyQt4 import QtCore +from qgis.PyQt import QtCore qt_resource_data = "\ \x00\x00\x04\x0a\ diff --git a/FootprintModel/KingsFootprint_UMEP.py b/FootprintModel/KingsFootprint_UMEP.py index c1af8e6..84ebb10 100644 --- a/FootprintModel/KingsFootprint_UMEP.py +++ b/FootprintModel/KingsFootprint_UMEP.py @@ -1,3 +1,7 @@ +from __future__ import print_function +from builtins import zip +from builtins import str +from builtins import range ####################### FOOTPRINT MODEL WITH ITERATIONS TO CALC NEW ZO AND ZD ####################################### #%Date: 22 October 2015 %# #%Author: %# @@ -153,7 +157,7 @@ def footprintiterKAM(iterations,z_0_input,z_d_input,z_ag,sigv,Obukhov,ustar,dir, domain_y = domain_y / d_input fy = fx full = np.zeros([fx, fy]) - full[(fx+1)/2:int((fx+1)/2+(domain_x)),int((fy/2+1)-domain_y):int((fy/2+1)+domain_y+1)]=phi + full[int((fx+1)/2):int((fx+1)/2+(domain_x)),int((fy/2+1)-domain_y):int((fy/2+1)+domain_y+1)] = phi full[np.isnan(full)]=0 ##Rotation for wind angle for absolute plot and correction for rotation algorithm @@ -550,13 +554,15 @@ def FFP_climatology(zm=None, z0=None, umean=None, h=None, ol=None, sigmav=None, valids = [True if not any([val is None for val in vals]) else False \ for vals in zip(ustars, sigmavs, hs, ols, wind_dirs, zms)] - if verbosity > 1: print '' + if verbosity > 1: # fix_print_with_import + print('') for ix, (ustar, sigmav, h, ol, wind_dir, zm, z0, umean) \ in enumerate(zip(ustars, sigmavs, hs, ols, wind_dirs, zms, z0s, umeans)): # Counter if verbosity > 1 and ix % pulse == 0: - print 'Calculating footprint ', ix+1, ' of ', ts_len + # fix_print_with_import + print('Calculating footprint ', ix+1, ' of ', ts_len) valids[ix] = check_ffp_inputs(ustar, sigmav, h, ol, wind_dir, zm, z0, umean, rslayer, verbosity) @@ -630,7 +636,8 @@ def FFP_climatology(zm=None, z0=None, umean=None, h=None, ol=None, sigmav=None, vs = None clevs = None if n==0: - print "No footprint calculated" + # fix_print_with_import + print("No 
footprint calculated") flag_err = 1 else: @@ -957,12 +964,15 @@ def raise_ffp_exception(code, verbosity): raise Exception(string) elif ex['type'] == exTypes['alert']: string = string + '\n Execution continues.' - if verbosity > 1: print string + if verbosity > 1: # fix_print_with_import + print(string) elif ex['type'] == exTypes['error']: string = string + '\n Execution continues.' - if verbosity > 1: print string + if verbosity > 1: # fix_print_with_import + print(string) else: - if verbosity > 1: print string + if verbosity > 1: # fix_print_with_import + print(string) diff --git a/FootprintModel/footprint_model.py b/FootprintModel/footprint_model.py index 6d4bdae..cb026bd 100644 --- a/FootprintModel/footprint_model.py +++ b/FootprintModel/footprint_model.py @@ -20,21 +20,25 @@ * * ***************************************************************************/ """ -from PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplication -from PyQt4.QtGui import QAction, QIcon, QFileDialog, QMessageBox, QColor +from __future__ import absolute_import +from builtins import str +from builtins import object +from qgis.PyQt.QtCore import QSettings, QTranslator, qVersion, QCoreApplication +from qgis.PyQt.QtWidgets import QAction, QFileDialog, QMessageBox +from qgis.PyQt.QtGui import QIcon from qgis.gui import * -from qgis.core import * -from footprint_model_dialog import FootprintModelDialog +from qgis.core import QgsMapLayerProxyModel, QgsFeature, QgsGeometry, QgsVectorLayer, QgsProject +from .footprint_model_dialog import FootprintModelDialog import os.path import numpy as np -import KingsFootprint_UMEP as fp +from . 
import KingsFootprint_UMEP as fp from osgeo import gdal import subprocess import sys import webbrowser -class FootprintModel: +class FootprintModel(object): """QGIS Plugin Implementation.""" def __init__(self, iface): @@ -95,26 +99,17 @@ def __init__(self, iface): self.filePath = None self.data = None self.fileDialog = QFileDialog() - self.fileDialog.setFileMode(4) - self.fileDialog.setAcceptMode(1) + # self.fileDialog.setFileMode(4) + # self.fileDialog.setAcceptMode(1) + self.fileDialog.setFileMode(QFileDialog.Directory) + self.fileDialog.setOption(QFileDialog.ShowDirsOnly, True) - # self.layerComboManagerPoint = VectorLayerCombo(self.dlg.comboBox_Point) - # fieldgen = VectorLayerCombo(self.dlg.comboBox_Point, initLayer="", options={"geomType": QGis.Point}) self.layerComboManagerPoint = QgsMapLayerComboBox(self.dlg.widgetPointLayer) self.layerComboManagerPoint.setCurrentIndex(-1) self.layerComboManagerPoint.setFilters(QgsMapLayerProxyModel.PointLayer) self.layerComboManagerPoint.setFixedWidth(175) - # self.layerComboManagerPointField = FieldCombo(self.dlg.comboBox_Field, fieldgen, initField="") - # self.layerComboManagerDSMbuildground = RasterLayerCombo(self.dlg.comboBox_DSMbuildground) - # RasterLayerCombo(self.dlg.comboBox_DSMbuildground, initLayer="") - # self.layerComboManagerDEM = RasterLayerCombo(self.dlg.comboBox_DEM) - # RasterLayerCombo(self.dlg.comboBox_DEM, initLayer="") - # self.layerComboManagerDSMbuild = RasterLayerCombo(self.dlg.comboBox_DSMbuild) - # RasterLayerCombo(self.dlg.comboBox_DSMbuild, initLayer="") - # self.layerComboManagerVEGDSM = RasterLayerCombo(self.dlg.comboBox_vegdsm) - # RasterLayerCombo(self.dlg.comboBox_vegdsm, initLayer="") - - self.layerComboManagerDSMbuildground = QgsMapLayerComboBox(self.dlg.widgetDSMbuildground ) + + self.layerComboManagerDSMbuildground = QgsMapLayerComboBox(self.dlg.widgetDSMbuildground) self.layerComboManagerDSMbuildground .setFilters(QgsMapLayerProxyModel.RasterLayer) self.layerComboManagerDSMbuildground 
.setFixedWidth(175) self.layerComboManagerDSMbuildground .setCurrentIndex(-1) @@ -203,7 +198,7 @@ def unload(self): action) self.iface.removeToolBarIcon(action) # remove the toolbar - del self.toolbar + # del self.toolbar def run(self): self.dlg.show() @@ -239,15 +234,15 @@ def folder_path(self): def create_point(self, point): self.dlg.closeButton.setEnabled(1) - QgsMapLayerRegistry.instance().addMapLayer(self.poiLayer) + QgsProject.instance().addMapLayer(self.poiLayer) # create the feature fc = int(self.provider.featureCount()) feature = QgsFeature() - feature.setGeometry(QgsGeometry.fromPoint(point)) + feature.setGeometry(QgsGeometry.fromPointXY(point)) feature.setAttributes([fc, point.x(), point.y()]) self.poiLayer.startEditing() - self.poiLayer.addFeature(feature, True) + self.poiLayer.addFeature(feature) #, True self.poiLayer.commitChanges() self.poiLayer.triggerRepaint() self.dlg.setEnabled(True) @@ -258,7 +253,7 @@ def create_point(self, point): def select_point(self): # Connected to "Select Point on Canves" if self.poiLayer is not None: - QgsMapLayerRegistry.instance().removeMapLayer(self.poiLayer.id()) + QgsProject.instance().removeMapLayer(self.poiLayer.id()) self.canvas.setMapTool(self.pointTool) # Calls a canvas click and create_point self.dlg.setEnabled(False) self.create_point_layer() @@ -384,9 +379,7 @@ def start_process(self): gdalruntextdsm_build = gdalwarp_os_dep + ' -dstnodata -9999 -q -overwrite -te ' + str(x - r) + ' ' + str(y - r) + \ ' ' + str(x + r) + ' ' + str(y + r) + ' -of GTiff "' + \ filePath_dsm_build + '" "' + self.plugin_dir + '/data/clipdsm.tif"' - # gdalruntextdsm_build = 'gdalwarp -dstnodata -9999 -q -overwrite -cutline ' + dir_poly + \ - # ' -crop_to_cutline -of GTiff ' + filePath_dsm_build + \ - # ' ' + self.plugin_dir + '/data/clipdsm.tif' + if sys.platform == 'win32': subprocess.call(gdalruntextdsm_build, startupinfo=si) else: @@ -467,9 +460,7 @@ def start_process(self): gdalruntextvegdsm = gdalwarp_os_dep + ' -dstnodata -9999 
-q -overwrite -te ' + str(x - r) + ' ' + str(y - r) + \ ' ' + str(x + r) + ' ' + str(y + r) + ' -of GTiff "' + \ filePath_vegdsm + '" "' + self.plugin_dir + '/data/clipvegdsm.tif"' - # gdalruntextdsm_build = 'gdalwarp -dstnodata -9999 -q -overwrite -cutline ' + dir_poly + \ - # ' -crop_to_cutline -of GTiff ' + filePath_dsm_build + \ - # ' ' + self.plugin_dir + '/data/clipdsm.tif' + if sys.platform == 'win32': subprocess.call(gdalruntextvegdsm, startupinfo=si) else: @@ -544,9 +535,6 @@ def start_process(self): Obukhov=Obukhov,ustar=ustar,dir=wdir,porosity=por,h=pbl,bld=dsm,veg=vegdsm,rows=sizey,cols=sizex,res=res, dlg=self.dlg,maxfetch=r,rm=Rm) - #QMessageBox.critical(None, "FPR model complete") - #return - #If zd and z0 are lower than open country, set to open country for i in np.arange(0,it,1): if Wz_d_output[i]< 0.03: @@ -587,7 +575,11 @@ def start_process(self): rlayer.triggerRepaint() + QMessageBox.information(None, "Source Area Model: ", "Process successful!") + self.dlg.progressBar.setValue(0) + def help(self): - url = "http://umep-docs.readthedocs.io/en/latest/pre-processor/Urban%20Land%20Cover%20Land%20Cover%20" \ - "Fraction%20(Point).html" + # url = "file://" + self.plugin_dir + "/help/Index.html" + url = "http://www.urban-climate.net/umep/UMEP_Manual#Pre-Processor:" \ + "_Urban_Morphology:_Footprint_Model_.28Point.29" webbrowser.open_new_tab(url) \ No newline at end of file diff --git a/FootprintModel/footprint_model_dialog.py b/FootprintModel/footprint_model_dialog.py index 6865bd9..fdde5e6 100644 --- a/FootprintModel/footprint_model_dialog.py +++ b/FootprintModel/footprint_model_dialog.py @@ -23,13 +23,14 @@ import os -from PyQt4 import QtGui, uic +from qgis.PyQt import uic +from qgis.PyQt.QtWidgets import QDialog FORM_CLASS, _ = uic.loadUiType(os.path.join( os.path.dirname(__file__), 'footprint_model_dialog_base.ui')) -class FootprintModelDialog(QtGui.QDialog, FORM_CLASS): +class FootprintModelDialog(QDialog, FORM_CLASS): def __init__(self, 
parent=None): """Constructor.""" super(FootprintModelDialog, self).__init__(parent) diff --git a/FootprintModel/footprint_model_dialog_base.ui b/FootprintModel/footprint_model_dialog_base.ui index 6079c40..990606f 100644 --- a/FootprintModel/footprint_model_dialog_base.ui +++ b/FootprintModel/footprint_model_dialog_base.ui @@ -82,6 +82,9 @@ p, li { white-space: pre-wrap; } + + false + 175 @@ -173,6 +176,9 @@ p, li { white-space: pre-wrap; } + + false + 175 @@ -296,6 +302,9 @@ p, li { white-space: pre-wrap; } + + false + 175 @@ -816,12 +825,12 @@ p, li { white-space: pre-wrap; } setEnabled(bool) - 96 - 325 + 115 + 297 - 653 - 356 + 682 + 301 @@ -832,12 +841,12 @@ p, li { white-space: pre-wrap; } setEnabled(bool) - 83 - 325 + 102 + 297 - 503 - 353 + 599 + 298 @@ -848,12 +857,12 @@ p, li { white-space: pre-wrap; } setDisabled(bool) - 72 - 325 + 91 + 297 - 653 - 382 + 342 + 327 @@ -864,12 +873,12 @@ p, li { white-space: pre-wrap; } setDisabled(bool) - 74 - 325 + 93 + 297 - 653 - 408 + 342 + 353 @@ -880,12 +889,12 @@ p, li { white-space: pre-wrap; } setDisabled(bool) - 71 - 325 + 90 + 297 - 653 - 434 + 342 + 379 @@ -896,12 +905,12 @@ p, li { white-space: pre-wrap; } setDisabled(bool) - 111 - 325 + 130 + 297 - 653 - 486 + 342 + 405 @@ -912,12 +921,12 @@ p, li { white-space: pre-wrap; } setDisabled(bool) - 123 - 325 + 142 + 297 - 653 - 512 + 342 + 431 @@ -928,12 +937,12 @@ p, li { white-space: pre-wrap; } setDisabled(bool) - 134 - 325 + 153 + 297 - 653 - 538 + 682 + 327 @@ -944,12 +953,12 @@ p, li { white-space: pre-wrap; } setDisabled(bool) - 163 - 325 + 182 + 297 - 653 - 564 + 682 + 353 @@ -960,8 +969,8 @@ p, li { white-space: pre-wrap; } accept() - 662 - 784 + 691 + 593 243 @@ -980,8 +989,8 @@ p, li { white-space: pre-wrap; } 39 - 439 - 69 + 501 + 48 @@ -992,12 +1001,12 @@ p, li { white-space: pre-wrap; } setDisabled(bool) - 110 - 119 + 129 + 104 - 193 - 145 + 501 + 110 @@ -1008,12 +1017,12 @@ p, li { white-space: pre-wrap; } setDisabled(bool) - 121 - 119 + 140 + 104 - 
237 - 171 + 501 + 146 @@ -1024,12 +1033,12 @@ p, li { white-space: pre-wrap; } setEnabled(bool) - 145 - 119 + 164 + 104 - 237 - 197 + 501 + 182 @@ -1040,12 +1049,12 @@ p, li { white-space: pre-wrap; } setEnabled(bool) - 91 - 248 + 110 + 238 - 653 - 275 + 255 + 239 @@ -1056,12 +1065,92 @@ p, li { white-space: pre-wrap; } setDisabled(bool) - 119 - 325 + 138 + 297 + + + 682 + 379 + + + + + checkBoxUseVeg + clicked(bool) + widgetVegDSM + setEnabled(bool) + + + 70 + 233 + + + 520 + 236 + + + + + checkBoxVectorLayer + clicked(bool) + widgetPointLayer + setEnabled(bool) + + + 31 + 35 + + + 515 + 31 + + + + + checkBoxOnlyBuilding + clicked(bool) + widgetDSMbuild + setEnabled(bool) + + + 45 + 98 + + + 521 + 171 + + + + + checkBoxOnlyBuilding + clicked(bool) + widgetDSMbuildground + setDisabled(bool) + + + 82 + 98 + + + 517 + 100 + + + + + checkBoxOnlyBuilding + clicked(bool) + widgetDEM + setDisabled(bool) + + + 102 + 93 - 653 - 590 + 550 + 138 diff --git a/FootprintModel/resources_rc.py b/FootprintModel/resources_rc.py index 4210267..5571520 100644 --- a/FootprintModel/resources_rc.py +++ b/FootprintModel/resources_rc.py @@ -7,7 +7,7 @@ # # WARNING! All changes made in this file will be lost! 
-from PyQt4 import QtCore +from qgis.PyQt import QtCore qt_resource_data = "\ \x00\x00\x04\x0a\ diff --git a/GreaterQF/PythonQF2/Config.py b/GreaterQF/PythonQF2/Config.py index c618898..238fc9b 100644 --- a/GreaterQF/PythonQF2/Config.py +++ b/GreaterQF/PythonQF2/Config.py @@ -1,3 +1,6 @@ +from builtins import str +from builtins import map +from builtins import object from datetime import date as dtd import datetime as dt from ...Utilities import f90nml as nml @@ -7,7 +10,7 @@ def to_date(x): return dt.datetime.strptime(x, '%Y-%m-%d').date() -class Config: +class Config(object): def __init__(self): self.dt_start = None @@ -43,14 +46,14 @@ def loadFromNamelist(self, configFile): # Load the properties from a namelist try: CONFIG = nml.read(configFile) - except Exception,e: + except Exception as e: raise ValueError('Could not process config file ' + configFile + ': ' + str(e)) # Try dates out try: - self.dt_start = map(function=to_date, sequence=CONFIG['input_nml']['start_dates']) - self.dt_end = map(function=to_date, sequence=CONFIG['input_nml']['end_dates']) - except Exception, e: + self.dt_start = list(map(function=to_date, sequence=CONFIG['input_nml']['start_dates'])) + self.dt_end = list(map(function=to_date, sequence=CONFIG['input_nml']['end_dates'])) + except Exception as e: raise ValueError('Could not interpret dates for model configuration: ' + str(e)) self.checkInput(CONFIG['input_nml']) diff --git a/GreaterQF/PythonQF2/DailyEnergyLoading.py b/GreaterQF/PythonQF2/DailyEnergyLoading.py index f41c628..77a73e8 100644 --- a/GreaterQF/PythonQF2/DailyEnergyLoading.py +++ b/GreaterQF/PythonQF2/DailyEnergyLoading.py @@ -1,3 +1,6 @@ +from __future__ import absolute_import +from builtins import map +from builtins import object # Object that stores and retrieves coefficients that describe the total energy used (gas or electricity) each day # This allows GreaterQF to disaggregate the annual total to a particular day import os @@ -8,11 +11,11 @@ pass import pytz -from 
DataManagement.DailyLoading import DailyLoading -from DataManagement.LookupLogger import LookupLogger +from .DataManagement.DailyLoading import DailyLoading +from .DataManagement.LookupLogger import LookupLogger -class DailyEnergyLoading: +class DailyEnergyLoading(object): ''' Manage daily energy loadings for different energy types ''' @@ -54,18 +57,18 @@ def dealWithInputFile(self, file): # Check file is of correct format dl = pd.read_csv(file,skipinitialspace=True) - dl.columns = map(lower, dl.columns) + dl.columns = list(map(lower, dl.columns)) # Expect certain keywords - if 'fuel' not in dl.keys(): + if 'fuel' not in list(dl.keys()): raise ValueError('First column of first row must be \'Fuel\' in ' + file) - if 'gas' not in dl.keys(): + if 'gas' not in list(dl.keys()): raise ValueError('One of the column headers in ' + file + ' must be \'Gas\'') - if 'elec' not in dl.keys(): + if 'elec' not in list(dl.keys()): raise ValueError('One of the column headers in ' + file + ' must be \'Elec\'') - rowHeaders = map(lower, dl.fuel[0:3]) + rowHeaders = list(map(lower, dl.fuel[0:3])) if 'startdate' != rowHeaders[0]: raise ValueError('First column of second row must be \'StartDate\' in ' + file) @@ -78,17 +81,17 @@ def dealWithInputFile(self, file): firstDataLine = 3 # Try to extract the timezone from the file header try: - tz = pytz.timezone(dl[dl.keys()[1]][2]) + tz = pytz.timezone(dl[list(dl.keys())[1]][2]) except Exception: - raise ValueError('Invalid timezone "' + dl[dl.keys()[1]][2] + '" specified in ' + file + + raise ValueError('Invalid timezone "' + dl[list(dl.keys())[1]][2] + '" specified in ' + file + '. 
This should be of the form "UTC" or "Europe/London" as per python timezone documentation') # Rest of rows 1 and 2 should be dates try: - sd = pd.datetime.strptime(dl[dl.keys()[1]][0], '%Y-%m-%d') - ed = pd.datetime.strptime(dl[dl.keys()[1]][1], '%Y-%m-%d') - except Exception, e: + sd = pd.datetime.strptime(dl[list(dl.keys())[1]][0], '%Y-%m-%d') + ed = pd.datetime.strptime(dl[list(dl.keys())[1]][1], '%Y-%m-%d') + except Exception as e: raise Exception('The second and third rows of ' + file + ' must be dates in the format YYYY-mm-dd') sd = tz.localize(sd) diff --git a/GreaterQF/PythonQF2/DailyFactors.py b/GreaterQF/PythonQF2/DailyFactors.py index db3981a..aab5b6b 100644 --- a/GreaterQF/PythonQF2/DailyFactors.py +++ b/GreaterQF/PythonQF2/DailyFactors.py @@ -1,8 +1,11 @@ +from __future__ import print_function +from __future__ import absolute_import +from builtins import object # Stores and retrieves daily factors for buildings from datetime import datetime as dt -from DataManagement.temporalHelpers import holidaysForYear +from .DataManagement.temporalHelpers import holidaysForYear -class DailyFact: +class DailyFact(object): def __init__(self, use_uk_holidays, custom_holidays=[]): ''' :param use_uk_holidays: Boolean: Generate and use standard UK holidays @@ -34,11 +37,19 @@ def testIt(): import pandas as pd a = DailyFact(True) date = pd.date_range(pd.datetime.strptime('2015-01-01 00:00', '%Y-%m-%d %H:%M'), tz='Europe/London', periods=5)[1] - print a.getFact(dt.strptime('2015-01-01', '%Y-%m-%d')) - print a.getFact(dt.strptime('2015-01-02', '%Y-%m-%d')) - print a.getFact(dt.strptime('2015-01-03', '%Y-%m-%d')) - print a.getFact(dt.strptime('2015-01-04', '%Y-%m-%d')) - print a.getFact(dt.strptime('2015-01-05', '%Y-%m-%d')) - print a.getFact(dt.strptime('2015-01-06', '%Y-%m-%d')) - print a.getFact(dt.strptime('2015-01-07', '%Y-%m-%d')) - print a.getFact(dt.strptime('2015-01-08', '%Y-%m-%d')) + # fix_print_with_import + print(a.getFact(dt.strptime('2015-01-01', '%Y-%m-%d'))) 
+ # fix_print_with_import + print(a.getFact(dt.strptime('2015-01-02', '%Y-%m-%d'))) + # fix_print_with_import + print(a.getFact(dt.strptime('2015-01-03', '%Y-%m-%d'))) + # fix_print_with_import + print(a.getFact(dt.strptime('2015-01-04', '%Y-%m-%d'))) + # fix_print_with_import + print(a.getFact(dt.strptime('2015-01-05', '%Y-%m-%d'))) + # fix_print_with_import + print(a.getFact(dt.strptime('2015-01-06', '%Y-%m-%d'))) + # fix_print_with_import + print(a.getFact(dt.strptime('2015-01-07', '%Y-%m-%d'))) + # fix_print_with_import + print(a.getFact(dt.strptime('2015-01-08', '%Y-%m-%d'))) diff --git a/GreaterQF/PythonQF2/DataManagement/DailyLoading.py b/GreaterQF/PythonQF2/DataManagement/DailyLoading.py index 9546757..e0c6dc7 100644 --- a/GreaterQF/PythonQF2/DataManagement/DailyLoading.py +++ b/GreaterQF/PythonQF2/DataManagement/DailyLoading.py @@ -1,9 +1,12 @@ +from __future__ import absolute_import +from builtins import str +from builtins import map try: import numpy as np except: pass -from GenericAnnualSampler import GenericAnnualSampler -from temporalHelpers import * +from .GenericAnnualSampler import GenericAnnualSampler +from .temporalHelpers import * class DailyLoading(GenericAnnualSampler): # Object to store and retrieve annualised pandas time series @@ -77,7 +80,7 @@ def extractCorrectEntry(self, df, endOfTimestep, timestepDuration, wd): # Within the series, find the most recent occurrence of this day of week # Is the section of data provided to us correct? It should be, given earlier stages, but still... - dows_available = map(self.getDOW, [d.to_pydatetime() for d in df.index]) + dows_available = list(map(self.getDOW, [d.to_pydatetime() for d in df.index])) # Return the value and the corresponding date from which it came dateNeeded = (endOfTimestep - timedelta(seconds=timestepDuration-1)) use = np.array(dows_available) == wd @@ -100,6 +103,6 @@ def hasDOW(self, dow, year): # Get days of week present in each startDate's entry. 
# Each entry must be a pandas timeseries, in which case the day of week is converted from the timestamp dates = [d.to_pydatetime() for d in self.yearContents[startDate]['data'].index] - result[startDate] = (dow in list(np.unique(map(self.getDOW, dates)))) + result[startDate] = (dow in list(np.unique(list(map(self.getDOW, dates))))) return pd.Series(result) \ No newline at end of file diff --git a/GreaterQF/PythonQF2/DataManagement/GenericAnnualSampler.py b/GreaterQF/PythonQF2/DataManagement/GenericAnnualSampler.py index a99d4dd..ecfd621 100644 --- a/GreaterQF/PythonQF2/DataManagement/GenericAnnualSampler.py +++ b/GreaterQF/PythonQF2/DataManagement/GenericAnnualSampler.py @@ -1,3 +1,7 @@ +from __future__ import absolute_import +from builtins import str +from builtins import map +from builtins import object # Class to handle temporal profiles for different year, season, day of week and time of day # to make it easy to pull the relevant number out @@ -7,8 +11,8 @@ pass from dateutil.relativedelta import * -from temporalHelpers import * -from LookupLogger import LookupLogger +from .temporalHelpers import * +from .LookupLogger import LookupLogger from datetime import datetime as dt from datetime import date as dateType @@ -51,7 +55,7 @@ def specialHolidays(self, holidayDates): self.extraHolidays = holidayDates def niceDate(dateobj): return dateobj.strftime('%Y-%m-%d') if holidayDates not in [None, []]: - self.logger.addEvent('TemporalSampler', None, None, None, 'Special bank holidays added: ' + str(map(niceDate, holidayDates))) + self.logger.addEvent('TemporalSampler', None, None, None, 'Special bank holidays added: ' + str(list(map(niceDate, holidayDates)))) def useUKHolidays(self, state): '''Use UK bank holidays: Christmas, Boxing day, New Year's day, Easter Friday and Monday, May day, early and late summer diff --git a/GreaterQF/PythonQF2/DataManagement/LookupLogger.py b/GreaterQF/PythonQF2/DataManagement/LookupLogger.py index a99f1cf..9436c03 100644 --- 
a/GreaterQF/PythonQF2/DataManagement/LookupLogger.py +++ b/GreaterQF/PythonQF2/DataManagement/LookupLogger.py @@ -1,5 +1,7 @@ +from builtins import str +from builtins import object from collections import OrderedDict -class LookupLogger: +class LookupLogger(object): # Logger object to keep track of data requests and what was provided when the available data didn't match the requested date def __init__(self): @@ -19,10 +21,10 @@ def addEvent(self, eventType, requestedDate, actualDate, paramName, description) :return: None ''' - if eventType not in self.log.keys(): + if eventType not in list(self.log.keys()): self.log[eventType] = OrderedDict() - if requestedDate not in self.log[eventType].keys(): + if requestedDate not in list(self.log[eventType].keys()): self.log[eventType][requestedDate] = [] newEntry = [actualDate, paramName, description] @@ -50,13 +52,13 @@ def writeFile(self, filename): ''' try: f = open(filename, 'w') - except Exception,e: + except Exception as e: raise Exception('Could not write to log file:' + str(filename) + ':' + str(e)) f.write('Requested Date (if applic):: Date returned (if applic) :: Param name :: Description\r\n') - for eventType in self.log.keys(): + for eventType in list(self.log.keys()): f.write('======' + str(eventType) + '=======\r\n') - for requestTime in self.log[eventType].keys(): + for requestTime in list(self.log[eventType].keys()): printReqTime = 'None' if requestTime is None else requestTime.strftime('%Y-%m-%d %H:%M:%S %Z') for logLine in self.log[eventType][requestTime]: printActualTime = 'None' if logLine[0] is None else logLine[0].strftime('%Y-%m-%d %H:%M:%S %Z') diff --git a/GreaterQF/PythonQF2/DataManagement/SpatialTemporalResampler.py b/GreaterQF/PythonQF2/DataManagement/SpatialTemporalResampler.py index 1b42b56..cd53b73 100644 --- a/GreaterQF/PythonQF2/DataManagement/SpatialTemporalResampler.py +++ b/GreaterQF/PythonQF2/DataManagement/SpatialTemporalResampler.py @@ -1,8 +1,12 @@ -from spatialHelpers import * +from 
__future__ import absolute_import +from builtins import map +from builtins import str +from builtins import object +from .spatialHelpers import * from qgis.core import QgsField, QgsVectorLayer, QgsSpatialIndex, QgsMessageLog, QgsCoordinateReferenceSystem, QgsCoordinateTransform import processing -from PyQt4.QtCore import QVariant, QSettings +from qgis.PyQt.QtCore import QVariant, QSettings try: import pandas as pd import numpy as np @@ -10,10 +14,10 @@ pass import os from datetime import datetime as dt -from LookupLogger import LookupLogger +from .LookupLogger import LookupLogger from shutil import rmtree -class SpatialTemporalResampler: +class SpatialTemporalResampler(object): # Class that takes spatial data (QgsVectorLayers), associates them with a time and # allows them to be spatially resampled to output polygons based on attribute values # Also supports a single value for all space via same interface @@ -67,7 +71,7 @@ def setOutputShapefile(self, shapefile, epsgCode, id_field): self.templateIdField = id_field # Create mapping from real (numeric) feature ID to desired (string) feature ID a = shapefile_attributes(self.outputLayer)[id_field] - self.featureMapper = pd.Series(index = a.index, data = map(intOrString, a.values)) + self.featureMapper = pd.Series(index = a.index, data = list(map(intOrString, a.values))) # record what was used to label features if self.logger is not None: @@ -81,7 +85,7 @@ def getOutputShapefile(self): return self.outputLayer def getOutputFeatureIds(self): - return shapefile_attributes(self.outputLayer).keys() + return list(shapefile_attributes(self.outputLayer).keys()) def dealWithSingleValue(self, value, startTime, attributeToUse): ''' Create a QgsVectorLayer based on self.outputLayer with field attributeToUse the same value all the way through ''' @@ -149,7 +153,7 @@ def dealWithVectorLayer(self, shapefileInput, epsgCode, startTime, attributeToUs try: vectorLayer = openShapeFileInMemory(shapefileInput, 
targetEPSG=self.templateEpsgCode) - except Exception, e: + except Exception as e: raise ValueError('Could not load shapefile at ' + shapefileInput) try: @@ -187,7 +191,7 @@ def injectInput(self, shapefileInput, epsgCode, attributeToUse, startTime): if startTime.tzinfo is None: raise ValueError('Start time must have a timezone attached') - if type(shapefileInput) not in [str, unicode]: + if type(shapefileInput) not in [str, str]: raise ValueError('Shapefile input (' + str(shapefileInput) + ') is not a string filename') if not os.path.exists(shapefileInput): @@ -196,7 +200,7 @@ def injectInput(self, shapefileInput, epsgCode, attributeToUse, startTime): # Load the layer straight from disk as we won't be making any modifications to it try: vectorLayer = loadShapeFile(shapefileInput) - except Exception, e: + except Exception as e: raise ValueError('Could not load shapefile at ' + shapefileInput) if type(attributeToUse) is not list: @@ -210,7 +214,7 @@ def updateLayers(self, attributeToUse, layer, startTime): # Extract attributes table so it doesn't have to be done later satts = shapefile_attributes(layer) # Make sure table is indexed by what the user wanted - satts.index = map(intOrString, satts[self.templateIdField].loc[satts.index.tolist()]) + satts.index = list(map(intOrString, satts[self.templateIdField].loc[satts.index.tolist()])) if self.dataLayers is None: # Instantiate new time series of vector layers, make a copy of attributes table and record which attribute(s) are of interest @@ -358,7 +362,7 @@ def intorstring(x): except: return str(x) - readAcross = pd.Series(index=map(intorstring, t.values), data=map(intorstring, t.index)) + readAcross = pd.Series(index=list(map(intorstring, t.values)), data=list(map(intorstring, t.index))) t = None # Get areas of input shapefile intersected by output shapefile, and proportions covered, and attribute vals @@ -389,28 +393,28 @@ def intorstring(x): # Select successfully identified output areas - 
newShapeFile.setSelectedFeatures(list(readAcross[disagg.keys()])) + newShapeFile.setSelectedFeatures(list(readAcross[list(disagg.keys())])) selectedOutputFeatures = newShapeFile.selectedFeatures() newShapeFile.startEditing() # Apply disaggregation to features for outputFeat in selectedOutputFeatures: # For each output feature # Select the relevant features from the input layer - area_weightings = {inputAreaId: disagg[outputFeat[self.templateIdField]][inputAreaId] for inputAreaId in disagg[outputFeat[self.templateIdField]].keys()} + area_weightings = {inputAreaId: disagg[outputFeat[self.templateIdField]][inputAreaId] for inputAreaId in list(disagg[outputFeat[self.templateIdField]].keys())} # Calculate area-weighted average to get a single value for each output area for field in fieldsToSample: # The values to disaggregate in all regions touching this output feature - input_values = {inputAreaId: intersectedAreas[outputFeat[self.templateIdField]][inputAreaId][field] for inputAreaId in intersectedAreas[outputFeat[self.templateIdField]].keys()} + input_values = {inputAreaId: intersectedAreas[outputFeat[self.templateIdField]][inputAreaId][field] for inputAreaId in list(intersectedAreas[outputFeat[self.templateIdField]].keys())} # If an output area is influenced by multiple input areas, and a subset of these is invalid, # assign them zero - for i in input_values.keys(): + for i in list(input_values.keys()): try: input_values[i] = float(input_values[i]) except: input_values[i] = 0 # Combine values in all input regions touching this output feature. If disagg_weightings missed one out it's because no intersection or NULL data. 
# Any value intersecting an output area with NULL weighting will be excluded - outputAreasToUse = set(input_values.keys()).intersection(area_weightings.keys()) + outputAreasToUse = set(input_values.keys()).intersection(list(area_weightings.keys())) weighted_average = np.sum(np.array([input_values[in_id] * float(area_weightings[in_id]) for in_id in list(outputAreasToUse)])) newShapeFile.changeAttributeValue(outputFeat.id(), fieldIndices[field], float(weighted_average)) diff --git a/GreaterQF/PythonQF2/DataManagement/SpatialTemporalResampler_LUCY.py b/GreaterQF/PythonQF2/DataManagement/SpatialTemporalResampler_LUCY.py index d5f465b..a22aaf0 100644 --- a/GreaterQF/PythonQF2/DataManagement/SpatialTemporalResampler_LUCY.py +++ b/GreaterQF/PythonQF2/DataManagement/SpatialTemporalResampler_LUCY.py @@ -1,8 +1,12 @@ -from spatialHelpers import * +from __future__ import print_function +from __future__ import absolute_import +from builtins import map +from builtins import str +from .spatialHelpers import * from qgis.core import QgsField, QgsVectorLayer, QgsSpatialIndex, QgsMessageLog, QgsCoordinateReferenceSystem, QgsCoordinateTransform import processing -from PyQt4.QtCore import QVariant, QSettings +from qgis.PyQt.QtCore import QVariant, QSettings try: import pandas as pd import numpy as np @@ -11,8 +15,8 @@ import os from datetime import datetime as dt import tempfile -from LookupLogger import LookupLogger -from SpatialTemporalResampler import SpatialTemporalResampler +from .LookupLogger import LookupLogger +from .SpatialTemporalResampler import SpatialTemporalResampler class SpatialTemporalResampler_LUCY(SpatialTemporalResampler): # Class that takes spatial data (QgsVectorLayers), associates them with a time and @@ -71,7 +75,7 @@ def intorstring(x): except: return str(x) - readAcross = pd.Series(index=map(intorstring, t.values), data=map(intorstring, t.index)) + readAcross = pd.Series(index=list(map(intorstring, t.values)), data=list(map(intorstring, t.index))) t = None # 
Get areas of input shapefile intersected by output shapefile, and proportions covered, and attribute vals @@ -79,7 +83,8 @@ def intorstring(x): # Work out disaggregation factor baed on area intersected # Use "big" totals of weightings if the same attribute present in the input data file total_weightings = {} # Assume no "big" totals are available - print 'WB:' + str(weight_by) + # fix_print_with_import + print('WB:' + str(weight_by)) if weight_by in get_field_names(inputLayer): atts = shapefile_attributes(inputLayer) total_weightings = {weight_by:{intOrString(atts[inputIdField].loc[idx]):atts[weight_by].loc[idx] for idx in atts.index}} @@ -98,26 +103,26 @@ def intorstring(x): disagg = disaggregate_weightings(intersectedAreas, newShapeFile, weight_by, total_weightings, self.templateIdField)[weight_by] # Select successfully identified output areas - newShapeFile.setSelectedFeatures(list(readAcross[disagg.keys()])) + newShapeFile.setSelectedFeatures(list(readAcross[list(disagg.keys())])) selectedOutputFeatures = newShapeFile.selectedFeatures() newShapeFile.startEditing() # Apply disaggregation to features for outputFeat in selectedOutputFeatures: # For each output feature # Select the relevant features from the input layer - area_weightings = {inputAreaId: disagg[outputFeat[self.templateIdField]][inputAreaId] for inputAreaId in disagg[outputFeat[self.templateIdField]].keys()} + area_weightings = {inputAreaId: disagg[outputFeat[self.templateIdField]][inputAreaId] for inputAreaId in list(disagg[outputFeat[self.templateIdField]].keys())} # Calculate area-weighted average to get a single value for each output area for field in fieldsToSample: - input_values = {inputAreaId: intersectedAreas[outputFeat[self.templateIdField]][inputAreaId][field] for inputAreaId in intersectedAreas[outputFeat[self.templateIdField]].keys()} + input_values = {inputAreaId: intersectedAreas[outputFeat[self.templateIdField]][inputAreaId][field] for inputAreaId in 
list(intersectedAreas[outputFeat[self.templateIdField]].keys())} # If an output area is influenced by multiple input areas, and a subset of these is invalid, # assign them zero - for i in input_values.keys(): + for i in list(input_values.keys()): try: input_values[i] = float(input_values[i]) except: input_values[i] = 0 - outputAreasToUse = set(input_values.keys()).intersection(area_weightings.keys()) + outputAreasToUse = set(input_values.keys()).intersection(list(area_weightings.keys())) weighted_average = np.sum(np.array([input_values[in_id] * float(area_weightings[in_id]) for in_id in list(outputAreasToUse)])) newShapeFile.changeAttributeValue(outputFeat.id(), fieldIndices[field], float(weighted_average)) diff --git a/GreaterQF/PythonQF2/DataManagement/TemporalProfileSampler.py b/GreaterQF/PythonQF2/DataManagement/TemporalProfileSampler.py index 16ce271..d83f3e4 100644 --- a/GreaterQF/PythonQF2/DataManagement/TemporalProfileSampler.py +++ b/GreaterQF/PythonQF2/DataManagement/TemporalProfileSampler.py @@ -1,3 +1,5 @@ +from __future__ import absolute_import +from builtins import str # Class to handle temporal profiles for different year, season, day of week and time of day # to make it easy to pull the relevant number out @@ -9,9 +11,9 @@ from datetime import datetime as dt from datetime import timedelta from dateutil.relativedelta import * -from temporalHelpers import * +from .temporalHelpers import * #from samplerHelpers import * -from GenericAnnualSampler import GenericAnnualSampler +from .GenericAnnualSampler import GenericAnnualSampler class TemporalProfileSampler(GenericAnnualSampler): diff --git a/GreaterQF/PythonQF2/DataManagement/samplerHelpers.py b/GreaterQF/PythonQF2/DataManagement/samplerHelpers.py index 6015f63..ec47dcb 100644 --- a/GreaterQF/PythonQF2/DataManagement/samplerHelpers.py +++ b/GreaterQF/PythonQF2/DataManagement/samplerHelpers.py @@ -43,7 +43,7 @@ def addPeriod(obj, startDate, endDate, weekSeries, timezone=pytz.timezone('Europ 
startEndDates[y] = [max(dt(y, 1, 1), startDate), min(dt(y, 12, 31), endDate)] # Go round each year (if necessary), adding entries - for y in startEndDates.keys(): + for y in list(startEndDates.keys()): # Within the year, set start date of period as DOY sd = startEndDates[y][0] # start date ed = startEndDates[y][1] # End date @@ -52,7 +52,7 @@ def addPeriod(obj, startDate, endDate, weekSeries, timezone=pytz.timezone('Europ formattedSeries = obj.dealWithSeries(weekSeries) # Add straight to the dict if it's the first entry for the year - if y not in obj.yearContents.keys(): + if y not in list(obj.yearContents.keys()): obj.yearContents[y] = pd.Series(index=[sd], data=[{'isDST': isDST, 'data': formattedSeries.copy(deep=True)}]) diff --git a/GreaterQF/PythonQF2/DataManagement/spatialHelpers.py b/GreaterQF/PythonQF2/DataManagement/spatialHelpers.py index 80642b9..361f8f8 100644 --- a/GreaterQF/PythonQF2/DataManagement/spatialHelpers.py +++ b/GreaterQF/PythonQF2/DataManagement/spatialHelpers.py @@ -1,3 +1,5 @@ +from builtins import str +from builtins import range # Helper methods to do spatial and shapefile-related manipulations # amg 23/06/2016 import string @@ -8,12 +10,12 @@ except: pass from qgis.core import QgsVectorFileWriter, QgsVectorLayer, QgsRasterLayer, QgsGeometry, QgsRaster, QgsRectangle, QgsPoint, QgsField, QgsFeature, QgsSpatialIndex, QgsMessageLog -from qgis.core import QgsMapLayerRegistry, QgsSymbolV2, QgsGraduatedSymbolRendererV2, QgsRendererRangeV2, QgsFeatureRequest, QgsExpression, QgsDistanceArea +from qgis.core import QgsSymbol, QgsGraduatedSymbolRenderer, QgsRendererRange, QgsFeatureRequest, QgsExpression, QgsDistanceArea from qgis.analysis import QgsZonalStatistics import processing # qgis processing framework -from PyQt4.QtCore import QVariant, QPyNullVariant +from qgis.PyQt.QtCore import QVariant, QPyNullVariant import tempfile -from string import lower +# from string import lower def reprojectVectorLayer_threadSafe(filename, targetEpsgCode): ''' 
Does the same thing as reprojectVectorLayer but in a thread safe way''' @@ -74,7 +76,7 @@ def reprojectVectorLayer(filename, targetEpsgCode): # Copy features for orig_feat in orig_layer.getFeatures(): orig_id = orig_feat.id() - for fieldName in orig_fieldNames.keys(): + for fieldName in list(orig_fieldNames.keys()): try: new_val = float(orig_feat[orig_fieldNames[fieldName]]) reproj_layer.changeAttributeValue(orig_id, reproj_fieldNames[fieldName], new_val) @@ -127,10 +129,10 @@ def calculate_fuel_use(inputLayer, inputIdField, petrolFields = {'motorcycle':'_FC_Pmcyc', 'artic':'_FC_Part', 'rigid':'_FC_Prig', 'taxi':'_FC_Ptaxi', 'car':'_FC_Pcar', 'bus':'_FC_Pbus', 'lgv':'_FC_Plgv'} # Get overall list of new attrib names - consumption_attributes = dieselFields.values() - consumption_attributes.extend(petrolFields.values()) + consumption_attributes = list(dieselFields.values()) + consumption_attributes.extend(list(petrolFields.values())) fieldMap = {'diesel':dieselFields, 'petrol':petrolFields} - modelledTypes = petrolFields.keys() + modelledTypes = list(petrolFields.keys()) # Read-across from our road classes to EuroClass road classes (used in the FuelConsumption object) roadAcross = {'motorway':'motorway', 'primary_road':'urban', 'secondary_road':'urban', 'other':'urban'} @@ -146,7 +148,7 @@ def calculate_fuel_use(inputLayer, inputIdField, for roadType in roadTypes: # For each road type in the file # If we don't explicitly consider this road type as motorway, A road or B road, just consider it "other" - if roadType not in roadTypeLookup.keys(): + if roadType not in list(roadTypeLookup.keys()): roadTypeOfficial = 'other' else: roadTypeOfficial = roadTypeLookup[roadType] @@ -215,7 +217,7 @@ def calculate_fuel_use(inputLayer, inputIdField, for key in modelledTypes: aadtData[key] = np.array(inputLayer.getDoubleValues(vAADTFields[key], selectedOnly = True)[0]) # Populate car, bus and LGV differently to above - for fuelType in fieldMap.keys(): + for fuelType in 
list(fieldMap.keys()): newValues[fieldMap[fuelType]['car']].loc[ids] = aadtData['total_car'] *\ modelParams.fuelFractions['car'][fuelType] *\ lkm *\ @@ -231,8 +233,8 @@ def calculate_fuel_use(inputLayer, inputIdField, alreadyCalculated = ['car', 'lgv', 'bus'] # The remaining vehicle types all get calculated in the same way so loop over fieldMap to save on code... - for fuelType in fieldMap.keys(): - for vehType in fieldMap[fuelType].keys(): + for fuelType in list(fieldMap.keys()): + for vehType in list(fieldMap[fuelType].keys()): if vehType not in alreadyCalculated: newValues[fieldMap[fuelType][vehType]].loc[ids] = aadtData[vehType] * \ modelParams.fuelFractions[vehType][fuelType] * \ @@ -480,9 +482,9 @@ def disaggregate_weightings(intersectedAmounts, output_layer, weightingAttribute # turn the input dictionary inside out to get all the output features touching a given input feature input_features = {} - for out_id in intersectedAmounts.keys(): - for in_id in intersectedAmounts[out_id].keys(): - if in_id not in input_features.keys(): + for out_id in list(intersectedAmounts.keys()): + for in_id in list(intersectedAmounts[out_id].keys()): + if in_id not in list(input_features.keys()): input_features[in_id] = {} # Each entry contains the area intersected, size and values of the input feature # Append the weighting attributes to the input data dict @@ -503,11 +505,11 @@ def disaggregate_weightings(intersectedAmounts, output_layer, weightingAttribute disagg_weightings = {wa:{} for wa in weightingAttributes} # A weighting for each weighting attrib totals_already_available = True - if len(total_weightings.keys()) == 0: + if len(list(total_weightings.keys())) == 0: totals_already_available = False total_weightings = {wa:{} for wa in weightingAttributes} - elif len(total_weightings.keys()) != len(weightingAttributes): + elif len(list(total_weightings.keys())) != len(weightingAttributes): raise ValueError('Total weightings are not present for all weighting attributes') 
num_outfeats = {} # Keep track of the number of output features (reflects partial overlaps) intersecting each input feature @@ -517,7 +519,7 @@ def disaggregate_weightings(intersectedAmounts, output_layer, weightingAttribute # everything in a partially-covered input area leaping into the output areas that cover it. inputAreaCovered = {} - for in_id in input_features.keys(): + for in_id in list(input_features.keys()): num_outfeats[in_id] = 0.0 if not totals_already_available: # Keep a running total of the weightings falling within input feature for normalisation @@ -526,7 +528,7 @@ def disaggregate_weightings(intersectedAmounts, output_layer, weightingAttribute # Add up what proportion of input area has been covered inputAreaCovered[in_id] = 0.0 - for out_id in input_features[in_id].keys(): + for out_id in list(input_features[in_id].keys()): # If not all of the output area intersects the input area, don't use all of the output area's weighting # Use proportion_of_output_area_intersected * weighting as the weighting. This prevents an output area from "stealing" # all of the disaggregated value when only a sliver intersects the input area @@ -536,7 +538,7 @@ def disaggregate_weightings(intersectedAmounts, output_layer, weightingAttribute inputAreaCovered[in_id] += input_features[in_id][out_id]['amountIntersected']/input_features[in_id][out_id]['originalAmount'] for wa in weightingAttributes: - if out_id not in disagg_weightings[wa].keys(): # If none of the output areas in this input area, just allocate empty entries + if out_id not in list(disagg_weightings[wa].keys()): # If none of the output areas in this input area, just allocate empty entries disagg_weightings[wa][out_id] = {} # Dict contains contribution from each input ID intersecting this out_id for wa in weightingAttributes: @@ -554,8 +556,8 @@ def disaggregate_weightings(intersectedAmounts, output_layer, weightingAttribute # in which case the totals are rightly used to downscale the "big" value. 
# If we find the total is zero over the whole input area, then spread everything evenly across inside that area # to prevent throwing away the quantity to be disaggregated - for in_id in input_features.keys(): - for out_id in input_features[in_id].keys(): + for in_id in list(input_features.keys()): + for out_id in list(input_features[in_id].keys()): for wa in weightingAttributes: # Only use those values that are available (may have been skipped above) try: @@ -887,8 +889,8 @@ def duplicateVectorLayer(inLayer, targetEPSG=None, label=None): return newLayer def colourRanges(displayLayer, attribute, opacity, range_minima, range_maxima, colours): - from qgis.core import QgsMapLayerRegistry, QgsSymbolV2, QgsGraduatedSymbolRendererV2, QgsRendererRangeV2 - from PyQt4.QtGui import QColor + from qgis.core import QgsSymbolV2, QgsGraduatedSymbolRendererV2, QgsRendererRangeV2 + from qgis.PyQt.QtGui import QColor # Colour vector layer according to the value of attribute <>, with ranges set out by <> (list), <> (list) # using <> @@ -926,7 +928,7 @@ def populateShapefileFromTemplate(dataMatrix, primaryKey, templateShapeFile, ''' - if type(templateShapeFile) in [unicode, str]: + if type(templateShapeFile) in [str, str]: # Open existing layer and try to set its CRS layer=openShapeFileInMemory(templateShapeFile, templateEpsgCode, label=title) diff --git a/GreaterQF/PythonQF2/DataManagement/temporalHelpers.py b/GreaterQF/PythonQF2/DataManagement/temporalHelpers.py index e34d8fc..8278de6 100644 --- a/GreaterQF/PythonQF2/DataManagement/temporalHelpers.py +++ b/GreaterQF/PythonQF2/DataManagement/temporalHelpers.py @@ -1,3 +1,4 @@ +from builtins import str # Helper methods for temporal calculations and calendar events from datetime import datetime as dt @@ -52,10 +53,10 @@ def holidaysForYear(year): # Christmas day/boxing day falling on weekend isn't included (assumed standard weekend) holidays = [] # New year: - holidays.append(dt(year, 01, 01)) + holidays.append(dt(year, 0o1, 0o1)) # If 
2 or 3 january is a monday, this is the bank holiday - jan2 = dt(year, 01, 02) - jan3 = dt(year, 01, 03) + jan2 = dt(year, 0o1, 0o2) + jan3 = dt(year, 0o1, 0o3) if jan2.weekday() == 0: holidays.append(jan2) if jan3.weekday() == 0: @@ -68,7 +69,7 @@ def holidaysForYear(year): holidays.extend([good_fri, easter_mon]) # Early and late may - may1 = dt(year, 05, 01) + may1 = dt(year, 0o5, 0o1) may1 = may1 if may1.weekday() is 0 else may1 + timedelta(7 - may1.weekday()) holidays.append(may1) holidays.append(dt(year, 5, 31) - timedelta(dt(year, 5, 31).weekday())) diff --git a/GreaterQF/PythonQF2/Disaggregate.py b/GreaterQF/PythonQF2/Disaggregate.py index 6e8cbdd..6673fa8 100644 --- a/GreaterQF/PythonQF2/Disaggregate.py +++ b/GreaterQF/PythonQF2/Disaggregate.py @@ -1,11 +1,12 @@ +from __future__ import absolute_import import os import pickle -from DataManagement.spatialHelpers import saveLayerToFile, loadShapeFile, shapefile_attributes, populateShapefileFromTemplate -from DataManagement.temporalHelpers import makeUTC -from EnergyUseData import EnergyUseData -from FuelConsumption import FuelConsumption -from Population import Population -from Transport import Transport +from .DataManagement.spatialHelpers import saveLayerToFile, loadShapeFile, shapefile_attributes, populateShapefileFromTemplate +from .DataManagement.temporalHelpers import makeUTC +from .EnergyUseData import EnergyUseData +from .FuelConsumption import FuelConsumption +from .Population import Population +from .Transport import Transport def disaggregate(qfDataSources, qfParams, outputFolder): ''' diff --git a/GreaterQF/PythonQF2/EnergyProfiles.py b/GreaterQF/PythonQF2/EnergyProfiles.py index 1f8d579..e112570 100644 --- a/GreaterQF/PythonQF2/EnergyProfiles.py +++ b/GreaterQF/PythonQF2/EnergyProfiles.py @@ -1,3 +1,7 @@ +from __future__ import absolute_import +from builtins import map +from builtins import range +from builtins import object # Object that stores and retrieves energy use profiles for different 
seasons and times of day for GreaterQF # An energy profile is a week-long template of relative energy use that changes only with season @@ -10,10 +14,10 @@ pass import pytz -from DataManagement.LookupLogger import LookupLogger -from DataManagement.TemporalProfileSampler import TemporalProfileSampler +from .DataManagement.LookupLogger import LookupLogger +from .DataManagement.TemporalProfileSampler import TemporalProfileSampler -class EnergyProfiles: +class EnergyProfiles(object): def __init__(self, city, use_uk_holidays, customHolidays = [], logger=LookupLogger()): ''' Instantiate :param city : String specifying which city is being modelled (e.g. Europe/London). Must be compatible with pytz.timezone standard @@ -103,11 +107,11 @@ def dealWithInputFile(self, energyComponent, file): dl = pd.read_csv(file,skipinitialspace=True, header=None) # Should be 3x each season in header - if (len(dl.keys())-1)%3 != 0: + if (len(list(dl.keys()))-1)%3 != 0: raise ValueError('There must be 3 columns for each named season in ' + file) # Expect certain keywords - rowHeadings = map(lower, dl[0][0:6]) + rowHeadings = list(map(lower, dl[0][0:6])) if 'season' != rowHeadings[0]: raise ValueError('First column of row 1 must be \'Season\' in ' + file) @@ -129,9 +133,9 @@ def dealWithInputFile(self, energyComponent, file): firstDataLine = 6 # Try to extract the timezone from the file header try: - tz = pytz.timezone(dl[dl.keys()[1]][5]) + tz = pytz.timezone(dl[list(dl.keys())[1]][5]) except Exception: - raise ValueError('Invalid timezone "' + dl[dl.keys()[1]][5] + '" specified in ' + file + + raise ValueError('Invalid timezone "' + dl[list(dl.keys())[1]][5] + '" specified in ' + file + '. 
This should be of the form "UTC" or "Europe/London" as per python timezone documentation') earliestStart = None @@ -143,7 +147,7 @@ def dealWithInputFile(self, energyComponent, file): try: sd = pd.datetime.strptime(dl[seasonStart][3], '%Y-%m-%d') ed = pd.datetime.strptime(dl[seasonStart][4], '%Y-%m-%d') - except Exception, e: + except Exception as e: raise Exception('Rows 4 and 5 of ' + file + ' must be dates in the format YYYY-mm-dd') sd = tz.localize(sd) diff --git a/GreaterQF/PythonQF2/EnergyUseData.py b/GreaterQF/PythonQF2/EnergyUseData.py index 0feed58..b020949 100644 --- a/GreaterQF/PythonQF2/EnergyUseData.py +++ b/GreaterQF/PythonQF2/EnergyUseData.py @@ -1,10 +1,14 @@ +from __future__ import print_function +from __future__ import absolute_import +from builtins import str +from builtins import object from datetime import datetime -from DataManagement.LookupLogger import LookupLogger -from DataManagement.SpatialTemporalResampler import SpatialTemporalResampler -from DataManagement.spatialHelpers import * -from PyQt4.QtCore import QSettings +from .DataManagement.LookupLogger import LookupLogger +from .DataManagement.SpatialTemporalResampler import SpatialTemporalResampler +from .DataManagement.spatialHelpers import * +from qgis.PyQt.QtCore import QSettings -class EnergyUseData: +class EnergyUseData(object): # Store spatially and temporally resolved energy use data for GreaterQF model # Makes heavy use of QGIS API @@ -261,6 +265,7 @@ def testIt(): a.setIndustrialGas(3.0, datetime.strptime('2014-01-01', '%Y-%m-%d'), 'DomGas') a.setEconomy7Elec(7.0, datetime.strptime('2014-01-01', '%Y-%m-%d'), 'DomGas') # Get downscaled shapefiles for 2014 - print a.getEnergyTable(datetime.strptime('2013-01-01', '%Y-%m-%d')) + # fix_print_with_import + print(a.getEnergyTable(datetime.strptime('2013-01-01', '%Y-%m-%d'))) return a.getDomesticElecLayer(datetime.strptime('2014-01-01', '%Y-%m-%d')) diff --git a/GreaterQF/PythonQF2/FuelConsumption.py 
b/GreaterQF/PythonQF2/FuelConsumption.py index 6e14ef3..0f1dcf1 100644 --- a/GreaterQF/PythonQF2/FuelConsumption.py +++ b/GreaterQF/PythonQF2/FuelConsumption.py @@ -1,10 +1,13 @@ +from builtins import str +from builtins import map +from builtins import object try: import pandas as pd except: pass import os from string import lower -class FuelConsumption(): +class FuelConsumption(object): def __init__(self, filename): ''' Class to read in fuel consumption file with prescribed format in g/km, and do lookups for different dates, road types and vehicle types, returning data in kg/km @@ -33,19 +36,19 @@ def todate(x): return pd.datetime.strptime(x, '%Y-%m-%d') # Validate the entries # Index level 0 is date, level 1 is fuel, level 2 is vehicle type - roadsPresent = list(pd.unique(self.data.keys())) + roadsPresent = list(pd.unique(list(self.data.keys()))) - missingRoads = list(set(self.roadTypes).difference(map(lower, roadsPresent))) + missingRoads = list(set(self.roadTypes).difference(list(map(lower, roadsPresent)))) if len(missingRoads) > 0: raise ValueError('Not all of the required road types were found in ' + filename + '. Expected: ' + str(self.roadTypes) + ' but got ' + str(roadsPresent)) fuelsPresent = list(pd.unique(self.data.index.levels[1])) - missingFuels = list(set(self.fuelTypes).difference(map(lower, fuelsPresent))) + missingFuels = list(set(self.fuelTypes).difference(list(map(lower, fuelsPresent)))) if len(missingFuels) > 0: raise ValueError('Not all of the required fuel types were found in ' + filename + '. Expected: ' + str(self.fuelTypes) + ' but got ' + str(fuelsPresent)) vehiclesPresent = list(pd.unique(self.data.index.levels[2])) - missingVehicles = list(set(self.vehicleTypes).difference(map(lower, vehiclesPresent))) + missingVehicles = list(set(self.vehicleTypes).difference(list(map(lower, vehiclesPresent)))) if len(missingVehicles) > 0: raise ValueError('Not all of the required vehicle types were found in ' + filename + '. 
Expected: ' + str(self.vehicleTypes) + ' but got ' + str(missingVehicles)) diff --git a/GreaterQF/PythonQF2/GQFDataSources.py b/GreaterQF/PythonQF2/GQFDataSources.py index 511bbf2..dc9a7c1 100644 --- a/GreaterQF/PythonQF2/GQFDataSources.py +++ b/GreaterQF/PythonQF2/GQFDataSources.py @@ -1,3 +1,8 @@ +from __future__ import print_function +from builtins import str +from builtins import map +from builtins import range +from builtins import object from ...Utilities import f90nml as nml import os from datetime import datetime as dt @@ -23,7 +28,7 @@ def validFile(x): if not os.path.exists(x): raise ValueError('The diurnal input file ' + str(x) + ' was not found') -class DataSources: +class DataSources(object): ''' Loads the data sources namelist, conducts validation and structures inputs for use with data management routines ''' def __init__(self, configFile): @@ -53,7 +58,7 @@ def __init__(self, configFile): try: ds = nml.read(configFile) - except Exception, e: + except Exception as e: raise ValueError('Unable to read data sources config file at: ' + str(configFile)) # Are all main entries present? @@ -79,7 +84,7 @@ def __init__(self, configFile): 'workplacepop':'workPop_spat', 'transportdata':'transport_spat'} - missing = list(set(expectedKeys_spatial).difference(ds.keys())) + missing = list(set(expectedKeys_spatial).difference(list(ds.keys()))) if len(missing) > 0: raise ValueError('Spatial entries missing from ' + str(configFile) + ' in namelist: ' + str(missing)) @@ -87,10 +92,10 @@ def __init__(self, configFile): for subEntry in expectedKeys_spatial: content_orig = ds[subEntry] # Do string matching, so make it all upper case - content = {upper(k):content_orig[k] for k in content_orig.keys()} + content = {upper(k):content_orig[k] for k in list(content_orig.keys())} # Check it's all lists or no lists - types = np.unique(map(type, content.values())) + types = np.unique(list(map(type, list(content.values())))) # are all sub-entries present? 
expectedNames_spat = ['shapefiles', 'startDates', 'epsgCodes', 'attribToUse', 'featureIds'] @@ -125,8 +130,8 @@ def __init__(self, configFile): 'AADT_rigid', 'AADT_artic'] - expectedNames_spat = map(upper, expectedNames_spat) - missing = list(set(map(upper, expectedNames_spat)).difference(content.keys())) + expectedNames_spat = list(map(upper, expectedNames_spat)) + missing = list(set(map(upper, expectedNames_spat)).difference(list(content.keys()))) if len(missing) > 0: raise ValueError('Entries missing from ' + subEntry + ' in namelist: ' + str(missing)) @@ -136,7 +141,7 @@ def __init__(self, configFile): # raise ValueError( # 'The namelist entries for ' + subEntry + ' have inconsistent lengths: some are lists and some are not') #if list not in types: - for k in content.keys(): + for k in list(content.keys()): if content[k] == '': content[k] = None content[k] = [content[k]] @@ -145,12 +150,12 @@ def __init__(self, configFile): #if len(np.unique(lengths)) > 1: # raise ValueError('The namelist entries for ' + subEntry + ' have inconsistent list lengths') - map(validateInput, content[expectedNames_spat[0]]) + list(map(validateInput, content[expectedNames_spat[0]])) # Validate start dates - if 'STARTDATES' in content.keys(): + if 'STARTDATES' in list(content.keys()): try: - content['STARTDATES'] = map(makeTimey, content['STARTDATES']) - except Exception, e: + content['STARTDATES'] = list(map(makeTimey, content['STARTDATES'])) + except Exception as e: raise ValueError('One or more startDate entries is not in YYYY-mm-dd format for ' + subEntry + ':' + str(e)) # Ensure dates within a subentry are unique @@ -253,9 +258,9 @@ def __init__(self, configFile): 'diurnalmetabolism': 'diurnMetab', 'fuelconsumption': 'fuelConsumption'} - expectedKeys_temporal = destinations_temporal.keys() + expectedKeys_temporal = list(destinations_temporal.keys()) expectedNames_temporal = ['profileFiles'] - missing = list(set(map(upper, expectedKeys_temporal)).difference(map(upper, ds.keys()))) 
+ missing = list(set(map(upper, expectedKeys_temporal)).difference(list(map(upper, list(ds.keys()))))) if len(missing) > 0: raise ValueError('Temporal entries missing from ' + str(configFile) + ' in namelist: ' + str(missing)) @@ -263,16 +268,16 @@ def __init__(self, configFile): for entry in expectedKeys_temporal: content = ds[entry] # Validate sub-entries - missing = list(set(map(upper, expectedNames_temporal)).difference(map(upper, content.keys()))) + missing = list(set(map(upper, expectedNames_temporal)).difference(list(map(upper, list(content.keys()))))) if len(missing) > 0: raise ValueError('Entries missing from ' + entry + ' in namelist: ' + str(missing)) # Make sure everything is a list or everyting is a string - types = np.unique(map(type, content.values())) + types = np.unique(list(map(type, list(content.values())))) if len(types) > 1 and list in types: raise ValueError('The namelist entries for ' + entry + ' have inconsistent lengths: some are lists and some are not') #if list not in types: - for k in content.keys(): + for k in list(content.keys()): # Replace empties with None, and make lists if not lists already if len(content[k]) == 0: content[k] = None @@ -280,7 +285,7 @@ def __init__(self, configFile): content[k] = [content[k]] # Validate filenames - map(validFile, content['profileFiles']) + list(map(validFile, content['profileFiles'])) # Having gotten this far means the entries are valid, so populate the object field entries = getattr(self, destinations_temporal[entry]) for i in range(0, len(content['profileFiles']), 1): @@ -315,7 +320,7 @@ def validate_transport(inputDict): 'AADT_rigid', 'AADT_artic'] - missing = list(set(expectedNames).difference(inputDict.keys())) + missing = list(set(expectedNames).difference(list(inputDict.keys()))) if(len(missing) > 0): raise ValueError('Entries missing from transportData section of data sources file:' + str(missing)) road_class_entries = ['class_field', 'motorway_class', 'primary_class', 'secondary_class'] @@ 
-344,14 +349,14 @@ def validate_transport(inputDict): 'AADT_coach', 'AADT_rigid', 'AADT_artic'] - missing = list(set(mandatory_aadts).difference(inputDict.keys())) + missing = list(set(mandatory_aadts).difference(list(inputDict.keys()))) if len(missing) > 0: raise ValueError('The following AADT field names are missing from the transportData section of the data sources file: ' + str(missing)) # See if the conditionally optional fields exist combination_aadts = {'AADT_total_LGV': ['AADT_petrol_LGV', 'AADT_diesel_LGV'], 'AADT_total_car': ['AADT_petrol_car', 'AADT_diesel_car']} - for one in combination_aadts.keys(): + for one in list(combination_aadts.keys()): if type(inputDict[one]) is not str: # If the total value is not present, the other values must be for other in combination_aadts[one]: if type(inputDict[other]) is not str: @@ -360,16 +365,28 @@ def validate_transport(inputDict): if __name__=="__main__": a = DataSources('C:\Users\pn910202\.qgis2\python\plugins\GreaterQF\PythonQF2\GQFInputs\dataSources_working.nml') - print a.resPop_spat - print a.outputAreas_spat - print a.transport_spat - print a.transport_spat[0]['AADT_fields'] - print a.indGas_spat - print a.indElec_spat - print a.domGas_spat - print a.domElec_spat - print a.eco7_spat - print a.resPop_spat - print a.workPop_spat - print a.fuelConsumption + # fix_print_with_import + print(a.resPop_spat) + # fix_print_with_import + print(a.outputAreas_spat) + # fix_print_with_import + print(a.transport_spat) + # fix_print_with_import + print(a.transport_spat[0]['AADT_fields']) + # fix_print_with_import + print(a.indGas_spat) + # fix_print_with_import + print(a.indElec_spat) + # fix_print_with_import + print(a.domGas_spat) + # fix_print_with_import + print(a.domElec_spat) + # fix_print_with_import + print(a.eco7_spat) + # fix_print_with_import + print(a.resPop_spat) + # fix_print_with_import + print(a.workPop_spat) + # fix_print_with_import + print(a.fuelConsumption) diff --git a/GreaterQF/PythonQF2/GreaterQF.py 
b/GreaterQF/PythonQF2/GreaterQF.py index 1db0b80..a118102 100644 --- a/GreaterQF/PythonQF2/GreaterQF.py +++ b/GreaterQF/PythonQF2/GreaterQF.py @@ -1,3 +1,7 @@ +from __future__ import absolute_import +from builtins import str +from builtins import range +from builtins import object import os import pickle import re @@ -10,26 +14,26 @@ except: pass -from DataManagement.spatialHelpers import feature_areas, loadShapeFile, shapefile_attributes -from DataManagement.temporalHelpers import makeUTC +from .DataManagement.spatialHelpers import feature_areas, loadShapeFile, shapefile_attributes +from .DataManagement.temporalHelpers import makeUTC from pytz import timezone -from Calcs3 import QF -from DailyEnergyLoading import DailyEnergyLoading -from DailyFactors import DailyFact -from Disaggregate import disaggregate -from EnergyProfiles import EnergyProfiles # For temporal energy use profiles -from EnergyUseData import EnergyUseData # For spatially disaggregated energy use data -from FuelConsumption import FuelConsumption -from GQFDataSources import DataSources -from HumanActivityProfiles import HumanActivityProfiles -from Params import Params -from Partitions import Partitions -from Population import Population -from Transport import Transport -from TransportProfiles import TransportProfiles - - -class Model(): +from .Calcs3 import QF +from .DailyEnergyLoading import DailyEnergyLoading +from .DailyFactors import DailyFact +from .Disaggregate import disaggregate +from .EnergyProfiles import EnergyProfiles # For temporal energy use profiles +from .EnergyUseData import EnergyUseData # For spatially disaggregated energy use data +from .FuelConsumption import FuelConsumption +from .GQFDataSources import DataSources +from .HumanActivityProfiles import HumanActivityProfiles +from .Params import Params +from .Partitions import Partitions +from .Population import Population +from .Transport import Transport +from .TransportProfiles import TransportProfiles + + +class Model(object): 
''' Class that encapsulates a GreaterQF model instance''' def __init__(self): # Define the subfolders that should be present after each model run @@ -278,7 +282,7 @@ def loadModelResults(self, path): if not os.path.exists(path): raise Exception('Model output directory ' + str(path) + ' not found') - for sub in self.subFolders.values(): + for sub in list(self.subFolders.values()): directory = os.path.join(path, sub) if not os.path.exists(directory): raise Exception('Chosen model output folder ' + str(path) + ' did not contain enough subfolders to be genuine') diff --git a/GreaterQF/PythonQF2/HumanActivityProfiles.py b/GreaterQF/PythonQF2/HumanActivityProfiles.py index 76c9fb5..c7e5a87 100644 --- a/GreaterQF/PythonQF2/HumanActivityProfiles.py +++ b/GreaterQF/PythonQF2/HumanActivityProfiles.py @@ -1,3 +1,7 @@ +from __future__ import absolute_import +from builtins import map +from builtins import range +from builtins import object # Object that stores and retrieves diurnal cycles of metabolic activity profiles for different seasons and times of day for GreaterQF import os @@ -8,11 +12,11 @@ except: pass import pytz -from DataManagement.LookupLogger import LookupLogger -from DataManagement.TemporalProfileSampler import TemporalProfileSampler +from .DataManagement.LookupLogger import LookupLogger +from .DataManagement.TemporalProfileSampler import TemporalProfileSampler -class HumanActivityProfiles: +class HumanActivityProfiles(object): def __init__(self, city, use_uk_holidays, customHolidays = [], logger=LookupLogger()): ''' Instantiate :param city: string: City being modelled (in time zone format e.g. 
Europe/London) @@ -58,9 +62,9 @@ def dealWithInputFile(self, file): dl = pd.read_csv(file,skipinitialspace=True, header=None) # Should be 3x each season in header - if (len(dl.keys())-1)%3 != 0: + if (len(list(dl.keys()))-1)%3 != 0: raise ValueError('There must be 6 columns for each named season in ' + file) - rowHeaders = map(lower, dl[0][0:6]) + rowHeaders = list(map(lower, dl[0][0:6])) firstDataRow = 6 # Expect certain keywords @@ -94,10 +98,10 @@ def dealWithInputFile(self, file): # Try and get timezone set up try: - tz = pytz.timezone(dl[dl.keys()[1]][5]) + tz = pytz.timezone(dl[list(dl.keys())[1]][5]) except Exception: - raise ValueError('Invalid timezone "' + dl[dl.keys()[1]][5] + '" specified in ' + file + + raise ValueError('Invalid timezone "' + dl[list(dl.keys())[1]][5] + '" specified in ' + file + '. This should be of the form "UTC" or "Europe/London" as per python timezone documentation') # Go through in sextouplets gathering up data for a template week @@ -105,7 +109,7 @@ def dealWithInputFile(self, file): try: sd = pd.datetime.strptime(dl[seasonStart][3], '%Y-%m-%d') ed = pd.datetime.strptime(dl[seasonStart][4], '%Y-%m-%d') - except Exception, e: + except Exception as e: raise Exception('Rows 4 and 5 ' + file + ' must be dates in the format YYYY-mm-dd') sd = tz.localize(sd) diff --git a/GreaterQF/PythonQF2/Params.py b/GreaterQF/PythonQF2/Params.py index f783bb0..a6a376b 100644 --- a/GreaterQF/PythonQF2/Params.py +++ b/GreaterQF/PythonQF2/Params.py @@ -1,3 +1,7 @@ +from builtins import map +from builtins import str +from builtins import range +from builtins import object # Object that loads and stores GreaterQF parameters, given a namelist file from datetime import datetime as dt from datetime import timedelta as timedelta @@ -5,11 +9,11 @@ from string import lower import pytz -class Params: +class Params(object): def __init__(self, paramsFile): try: PARAMS = nml.read(paramsFile) - except Exception,e: + except Exception as e: raise ValueError('Could 
not process params file ' + paramsFile + ': ' + str(e)) self.inputFile = paramsFile @@ -32,7 +36,7 @@ def __init__(self, paramsFile): self.fuelFractions = {} # Fraction of each vehicle type that's petrol and diesel expectedSections = ['params', 'waterHeatingFractions', 'roadAADTs', 'roadSpeeds', 'vehicleFractions', 'heatOfCombustion', 'petrolDieselFractions'] - missingSections = list(set(map(lower, expectedSections)).difference(map(lower, PARAMS.keys()))) + missingSections = list(set(map(lower, expectedSections)).difference(list(map(lower, list(PARAMS.keys()))))) if len(missingSections) > 0: raise ValueError('The parameters file ' + paramsFile + ' is missing the following sections: ' + str(missingSections)) @@ -74,20 +78,20 @@ def latentPartitioning(self, PARAMS): def roadData(self, PARAMS, paramsFile): # Road AADTs road_types = ['motorway', 'primary_road', 'secondary_road', 'other'] - missing = list(set(road_types).difference(PARAMS['roadAADTs'].keys())) + missing = list(set(road_types).difference(list(PARAMS['roadAADTs'].keys()))) if len(missing) == 0: self.roadAADTs = PARAMS['roadAADTs'] else: - raise ValueError('Entry "roadAADTs" in parameters file should contain entries ' + str(road_types) + ' but contains ' + str(PARAMS['roadAADTs'].keys())) + raise ValueError('Entry "roadAADTs" in parameters file should contain entries ' + str(road_types) + ' but contains ' + str(list(PARAMS['roadAADTs'].keys()))) # Road speeds - missing = list(set(road_types).difference(PARAMS['roadSpeeds'].keys())) + missing = list(set(road_types).difference(list(PARAMS['roadSpeeds'].keys()))) if len(missing) == 0: self.roadSpeeds = PARAMS['roadSpeeds'] else: raise ValueError('Entry "roadSpeeds" in parameters file should contain entries ' + str(road_types) + ' but contains ' + str(PARAMS['roadSpeeds'])) - if 'vehicleage' not in PARAMS['params'].keys(): - raise ValueError('Entry "params" in parameters file should contain "vehicleAge" parameter for vehicle age' + 
str(PARAMS['params'].keys())) + if 'vehicleage' not in list(PARAMS['params'].keys()): + raise ValueError('Entry "params" in parameters file should contain "vehicleAge" parameter for vehicle age' + str(list(PARAMS['params'].keys()))) else: try: self.vehicleAge = timedelta(days = 365 * float(PARAMS['params']['vehicleage'])) @@ -96,14 +100,14 @@ def roadData(self, PARAMS, paramsFile): # Vehicle fractions required_fractions = ['motorway', 'primary_road', 'secondary_road', 'other'] - missing = list(set(required_fractions).difference(PARAMS['vehicleFractions'].keys())) + missing = list(set(required_fractions).difference(list(PARAMS['vehicleFractions'].keys()))) if len(missing) == 0: veh_frac_headings = ['car', 'lgv', 'motorcycle', 'taxi', 'bus', 'rigid', 'artic'] # Each of the entries of PARAMS['vehicleFractions'] is a list corresponding to vehicle_fraction_headings # Loop around and build the dictionary of values - for fractionType in PARAMS['vehicleFractions'].keys(): + for fractionType in list(PARAMS['vehicleFractions'].keys()): if len(PARAMS['vehicleFractions'][fractionType]) < len(veh_frac_headings): raise ValueError('"' + fractionType + '" under "vehicleFractions" in parameters file has only ' + @@ -116,7 +120,7 @@ def roadData(self, PARAMS, paramsFile): # Petrol and diesel fuel fractions expectedVehicles = ['motorcycle', 'taxi', 'car', 'bus', 'lgv', 'rigid', 'artic'] - missingVehicles = set(expectedVehicles).difference(PARAMS['petrolDieselFractions'].keys()) + missingVehicles = set(expectedVehicles).difference(list(PARAMS['petrolDieselFractions'].keys())) if len(missingVehicles) > 0: raise ValueError(paramsFile + ' is missing the following vehicles: ' + str(missingVehicles)) @@ -130,7 +134,7 @@ def roadData(self, PARAMS, paramsFile): def hoc(self, PARAMS, paramsFile): # Heat of combustion. 
Read in [MJ/kg] and convert to [J/kg] expectedFuels = ['gas', 'petrol', 'diesel', 'crude_oil'] - missingFuels = set(expectedFuels).difference(PARAMS['heatOfCombustion'].keys()) + missingFuels = set(expectedFuels).difference(list(PARAMS['heatOfCombustion'].keys())) if len(missingFuels) > 0: raise ValueError(paramsFile + ' is missing the following entries: ' + str(missingFuels)) @@ -149,5 +153,5 @@ def bankHolidays(self, PARAMS, paramsFile): for hol in PARAMS['params']['custom_holidays']: try: self.customHolidays.append(dt.strptime(hol, '%Y-%m-%d').date()) - except Exception, e: + except Exception as e: raise ValueError('Invalid custom holiday "' + str(hol) + '" specified. Must be in format YYYY-mm-dd') \ No newline at end of file diff --git a/GreaterQF/PythonQF2/Partitions.py b/GreaterQF/PythonQF2/Partitions.py index 06c790f..b4c74f2 100644 --- a/GreaterQF/PythonQF2/Partitions.py +++ b/GreaterQF/PythonQF2/Partitions.py @@ -1,5 +1,6 @@ +from builtins import object import csv -class Partitions: +class Partitions(object): ''' Class that stores latent, sensible and (where applicable), wastewater partitioning coefficients for each QF component. This reflects the model configuration, with certain components all set to 0 if they are switched off. 
NB "switched off" @@ -65,26 +66,26 @@ def __init__(self, conf, params): self.wasteWater['industrial']['crude_oil'] = (1 - self.latent['crude_oil']) * params.waterHeatFract['industrial']['crude_oil']* params.heaterEffic['gas'] # Set some or all of these components to 0 based on model run configuration - for k in self.wasteWater.keys(): - for c in self.wasteWater[k].keys(): + for k in list(self.wasteWater.keys()): + for c in list(self.wasteWater[k].keys()): self.wasteWater[k][c] = conf.wastewater_qf * self.wasteWater[k][c] - for k in self.sensible.keys(): + for k in list(self.sensible.keys()): if type(self.sensible[k]) is dict: - for c in self.sensible[k].keys(): + for c in list(self.sensible[k].keys()): self.sensible[k][c] = conf.sensible_qf * self.sensible[k][c] else: self.sensible[k] = conf.sensible_qf * self.sensible[k] - for k in self.latent.keys(): + for k in list(self.latent.keys()): self.latent[k] = conf.latent_qf * self.latent[k] # As a slightly redundant shortcut, what is the total flux for each fuel and/or sector after the user choices are acconuted for? 
self.fluxProp = {} - for k in self.sensible.keys(): + for k in list(self.sensible.keys()): if type(self.sensible[k]) is dict: self.fluxProp[k] = {} - for c in self.sensible[k].keys(): + for c in list(self.sensible[k].keys()): self.fluxProp[k][c] = self.wasteWater[k][c] + self.sensible[k][c] + self.latent[c] else: self.fluxProp[k] = self.sensible[k] + self.latent[k] \ No newline at end of file diff --git a/GreaterQF/PythonQF2/Population.py b/GreaterQF/PythonQF2/Population.py index 6ae68a5..30fd7ee 100644 --- a/GreaterQF/PythonQF2/Population.py +++ b/GreaterQF/PythonQF2/Population.py @@ -1,9 +1,12 @@ +from __future__ import print_function +from __future__ import absolute_import +from builtins import object from datetime import datetime -from DataManagement.LookupLogger import LookupLogger -from DataManagement.SpatialTemporalResampler import SpatialTemporalResampler -from DataManagement.spatialHelpers import * -from PyQt4.QtCore import QSettings -class Population: +from .DataManagement.LookupLogger import LookupLogger +from .DataManagement.SpatialTemporalResampler import SpatialTemporalResampler +from .DataManagement.spatialHelpers import * +from qgis.PyQt.QtCore import QSettings +class Population(object): # Store spatially and temporally resolved residential and workday population # Provides population density for each feateure # Makes heavy use of QGIS API @@ -172,6 +175,7 @@ def testIt(): a.setWorkPop(wp['shapefile'], wp['start_date'], wp['field_to_use'], epsgCode=wp['epsg']) # Get downscaled shapefiles for 2014 - print a.getPopTable(datetime.strptime('2013-01-01', '%Y-%m-%d')) + # fix_print_with_import + print(a.getPopTable(datetime.strptime('2013-01-01', '%Y-%m-%d'))) return a.getResPopLayer(datetime.strptime('2014-01-01', '%Y-%m-%d')) diff --git a/GreaterQF/PythonQF2/TimeProfiles.py b/GreaterQF/PythonQF2/TimeProfiles.py index 30142f1..b0c9835 100644 --- a/GreaterQF/PythonQF2/TimeProfiles.py +++ b/GreaterQF/PythonQF2/TimeProfiles.py @@ -8,6 +8,7 @@ WhatSeason - 
Finds season, returns integer. DateDiff - Finds the difference in days between two dates. ''' +from builtins import range import datetime as dt from datetime import date as dtd from datetime import timedelta @@ -26,37 +27,37 @@ def easterLookup(year): # Hard coded because parliament didn't set a constant date # Easter Monday bank holidays easters = {} - easters[2000] = [dtd(2000, 04, 24)] - easters[2001] = [dtd(2001, 04, 16)] - easters[2002] = [dtd(2002, 04, 1)] - easters[2003] = [dtd(2003, 04, 21)] - easters[2004] = [dtd(2004, 04, 12)] - easters[2005] = [dtd(2005, 03, 28)] - easters[2006] = [dtd(2006, 04, 17)] - easters[2007] = [dtd(2007, 04, 9)] - easters[2008] = [dtd(2008, 03, 24)] - easters[2009] = [dtd(2009, 04, 13)] - easters[2010] = [dtd(2010, 04, 05)] - easters[2011] = [dtd(2011, 04, 25)] - easters[2012] = [dtd(2012, 04, 9)] - easters[2013] = [dtd(2013, 04, 01)] - easters[2014] = [dtd(2014, 04, 21)] - easters[2015] = [dtd(2015, 04, 06)] - easters[2016] = [dtd(2016, 03, 28)] - easters[2017] = [dtd(2017, 04, 17)] - easters[2018] = [dtd(2018, 04, 02)] - easters[2019] = [dtd(2019, 04, 22)] - easters[2020] = [dtd(2020, 04, 13)] + easters[2000] = [dtd(2000, 0o4, 24)] + easters[2001] = [dtd(2001, 0o4, 16)] + easters[2002] = [dtd(2002, 0o4, 1)] + easters[2003] = [dtd(2003, 0o4, 21)] + easters[2004] = [dtd(2004, 0o4, 12)] + easters[2005] = [dtd(2005, 0o3, 28)] + easters[2006] = [dtd(2006, 0o4, 17)] + easters[2007] = [dtd(2007, 0o4, 9)] + easters[2008] = [dtd(2008, 0o3, 24)] + easters[2009] = [dtd(2009, 0o4, 13)] + easters[2010] = [dtd(2010, 0o4, 0o5)] + easters[2011] = [dtd(2011, 0o4, 25)] + easters[2012] = [dtd(2012, 0o4, 9)] + easters[2013] = [dtd(2013, 0o4, 0o1)] + easters[2014] = [dtd(2014, 0o4, 21)] + easters[2015] = [dtd(2015, 0o4, 0o6)] + easters[2016] = [dtd(2016, 0o3, 28)] + easters[2017] = [dtd(2017, 0o4, 17)] + easters[2018] = [dtd(2018, 0o4, 0o2)] + easters[2019] = [dtd(2019, 0o4, 22)] + easters[2020] = [dtd(2020, 0o4, 13)] # Can programatically get Good 
Friday from the above - for y in easters.keys(): + for y in list(easters.keys()): easters[y].append(easters[y][0] - timedelta(3)) return easters[year] def generateHolidays(firstYear, finalYear): # UK public holidays (fixed points or easily defined) between specified years (inclusive) - years = range(firstYear, finalYear + 1, 1) + years = list(range(firstYear, finalYear + 1, 1)) allHolidays = [] for y in years: allHolidays.extend(holidaysForYear(y)) @@ -68,10 +69,10 @@ def holidaysForYear(year): # Christmas day/boxing day falling on weekend isn't included (assumed standard weekend) holidays = [] # New year: - holidays.append(dtd(year, 01, 01)) + holidays.append(dtd(year, 0o1, 0o1)) # If 2 or 3 january is a monday, this is the bank holiday - jan2 = dtd(year, 01, 02) - jan3 = dtd(year, 01, 03) + jan2 = dtd(year, 0o1, 0o2) + jan3 = dtd(year, 0o1, 0o3) if jan2.weekday() == 0: holidays.append(jan2) if jan3.weekday() == 0: @@ -80,7 +81,7 @@ def holidaysForYear(year): # Get easter monday and friday bank holidays from lookup function holidays.extend(easterLookup(year)) # Early and late may - may1 = dtd(year, 05, 01) + may1 = dtd(year, 0o5, 0o1) may1 = may1 if may1.weekday() is 0 else may1 + timedelta(7 - may1.weekday()) holidays.append(may1) holidays.append(dtd(year, 5, 31) - timedelta(dtd(year, 5, 31).weekday())) diff --git a/GreaterQF/PythonQF2/Transport.py b/GreaterQF/PythonQF2/Transport.py index beccee0..cc7115e 100644 --- a/GreaterQF/PythonQF2/Transport.py +++ b/GreaterQF/PythonQF2/Transport.py @@ -1,12 +1,16 @@ +from __future__ import absolute_import +from builtins import str +from builtins import map +from builtins import object from datetime import datetime as dt from string import lower -from DataManagement.LookupLogger import LookupLogger -from DataManagement.SpatialTemporalResampler import SpatialTemporalResampler -from DataManagement.spatialHelpers import * +from .DataManagement.LookupLogger import LookupLogger +from .DataManagement.SpatialTemporalResampler 
import SpatialTemporalResampler +from .DataManagement.spatialHelpers import * from qgis.core import QgsSpatialIndex from shutil import rmtree -class Transport(): +class Transport(object): '''Produce transport energy data based on input vector map of road segments, along with traffic data''' def __init__(self, fuelConsumption, modelParams, logger=LookupLogger()): @@ -64,7 +68,7 @@ def validateInputs(self, startTime, shapefile, epsgCode, roadTypeField, roadType raise ValueError('Name of attribute containing road classification must be a string') # Check the list of identifiers for road types contain all three types - matchingRoadClasses = set(self.roadTypes).difference(roadTypeNames.keys()) + matchingRoadClasses = set(self.roadTypes).difference(list(roadTypeNames.keys())) if len(matchingRoadClasses) > 0: raise ValueError('Dict containing road class identifiers must have keys: ' + str(self.roadTypes)) @@ -78,8 +82,8 @@ def injectFuelConsumption(self, filename, startDate, epsgCode): :param epsgCode: EPSG code of shapefile :return: QgsVectorLayer of the shapefile provided ''' - allFuelFields = self.dieselNames.values() - allFuelFields.extend(self.petrolNames.values()) + allFuelFields = list(self.dieselNames.values()) + allFuelFields.extend(list(self.petrolNames.values())) return self.transport.injectInput(filename, epsgCode, allFuelFields, startDate) def addTransportData(self, shapefile, startTime, epsgCode, roadTypeField, roadTypeNames, inputIdField, speedDataField=None, speedConversionFactor=None, totalAADTField=None, vAADTFields=None): @@ -129,15 +133,15 @@ def addTransportData(self, shapefile, startTime, epsgCode, roadTypeField, roadTy if type(vAADTFields) is dict: allowedKeys1 = self.completeInputs allowedKeys2 = self.modelledTypes - missingFrom1 = list(set(allowedKeys1).difference(vAADTFields.keys())) - missingFrom2 = list(set(allowedKeys2).difference(vAADTFields.keys())) + missingFrom1 = list(set(allowedKeys1).difference(list(vAADTFields.keys()))) + missingFrom2 
= list(set(allowedKeys2).difference(list(vAADTFields.keys()))) if len(missingFrom1) == 0: completeInputAADTprovided = True elif len(missingFrom2) == 0: modelledTypesAADTprovided = True else: - raise ValueError('The vehicle AADT field names provided are incomplete. Expected: ' + str(allowedKeys1) + ' OR ' + str(allowedKeys2) + '. Got: ' + str(vAADTFields.keys())) - fieldsToSample.extend(vAADTFields.values()) + raise ValueError('The vehicle AADT field names provided are incomplete. Expected: ' + str(allowedKeys1) + ' OR ' + str(allowedKeys2) + '. Got: ' + str(list(vAADTFields.keys()))) + fieldsToSample.extend(list(vAADTFields.values())) # Make a copy of the shapefile in a temp folder (we wish to change it) # Ensure the input layer has the right projection when it gets there @@ -157,7 +161,7 @@ def addTransportData(self, shapefile, startTime, epsgCode, roadTypeField, roadTy fieldNames = {a.name(): i for i, a in enumerate(inputLayer.dataProvider().fields())} # Check that the requested field names are actually present in the layer - missingFields = list(set(fieldsToSample).difference(fieldNames.keys())) + missingFields = list(set(fieldsToSample).difference(list(fieldNames.keys()))) if len(missingFields) > 0: raise ValueError('Some of the transport shapefile fields referenced were not found in the shapefile:' + str(missingFields)) @@ -179,7 +183,7 @@ def addTransportData(self, shapefile, startTime, epsgCode, roadTypeField, roadTy # Get translation to look up internal feature ID based on our preferred ID field t = shapefile_attributes(outputLayer)[self.transport.templateIdField] - featureMapper = pd.Series(index=map(intOrString, t.values), data=map(intOrString, t.index)) + featureMapper = pd.Series(index=list(map(intOrString, t.values)), data=list(map(intOrString, t.index))) t = None # Convert road lengths and AADT data to total fuel use each day on each segment of road @@ -191,8 +195,8 @@ def addTransportData(self, shapefile, startTime, epsgCode, roadTypeField, roadTy 
fuelUseData = fuelUseDict['fuelUse'] fuelUseNames = fuelUseDict['names'] - allFuelFields = fuelUseNames['petrol'].values() - allFuelFields.extend(fuelUseNames['diesel'].values()) + allFuelFields = list(fuelUseNames['petrol'].values()) + allFuelFields.extend(list(fuelUseNames['diesel'].values())) # Get road segment lengths inside each output polygon, along with attributes of each of these intersected segments intersectedLines = intersecting_amounts([], inputIndex, inputLayer, outputLayer, inputIdField, self.transport.templateIdField) @@ -208,8 +212,8 @@ def addTransportData(self, shapefile, startTime, epsgCode, roadTypeField, roadTy # to disaggregate fuel use into each output feature, and to calculate total fuel use in each output feature areas = self.transport.getAreas() outputLayer.startEditing() - fuelConsumption = pd.DataFrame(index = intersectedLines.keys(), columns = newFieldIndices) # Results container for each feature - for outfeat_id in intersectedLines.keys(): + fuelConsumption = pd.DataFrame(index = list(intersectedLines.keys()), columns = newFieldIndices) # Results container for each feature + for outfeat_id in list(intersectedLines.keys()): fuelConsumption[:].loc[outfeat_id] = 0 if len(intersectedLines[outfeat_id]) > 0: diff --git a/GreaterQF/PythonQF2/TransportEnergyData.py b/GreaterQF/PythonQF2/TransportEnergyData.py index 6af6af6..50892bf 100644 --- a/GreaterQF/PythonQF2/TransportEnergyData.py +++ b/GreaterQF/PythonQF2/TransportEnergyData.py @@ -1,11 +1,14 @@ +from __future__ import print_function +from __future__ import absolute_import +from builtins import object from datetime import datetime -from DataManagement.LookupLogger import LookupLogger -from DataManagement.SpatialTemporalResampler import SpatialTemporalResampler -from DataManagement.spatialHelpers import * -from PyQt4.QtCore import QSettings +from .DataManagement.LookupLogger import LookupLogger +from .DataManagement.SpatialTemporalResampler import SpatialTemporalResampler +from 
.DataManagement.spatialHelpers import * +from qgis.PyQt.QtCore import QSettings -class TransportEnergyData: +class TransportEnergyData(object): # Read in raw transport inputs (a mixture of single values, spatial polygons and point counts) # and produce energy estimates in spatial units as specified by a template shapefile @@ -123,5 +126,6 @@ def testIt(): trans['start_date'] = datetime.strptime('2008-01-01', '%Y-%m-%d') a.setEnergy(trans['shapefile'], trans['start_date'], trans['field_to_use'], epsgCode=trans['epsg']) # Get downscaled shapefiles for 2014 - print a.getEnergyTable(datetime.strptime('2013-01-01', '%Y-%m-%d')) + # fix_print_with_import + print(a.getEnergyTable(datetime.strptime('2013-01-01', '%Y-%m-%d'))) diff --git a/GreaterQF/PythonQF2/TransportProfiles.py b/GreaterQF/PythonQF2/TransportProfiles.py index 8e36e1d..515e600 100644 --- a/GreaterQF/PythonQF2/TransportProfiles.py +++ b/GreaterQF/PythonQF2/TransportProfiles.py @@ -1,3 +1,7 @@ +from __future__ import absolute_import +from builtins import map +from builtins import str +from builtins import object # Object that stores and retrieves diurnal cycles of road traffic for different seasons and times of day for GreaterQF # An energy profile is a week-long template of relative energy use that changes only with season @@ -9,10 +13,10 @@ except: pass import pytz -from DataManagement.LookupLogger import LookupLogger -from DataManagement.TemporalProfileSampler import TemporalProfileSampler +from .DataManagement.LookupLogger import LookupLogger +from .DataManagement.TemporalProfileSampler import TemporalProfileSampler -class TransportProfiles: +class TransportProfiles(object): def __init__(self, city, use_uk_holidays, customHolidays = [], logger=LookupLogger()): ''' Instantiate :param city: String specifying the city being modelled (must be in timezone format e.g. 
Europe/London) @@ -105,13 +109,13 @@ def dealWithInputFile(self, file): dl = pd.read_csv(file,skipinitialspace=True) # Should be 3x each season in header - if len(dl.keys()) != 8: + if len(list(dl.keys())) != 8: raise ValueError('There must be 8 columns in ' + file) - dl.columns = map(string.lower, dl.columns.tolist()) + dl.columns = list(map(string.lower, dl.columns.tolist())) expectedHeadings = ['motorcycles', 'taxis', 'cars', 'buses', 'lgvs', 'rigids', 'artics'] - matches = list(set(dl.keys()[1:]).intersection(expectedHeadings)) - if len(matches) != len(dl.keys())-1: + matches = list(set(list(dl.keys())[1:]).intersection(expectedHeadings)) + if len(matches) != len(list(dl.keys()))-1: raise ValueError('Top row of transport diurnal profiles must contain each of: ' + str(expectedHeadings)) firstDataRow = 3 @@ -130,9 +134,9 @@ def dealWithInputFile(self, file): # Try to extract the timezone from the file header try: - tz = pytz.timezone(dl[dl.keys()[1]][2]) + tz = pytz.timezone(dl[list(dl.keys())[1]][2]) except Exception: - raise ValueError('Invalid timezone "' + dl[dl.keys()[1]][2] + '" specified in ' + file + + raise ValueError('Invalid timezone "' + dl[list(dl.keys())[1]][2] + '" specified in ' + file + '. 
This should be of the form "UTC" or "Europe/London" as per python timezone documentation') # Step through the list of transport types, adding the profiles @@ -140,7 +144,7 @@ def dealWithInputFile(self, file): try: sd = pd.datetime.strptime(dl[dl.columns[1]][0], '%Y-%m-%d') ed = pd.datetime.strptime(dl[dl.columns[1]][1], '%Y-%m-%d') - except Exception, e: + except Exception as e: raise Exception('Second column of Rows 2 and 3 of ' + file + ' must be dates in the format YYYY-mm-dd') sd = tz.localize(sd) diff --git a/GreaterQF/greater_qf.py b/GreaterQF/greater_qf.py index 34e3fd9..0d32e6c 100644 --- a/GreaterQF/greater_qf.py +++ b/GreaterQF/greater_qf.py @@ -20,19 +20,23 @@ * * ***************************************************************************/ """ -from PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplication, Qt -from PyQt4.QtGui import QAction, QIcon, QMessageBox, QFileDialog +from __future__ import absolute_import +from builtins import str +from builtins import object +from qgis.PyQt.QtCore import QSettings, QTranslator, qVersion, QCoreApplication, Qt +from qgis.PyQt.QtWidgets import QAction, QMessageBox, QFileDialog +from qgis.PyQt.QtGui import QIcon from qgis.gui import QgsMessageBar -from greater_qf_dialog import GreaterQFDialog +from .greater_qf_dialog import GreaterQFDialog from datetime import timedelta from datetime import datetime as dt import os.path import webbrowser # GQF specific code -from PythonQF2.Config import Config -from PythonQF2.GreaterQF import Model -from time_displayer import time_displayer +from .PythonQF2.Config import Config +from .PythonQF2.GreaterQF import Model +from .time_displayer import time_displayer try: import pandas as pd @@ -40,7 +44,7 @@ except: pass -class GreaterQF: +class GreaterQF(object): """QGIS Plugin Implementation.""" def __init__(self, iface): @@ -73,7 +77,7 @@ def __init__(self, iface): try: import pandas import matplotlib as plt - except Exception, e: + except Exception as e: 
QMessageBox.critical(None, 'Error', 'GQF requires the pandas and matplotlib packages to be installed. Please consult the manual for further information') return @@ -131,7 +135,7 @@ def chooseProcessedDataPath(self): # Check for manifest file or reject try: self.model.setPreProcessedInputFolder(selectedFolder) - except Exception,e: + except Exception as e: QMessageBox.critical(None, 'Error setting processed data path', str(e)) return self.dlg.txtProcessedDataPath.setText(selectedFolder) @@ -158,7 +162,7 @@ def dataSources(self): df = a.selectedFiles() try: self.model.setDataSources(df[0]) - except Exception,e: + except Exception as e: QMessageBox.critical(None, 'Invalid Data Sources file provided', str(e)) return @@ -174,7 +178,7 @@ def disaggregate(self): self.model.setPreProcessedInputFolder(processed) def help(self): - url = "http://umep-docs.readthedocs.io/en/latest/processor/Urban%20Energy%20Balance%20GQ.html" + url = "http://urban-climate.net/umep/UMEP_Manual#Urban_Energy_Balance:_GQF" webbrowser.open_new_tab(url) def visualise(self): @@ -195,7 +199,7 @@ def loadParams(self): cf = a.selectedFiles() try: self.model.setParameters(cf[0]) - except Exception, e: + except Exception as e: QMessageBox.critical(None, 'Invalid parameters file', str(e)) return @@ -214,7 +218,7 @@ def loadResults(self): # Check for manifest file or reject try: locations = self.model.loadModelResults(selectedFolder) - except Exception,e: + except Exception as e: QMessageBox.critical(None, 'Error loading previous model results', str(e)) return @@ -231,14 +235,14 @@ def loadResults(self): try: self.model.setDataSources(locations['dsFile']) - except Exception,e: + except Exception as e: QMessageBox.critical(None, 'Error loading previous model data sources', str(e) + '. 
Re-runs not available') self.dlg.cmdRunCancel.setEnabled(False) self.dlg.txtParams.setText(locations['paramsFile']) try: self.model.setParameters(locations['paramsFile']) - except Exception,e: + except Exception as e: QMessageBox.critical(None, 'Error loading previous model configuration', str(e) + '. Re-runs not available') self.dlg.cmdRunCancel.setEnabled(False) @@ -406,7 +410,7 @@ def startWorker(self): self.dlg.cmdLoadResults.clicked.disconnect() self.dlg.cmdLoadResults.clicked.connect(self.reset, Qt.UniqueConnection) self.dlg.cmdLoadResults.setText('Clear data') - except Exception, e: + except Exception as e: QMessageBox.critical(None, 'Error running GQF', str(e)) self.dlg.cmdRunCancel.setEnabled(True) diff --git a/GreaterQF/greater_qf_dialog.py b/GreaterQF/greater_qf_dialog.py index 28fdb15..899f08e 100644 --- a/GreaterQF/greater_qf_dialog.py +++ b/GreaterQF/greater_qf_dialog.py @@ -23,14 +23,15 @@ import os -from PyQt4 import QtGui, uic, QtCore +from qgis.PyQt import uic +from qgis.PyQt.QtWidgets import QDialog FORM_CLASS, _ = uic.loadUiType(os.path.join( os.path.dirname(__file__), 'greater_qf_dialog_base.ui')) -class GreaterQFDialog(QtGui.QDialog, FORM_CLASS): +class GreaterQFDialog(QDialog, FORM_CLASS): def __init__(self, parent=None): """Constructor.""" super(GreaterQFDialog, self).__init__(parent) diff --git a/GreaterQF/time_displayer.py b/GreaterQF/time_displayer.py index 2bf458d..1fc3a1f 100644 --- a/GreaterQF/time_displayer.py +++ b/GreaterQF/time_displayer.py @@ -1,11 +1,15 @@ -from PyQt4 import QtGui, uic -from PyQt4.QtGui import QListWidgetItem -from PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplication -from PyQt4.QtGui import QAction, QIcon, QMessageBox, QFileDialog +from __future__ import absolute_import +from builtins import map +from builtins import str +from qgis.PyQt import QtGui, uic +from qgis.PyQt.QtWidgets import QListWidgetItem +from qgis.PyQt.QtCore import QSettings, QTranslator, qVersion, QCoreApplication +from 
qgis.PyQt.QtWidgets import QAction, QMessageBox, QFileDialog +from qgis.PyQt.QtGui import QIcon import os FORM_CLASS, _ = uic.loadUiType(os.path.join(os.path.dirname(__file__), 'time_displayer.ui')) -from qgis.core import QgsMessageLog, QgsMapLayerRegistry, QgsVectorLayer, QgsMapRenderer, QgsRectangle -from PythonQF2.DataManagement.spatialHelpers import populateShapefileFromTemplate, colourRanges, openShapeFileInMemory, duplicateVectorLayer +from qgis.core import QgsMapRendererJob, QgsRectangle, QgsProject +from .PythonQF2.DataManagement.spatialHelpers import populateShapefileFromTemplate, colourRanges, openShapeFileInMemory, duplicateVectorLayer try: import pandas as pd import numpy as np @@ -15,8 +19,8 @@ pass from datetime import datetime as dt -from PyQt4.QtGui import QImage, QColor, QPainter -from PyQt4.QtCore import QSize +from qgis.PyQt.QtGui import QImage, QColor, QPainter +from qgis.PyQt.QtCore import QSize def makeRect(color): return Rectangle((0,0), 1, 1,fc=color) @@ -73,7 +77,7 @@ def makeTimeseries(self): :return: None ''' id = self.lstAreas.currentItem().text() - result = self.model.fetchResultsForLocation(intOrString(id), dt(1900,01,01), dt(2200,01,01)) + result = self.model.fetchResultsForLocation(intOrString(id), dt(1900,0o1,0o1), dt(2200,0o1,0o1)) pyplot.rcParams["font.family"] = "arial" fig = pyplot.figure(id, facecolor='white',figsize=(8,17), dpi=80) @@ -92,7 +96,7 @@ def makeTimeseries(self): linewidth=0, colors=[b_col, t_col, m_col]) pyplot.title('Total', fontsize=titletext) - rects1 = map(makeRect, [b_col, t_col, m_col]) + rects1 = list(map(makeRect, [b_col, t_col, m_col])) a1.legend(rects1, ('Bldg', 'Tran', 'Met'), loc='best', fontsize=legendtext) a1.set_ylim((0, a1.get_ylim()[1])) @@ -116,7 +120,7 @@ def makeTimeseries(self): linewidth=0, colors=colourOrder2) pyplot.title('Buildings', fontsize=titletext) - rects2 = map(makeRect, colourOrder2) + rects2 = list(map(makeRect, colourOrder2)) a2.legend(rects2, ('Dom E', 'Dom G', 'Eco 7', 'Ind E', 
'Ind G', 'Oth'), loc='best', fontsize=legendtext) a2.set_ylim((0, a2.get_ylim()[1])) pyplot.ylabel('W m-2', fontsize=titletext) @@ -146,7 +150,7 @@ def makeTimeseries(self): # Set X limit to accommodate legend pyplot.title('Transport', fontsize=titletext) - rects3 = map(makeRect, colourOrder3) + rects3 = list(map(makeRect, colourOrder3)) a3.legend(rects3, ('Moto', 'Taxi', 'Car', 'Bus', 'LGV', 'Rigid', 'Artic'), loc='best', fontsize=legendtext) pyplot.ylabel('$W m^{-2}$', fontsize=titletext) @@ -166,7 +170,7 @@ def populateTimeList(self): :return: ''' def toString(x): return x.strftime('%Y-%m-%d %H:%M') - timeLabels = map(toString, self.model.getTimeSteps()) + timeLabels = list(map(toString, self.model.getTimeSteps())) for label in timeLabels: time = QListWidgetItem(label) self.lstTimes.addItem(time) @@ -198,13 +202,13 @@ def updateDisplay(self): range_maxima = [0.000001, 0.1, 1, 10, 100, 1000] colours = ['#CECECE', '#FEE6CE', '#FDAE6B', '#F16913', '#D94801', '#7F2704'] opacity = 1 - for component in self.componentTranslation.values(): + for component in list(self.componentTranslation.values()): layerName = component + t.strftime(' %Y-%m-%d %H:%M UTC') - if component == self.componentTranslation.values()[0]: + if component == list(self.componentTranslation.values())[0]: colourRanges(new_layer, component, opacity, range_minima, range_maxima, colours) new_layer.setLayerName(layerName) layerId = new_layer.id() - QgsMapLayerRegistry.instance().addMapLayer(new_layer) + QgsProject.instance().addMapLayer(new_layer) proportion = new_layer.extent().height() / new_layer.extent().width() else: @@ -213,7 +217,7 @@ def updateDisplay(self): layer.setLayerName(layerName) colourRanges(layer, component, opacity, range_minima, range_maxima, colours) layerId = layer.id() - QgsMapLayerRegistry.instance().addMapLayer(layer) + QgsProject.instance().addMapLayer(layer) proportion = layer.extent().height() / layer.extent().width() @@ -237,7 +241,7 @@ def updateDisplay(self): p.begin(img) 
p.setRenderHint(QPainter.Antialiasing) - render = QgsMapRenderer() + render = QgsMapRendererJob() # set layer set lst = [layerId] # add ID of every layer diff --git a/ImageMorphParam/image_morph_param.py b/ImageMorphParam/image_morph_param.py index 2b3b1e0..e636bf2 100644 --- a/ImageMorphParam/image_morph_param.py +++ b/ImageMorphParam/image_morph_param.py @@ -20,21 +20,28 @@ * * ***************************************************************************/ """ -from PyQt4.QtCore import QSettings, QTranslator, qVersion, QThread, QCoreApplication -from PyQt4.QtGui import QAction, QIcon, QMessageBox, QFileDialog +from __future__ import absolute_import +from future import standard_library +standard_library.install_aliases() +# from builtins import str +from builtins import range +# from builtins import object +from qgis.PyQt.QtCore import QSettings, QTranslator, qVersion, QThread, QCoreApplication +from qgis.PyQt.QtWidgets import QAction, QMessageBox, QFileDialog +from qgis.PyQt.QtGui import QIcon from qgis.gui import * from qgis.core import * import os from osgeo import gdal from ..Utilities.imageMorphometricParms_v1 import * -from impgworker import Worker -from image_morph_param_dialog import ImageMorphParamDialog +from .impgworker import Worker +from .image_morph_param_dialog import ImageMorphParamDialog import webbrowser # Initialize Qt resources from file resources.py # import resources_rc -class ImageMorphParam: +class ImageMorphParam(object): """QGIS Plugin Implementation.""" def __init__(self, iface): @@ -72,8 +79,10 @@ def __init__(self, iface): self.dlg.checkBoxOnlyBuilding.toggled.connect(self.text_enable) self.fileDialog = QFileDialog() - self.fileDialog.setFileMode(4) - self.fileDialog.setAcceptMode(1) + # self.fileDialog.setFileMode(4) + # self.fileDialog.setAcceptMode(1) + self.fileDialog.setFileMode(QFileDialog.Directory) + self.fileDialog.setOption(QFileDialog.ShowDirsOnly, True) for i in range(1, 25): if 360 % i == 0: @@ -95,9 +104,6 @@ def 
__init__(self, iface): # self.toolbar = self.iface.addToolBar(u'ImageMorphParam') # self.toolbar.setObjectName(u'ImageMorphParam') - # self.layerComboManagerPolygrid = VectorLayerCombo(self.dlg.comboBox_Polygrid) - # fieldgen = VectorLayerCombo(self.dlg.comboBox_Polygrid, initLayer="", options={"geomType": QGis.Polygon}) - # self.layerComboManagerPolyField = FieldCombo(self.dlg.comboBox_Field, fieldgen, initField="") self.layerComboManagerPolygrid = QgsMapLayerComboBox(self.dlg.widgetPolygrid) self.layerComboManagerPolygrid.setCurrentIndex(-1) self.layerComboManagerPolygrid.setFilters(QgsMapLayerProxyModel.PolygonLayer) @@ -106,12 +112,6 @@ def __init__(self, iface): self.layerComboManagerPolyField .setFilters(QgsFieldProxyModel.Numeric) self.layerComboManagerPolygrid .layerChanged.connect(self.layerComboManagerPolyField.setLayer) - # self.layerComboManagerDSMbuildground = RasterLayerCombo(self.dlg.comboBox_DSMbuildground) - # RasterLayerCombo(self.dlg.comboBox_DSMbuildground, initLayer="") - # self.layerComboManagerDEM = RasterLayerCombo(self.dlg.comboBox_DEM) - # RasterLayerCombo(self.dlg.comboBox_DEM, initLayer="") - # self.layerComboManagerDSMbuild = RasterLayerCombo(self.dlg.comboBox_DSMbuild) - # RasterLayerCombo(self.dlg.comboBox_DSMbuild, initLayer="") self.layerComboManagerDSMbuildground = QgsMapLayerComboBox(self.dlg.widgetDSMbuildground) self.layerComboManagerDSMbuildground.setFilters(QgsMapLayerProxyModel.RasterLayer) self.layerComboManagerDSMbuildground.setFixedWidth(175) @@ -213,38 +213,31 @@ def text_enable(self): self.dlg.label_3.setEnabled(True) self.dlg.label_4.setEnabled(False) - # Metod som startar traden, knyter signaler fran traden till metoder. Se impgworker.py for det arbete som traden utfor. 
def startWorker(self, dsm, dem, dsm_build, poly, poly_field, vlayer, prov, fields, idx, dir_poly, iface, plugin_dir, folderPath, dlg, imid, radius, degree, rm): # create a new worker instance - # Skapar en instans av metoden som innehaller det arbete som ska utforas i en trad - worker = Worker(dsm, dem, dsm_build, poly, poly_field, vlayer, prov, fields, idx, dir_poly, iface, plugin_dir, folderPath, dlg, imid, radius, degree, rm) - # andrar knappen som startar verktyget till en knapp som avbryter tradens arbete. self.dlg.runButton.setText('Cancel') self.dlg.runButton.clicked.disconnect() self.dlg.runButton.clicked.connect(worker.kill) self.dlg.closeButton.setEnabled(False) - # Skapar en trad som arbetet fran worker ska utforas i. thread = QThread(self.dlg) worker.moveToThread(thread) - # kopplar signaler fran traden till metoder i denna "fil" + worker.finished.connect(self.workerFinished) worker.error.connect(self.workerError) worker.progress.connect(self.progress_update) thread.started.connect(worker.run) - # startar traden + thread.start() self.thread = thread self.worker = worker - # Metod som ar kopplad till en signal som Worker(traden) skickar nar den utfort sitt arbete, killed ar en Boolean som - # skiljer mellan om traden blev "fardig" for att den gjorde sitt arbete eller om den avbrots def workerFinished(self, ret): - # Tar bort arbetaren (Worker) och traden den kors i + try: self.worker.deleteLater() except RuntimeError: @@ -253,16 +246,13 @@ def workerFinished(self, ret): self.thread.wait() self.thread.deleteLater() - #andra tillbaka Run-knappen till sitt vanliga tillstand och skicka ett meddelande till anvanderen. if ret == 1: self.dlg.runButton.setText('Run') self.dlg.runButton.clicked.disconnect() self.dlg.runButton.clicked.connect(self.start_progress) self.dlg.closeButton.setEnabled(True) self.dlg.progressBar.setValue(0) - # QMessageBox.information(None, "Image Morphometric Parameters", - # "Process finished! 
Check General Messages (speech bubble, lower left) " - # "to obtain information of the process.") + self.iface.messageBar().pushMessage("Image Morphometric Parameters", "Process finished! Check General Messages (speech bubble, lower left) " "to obtain information of the process.", duration=5) @@ -275,20 +265,14 @@ def workerFinished(self, ret): QMessageBox.information(None, "Image Morphometric Parameters", "Operations cancelled, " "process unsuccessful! See the General tab in Log Meassages Panel (speech bubble, lower right) for more information.") - #Metod som tar emot en signal fran traden ifall nagot gick fel, felmeddelanden skrivs till QGIS message log. def workerError(self, errorstring): - #strerror = str(errorstring) - QgsMessageLog.logMessage(errorstring, level=QgsMessageLog.CRITICAL) + QgsMessageLog.logMessage(errorstring, level=Qgis.Critical) - #Metod som tar emot signaler koontinuerligt fran traden som berattar att ett berakningsframsteg gjorts, uppdaterar - #progressbar def progress_update(self): self.steps +=1 self.dlg.progressBar.setValue(self.steps) - #Metoden som kors genom run-knappen, precis som tidigare. 
def start_progress(self): - #Steg for uppdatering av self.steps = 0 poly = self.layerComboManagerPolygrid.currentLayer() if poly is None: @@ -302,17 +286,15 @@ def start_progress(self): if poly_field is None: QMessageBox.critical(self.dlg, "Error", "An attribute filed with unique fields must be selected") return - # QMessageBox.information(None, "TEst", str(poly_field) ) vlayer = QgsVectorLayer(poly.source(), "polygon", "ogr") prov = vlayer.dataProvider() fields = prov.fields() - idx = vlayer.fieldNameIndex(poly_field) + # idx = vlayer.fieldNameIndex(poly_field) + idx = vlayer.fields().indexFromName(poly_field) dir_poly = self.plugin_dir + '/data/poly_temp.shp' - # j = 0 self.dlg.progressBar.setMaximum(vlayer.featureCount()) - # skapar referenser till lagern som laddas in av anvandaren i comboboxes if self.dlg.checkBoxOnlyBuilding.isChecked(): # Only building heights dsm_build = self.layerComboManagerDSMbuild.currentLayer() dsm = None @@ -359,24 +341,16 @@ def start_progress(self): radius = self.dlg.spinBoxDistance.value() degree = float(self.dlg.degreeBox.currentText()) - # Startar arbetarmetoden och traden, se startworker metoden ovan. self.startWorker(dsm, dem, dsm_build, poly, poly_field, vlayer, prov, fields, idx, dir_poly, self.iface, self.plugin_dir, self.folderPath, self.dlg, imid, radius, degree, rm) - # Allt som ska ske efter att arbetaren startats hanteras genom metoderna som tar emot signaler fran traden. - # Framforallt workerFinished metoden. Se impgworker.py filen for implementering av det arbete som traden utfor. 
- def run(self): - """Run method that performs all the real work""" - # show the dialog self.dlg.show() - # Run the dialog event loop self.dlg.exec_() - gdal.UseExceptions() gdal.AllRegister() def help(self): - url = "http://umep-docs.readthedocs.io/en/latest/pre-processor/Urban%20Morphology%20Morphometric%20" \ - "Calculator%20(Grid).html" + url = "http://www.urban-climate.net/umep/UMEP_Manual#Urban_Morphology:" \ + "_Image_Morphometric_Parameter_Calculator_.28Grid.29" webbrowser.open_new_tab(url) \ No newline at end of file diff --git a/ImageMorphParam/image_morph_param_dialog.py b/ImageMorphParam/image_morph_param_dialog.py index dad81bd..1ea4bed 100644 --- a/ImageMorphParam/image_morph_param_dialog.py +++ b/ImageMorphParam/image_morph_param_dialog.py @@ -23,13 +23,14 @@ import os -from PyQt4 import QtGui, uic +from qgis.PyQt import uic +from qgis.PyQt.QtWidgets import QDialog FORM_CLASS, _ = uic.loadUiType(os.path.join( os.path.dirname(__file__), 'image_morph_param_dialog_base.ui')) -class ImageMorphParamDialog(QtGui.QDialog, FORM_CLASS): +class ImageMorphParamDialog(QDialog, FORM_CLASS): def __init__(self, parent=None): """Constructor.""" super(ImageMorphParamDialog, self).__init__(parent) diff --git a/ImageMorphParam/impgworker.py b/ImageMorphParam/impgworker.py index df59c78..b0b921f 100644 --- a/ImageMorphParam/impgworker.py +++ b/ImageMorphParam/impgworker.py @@ -1,11 +1,13 @@ +from builtins import str +from builtins import range # -*- coding: utf-8 -*- -from PyQt4 import QtCore -from PyQt4.QtCore import QVariant -from PyQt4.QtGui import QMessageBox # QFileDialog, QAction, QIcon, +from qgis.PyQt import QtCore +from qgis.PyQt.QtCore import QVariant +from qgis.PyQt.QtWidgets import QMessageBox # QFileDialog, QAction, QIcon, +from qgis.core import QgsFeature, QgsVectorFileWriter, QgsVectorDataProvider, QgsField, Qgis, QgsMessageLog # from qgis.gui import * -from qgis.core import QgsVectorLayer, QgsVectorFileWriter, QgsFeature, QgsRasterLayer, QgsGeometry, 
QgsMessageLog -from qgis.core import * -import traceback +# from qgis.core import QgsVectorLayer, QgsVectorFileWriter, QgsFeature, QgsRasterLayer, QgsGeometry, QgsMessageLog +# import traceback from ..Utilities.imageMorphometricParms_v1 import * from ..Utilities import RoughnessCalcFunctionV2 as rg from scipy import * @@ -21,6 +23,7 @@ import os import time + class Worker(QtCore.QObject): # Implementation av de signaler som traden skickar @@ -71,10 +74,7 @@ def run(self): ret = 0 imp_point = 0 - # Allt arbete en trad ska utforas maste goras i en try-sats try: - # j = 0 - # Loop som utfor det arbete som annars hade "hangt" anvandargranssnittet i Qgis pre = self.dlg.textOutput_prefix.text() header = ' Wd pai fai zH zHmax zHstd zd z0' numformat = '%3d %4.3f %4.3f %5.3f %5.3f %5.3f %5.3f %5.3f' @@ -88,10 +88,8 @@ def run(self): os.makedirs(self.folderPath[0] + '/' + pre) for f in self.vlayer.getFeatures(): # looping through each grid polygon - # Kollar sa att traden inte har avbrutits, ifall den har det sa slutar loopning. 
if self.killed is True: break - # pydevd.settrace('localhost', port=53100, stdoutToServer=True, stderrToServer=True) #used for debugging attributes = f.attributes() geometry = f.geometry() @@ -106,9 +104,10 @@ def run(self): # self.iface.messageBar().pushMessage("Test", str(loc)) else: r = 0 # Uses as info to separate from IMP point to grid - writer = QgsVectorFileWriter(self.dir_poly, "CP1250", self.fields, self.prov.geometryType(), + # writer = QgsVectorFileWriter(self.dir_poly, "CP1250", self.fields, self.prov.geometryType(), + # self.prov.crs(), "ESRI shapefile") + writer = QgsVectorFileWriter(self.dir_poly, "CP1250", self.fields, self.prov.wkbType(), self.prov.crs(), "ESRI shapefile") - if writer.hasError() != QgsVectorFileWriter.NoError: self.iface.messageBar().pushMessage("Error when creating shapefile: ", str(writer.hasError())) writer.addFeature(feature) @@ -190,7 +189,7 @@ def run(self): if np.sum(dsm_array) == (dsm_array.shape[0] * dsm_array.shape[1] * nd): QgsMessageLog.logMessage( "Grid " + str(f.attributes()[self.idx]) + " not calculated. Includes Only NoData Pixels", - level=QgsMessageLog.CRITICAL) + level=Qgis.Critical) cal = 0 else: dsm_array[dsm_array == nd] = np.mean(dem_array) @@ -200,7 +199,7 @@ def run(self): if nodata_test.any(): # == True QgsMessageLog.logMessage( "Grid " + str(f.attributes()[self.idx]) + " not calculated. 
Includes NoData Pixels", - level=QgsMessageLog.CRITICAL) + level=Qgis.Critical) cal = 0 else: cal = 1 @@ -312,10 +311,10 @@ def addattributes(self, vlayer, matdata, header, pre): vlayer.commitChanges() vlayer.updateFields() - if self.iface.mapCanvas().isCachingEnabled(): - vlayer.setCacheImage(None) - else: - self.iface.mapCanvas().refresh() + # if self.iface.mapCanvas().isCachingEnabled(): + # vlayer.setCacheImage(None) + # else: + self.iface.mapCanvas().refresh() else: QMessageBox.critical(None, "Error", "Vector Layer does not support adding attributes") diff --git a/ImageMorphParam/paramWorker.py b/ImageMorphParam/paramWorker.py deleted file mode 100644 index c632ba1..0000000 --- a/ImageMorphParam/paramWorker.py +++ /dev/null @@ -1,131 +0,0 @@ -from PyQt4 import QtCore, QtGui -import traceback -import numpy as np -from ..Utilities import shadowingfunctions as shadow -import Image -from scipy import * -import numpy as np -import scipy.ndimage.interpolation as sc -import PIL - -import sys - -#ARBETARMETOD FOR TRADKLASS SOM INTE ANNU AR IMPLEMENTERAD. FUNGERAR INTE FORRAN PROBLEM MED MANGA TRADAR HAR LOSTS - -class ParamWorker(QtCore.QObject): - - finished = QtCore.pyqtSignal(object, object, object) - error = QtCore.pyqtSignal(Exception, basestring) - progress = QtCore.pyqtSignal() - - def __init__(self, dsm, dem, scale, mid, dtheta, f, idx, dlg): - QtCore.QObject.__init__(self) - self.killed = False - self.dsm = dsm - self.dem = dem - self.scale = scale - self.mid = mid - self.dtheta = dtheta - self.f = f - self.idx = idx - self.dlg = dlg - - def run(self): - ret = None - - try: - build = self.dsm - self.dem - build[(build < 2.)] = 0. 
# building should be higher than 2 meter - - # new part - buildvec = build[np.where(build > 0)] - if buildvec.size > 0: - zH_all = buildvec.mean() - zHmax_all = buildvec.max() - zH_sd_all = buildvec.std() - pai_all = (buildvec.size * 1.0) / (build.size * 1.0) - else: - zH_all = 0 - zHmax_all = 0 - zH_sd_all = 0 - pai_all = 0 - - fai = np.zeros((72, 1)) - zH = np.zeros((72, 1)) - zHmax = np.zeros((72, 1)) - zH_sd = np.zeros((72, 1)) - pai = np.zeros((72, 1)) - deg = np.zeros((72, 1)) - - #%subset and center - n = self.dsm.shape[0] - imid = np.floor((n/2.)) - if self.mid == 1: - dY = np.int16(np.arange(np.dot(1, imid))) # the whole length of the grid (y) - #self.dlg.progressBar.setRange(0., 360. / self.dtheta) - else: - dY = np.int16(np.arange(np.dot(1, n))) # the whole length of the grid (y) - - dX = np.int16(np.arange(imid, imid+1)) - lx = dX.shape[0] - ly = dY.shape[0] - filt1 = np.ones((n, 1)) * -1 - filt2 = np.ones((n, 1)) - filt = np.array(np.hstack((filt1, filt2))).conj().T - j = int(0) - for angle in np.arange(0, (360.-self.dtheta+0) + self.dtheta, self.dtheta): - if self.killed is True: - break - - if self.mid == 1: - #self.dlg.progressBar.setValue(angle) - self.progress.emit() - - c = np.zeros((n, n)) - #d = sc.imrotate(build, angle, 'nearest') - d = sc.rotate(build, angle, reshape=False, mode='nearest') - b = ((build.max()-build.min())/d.max())*d+build.min() - a = b - if b.sum() != 0: # ground heights - #d = sc.imrotate(dsm, angle, 'nearest') - d = sc.rotate(build, angle, reshape=False, mode='nearest') - a = ((self.dsm.max()-self.dsm.min())/d.max())*d+self.dsm.min() - - #% convolve leading edge filter with domain - for i in np.arange(1, (n-1)+1): - if self.killed is True: - break - c[int(i)-1, :] = np.sum((filt*a[int(i)-1:i+1, :]), 0) - - wall = c[dY, dX] # wall array - wall = wall[np.where(wall > 2)] # wall vector - fai[j] = np.sum(wall)/((lx*ly)/self.scale) - bld = b[dY, dX] # building array - bld = bld[np.where(bld > 0)] # building vector - pai[j] = 
np.float32(bld.shape[0]) / (lx*ly) - deg[j] = angle - if np.float32(bld.shape[0]) == 0: - zH[j] = 0 - zHmax[j] = 0 - zH_sd[j] = 0 - else: - zH[j] = bld.sum() / np.float32(bld.shape[0]) - zHmax[j] = bld.max() - zH_sd[j] = bld.std() - - j += 1 - - immorphresult = {'fai': fai, 'pai': pai, 'zH': zH, 'deg': deg, 'zHmax': zHmax,'zH_sd': zH_sd, 'pai_all': pai_all, - 'zH_all': zH_all, 'zHmax_all': zHmax_all, 'zH_sd_all': zH_sd_all} - - if self.killed is False: - #self.progress.emit() - ret = immorphresult - - except Exception, e: - # forward the exception upstream - self.error.emit(e, traceback.format_exc()) - self.finished.emit(ret, self.f, self.idx) - - def kill(self): - self.killed = True diff --git a/ImageMorphParam/resources_rc.py b/ImageMorphParam/resources_rc.py index 4d73c27..64672ba 100644 --- a/ImageMorphParam/resources_rc.py +++ b/ImageMorphParam/resources_rc.py @@ -7,7 +7,7 @@ # # WARNING! All changes made in this file will be lost! -from PyQt4 import QtCore +from qgis.PyQt import QtCore qt_resource_data = "\ \x00\x00\x01\x2d\ diff --git a/ImageMorphParmsPoint/imagemorphparmspoint_v1.py b/ImageMorphParmsPoint/imagemorphparmspoint_v1.py index ac97e01..bc1e647 100644 --- a/ImageMorphParmsPoint/imagemorphparmspoint_v1.py +++ b/ImageMorphParmsPoint/imagemorphparmspoint_v1.py @@ -20,10 +20,14 @@ * * ***************************************************************************/ """ -from PyQt4.QtCore import QSettings, QTranslator, qVersion -from PyQt4.QtGui import QAction, QMessageBox, QFileDialog +from __future__ import absolute_import +from builtins import str +from builtins import range +from builtins import object +from qgis.PyQt.QtCore import QSettings, QTranslator, qVersion, QCoreApplication +from qgis.PyQt.QtWidgets import QAction, QMessageBox, QFileDialog from qgis.gui import * -from qgis.core import * +from qgis.core import QgsMapLayerProxyModel, QgsFeature, QgsGeometry, QgsVectorLayer, QgsPointXY, QgsVectorFileWriter, QgsProject import os from ..Utilities 
import RoughnessCalcFunctionV2 as rg from osgeo import gdal @@ -31,14 +35,14 @@ import webbrowser from ..Utilities.imageMorphometricParms_v1 import * # Initialize Qt resources from file resources.py -import resources_rc +# from . import resources_rc import sys # Import the code for the dialog -from imagemorphparmspoint_v1_dialog import ImageMorphParmsPointDialog +from .imagemorphparmspoint_v1_dialog import ImageMorphParmsPointDialog -class ImageMorphParmsPoint: +class ImageMorphParmsPoint(object): """QGIS Plugin Implementation.""" def __init__(self, iface): @@ -78,8 +82,10 @@ def __init__(self, iface): self.dlg.checkBoxOnlyBuilding.toggled.connect(self.text_enable) self.fileDialog = QFileDialog() - self.fileDialog.setFileMode(4) - self.fileDialog.setAcceptMode(1) + # self.fileDialog.setFileMode(4) + # self.fileDialog.setAcceptMode(1) + self.fileDialog.setFileMode(QFileDialog.Directory) + self.fileDialog.setOption(QFileDialog.ShowDirsOnly, True) for i in range(1, 25): if 360 % i == 0: @@ -255,18 +261,17 @@ def create_point(self, point): # Var kommer point ifran??? 
# coords = "{}, {}".format(point.x(), point.y()) # self.iface.messageBar().pushMessage("Coordinate selected", str(coords)) self.dlg.closeButton.setEnabled(1) - QgsMapLayerRegistry.instance().addMapLayer(self.poiLayer) + QgsProject.instance().addMapLayer(self.poiLayer) # create the feature fc = int(self.provider.featureCount()) feature = QgsFeature() - feature.setGeometry(QgsGeometry.fromPoint(point)) + feature.setGeometry(QgsGeometry.fromPointXY(point)) feature.setAttributes([fc, point.x(), point.y()]) self.poiLayer.startEditing() - self.poiLayer.addFeature(feature, True) + self.poiLayer.addFeature(feature) # ,True self.poiLayer.commitChanges() self.poiLayer.triggerRepaint() - # self.create_poly_layer(point) # Flyttad till generate_area self.dlg.setEnabled(True) self.dlg.activateWindow() self.pointx = point.x() @@ -298,13 +303,13 @@ def generate_area(self): def select_point(self): # Connected to "Secelct Point on Canves" if self.poiLayer is not None: - QgsMapLayerRegistry.instance().removeMapLayer(self.poiLayer.id()) + QgsProject.instance().removeMapLayer(self.poiLayer.id()) if self.polyLayer is not None: self.polyLayer.startEditing() self.polyLayer.selectAll() self.polyLayer.deleteSelectedFeatures() self.polyLayer.commitChanges() - QgsMapLayerRegistry.instance().removeMapLayer(self.polyLayer.id()) + QgsProject.instance().removeMapLayer(self.polyLayer.id()) self.canvas.setMapTool(self.pointTool) # Calls a canvas click and create_point @@ -336,26 +341,17 @@ def create_poly_layer(self): # Assign feature the buffered geometry radius = self.dlg.spinBox.value() - featurepoly.setGeometry(QgsGeometry.fromPoint( - QgsPoint(self.pointx, self.pointy)).buffer(radius, 1000, 1, 1, 1.0)) + # featurepoly.setGeometry(QgsGeometry.fromPointXY(QgsPoint(self.pointx, self.pointy)).buffer(radius, 1000, 1, 1, 1.0)) + featurepoly.setGeometry( + QgsGeometry.fromPointXY(QgsPointXY(self.pointx, self.pointy)).buffer(radius, 1000, 1, 1, 1.0)) featurepoly.setAttributes([fc]) 
self.polyLayer.startEditing() - self.polyLayer.addFeature(featurepoly, True) + self.polyLayer.addFeature(featurepoly) self.polyLayer.commitChanges() - QgsMapLayerRegistry.instance().addMapLayer(self.polyLayer) - # props = {'color_border': '255,165,0,125', 'style': 'no', 'style_border': 'solid'} - # s = QgsFillSymbolV2.createSimple(props) - # self.polyLayer.setRendererV2(QgsSingleSymbolRendererV2(s)) - - self.polyLayer.setLayerTransparency(42) - # self.polyLayer.repaintRequested(None) - # self.polyLayer.setCacheImage(None) + QgsProject.instance().addMapLayer(self.polyLayer) + self.polyLayer.setOpacity(0.42) self.polyLayer.triggerRepaint() - #QObject.connect(self.dlg.selectpoint, SIGNAL("clicked()"), self.select_point) - - #def whatsthisclicked(self, href): - #webbrowser.open_new_tab(href) def text_enable(self): if self.dlg.checkBoxOnlyBuilding.isChecked(): @@ -395,7 +391,7 @@ def start_process(self): dir_poly = self.plugin_dir + '/data/poly_temp.shp' - writer = QgsVectorFileWriter(dir_poly, "CP1250", fields, prov.geometryType(), + writer = QgsVectorFileWriter(dir_poly, "CP1250", fields, prov.wkbType(), prov.crs(), "ESRI shapefile") if writer.hasError() != QgsVectorFileWriter.NoError: @@ -430,7 +426,9 @@ def start_process(self): gdalruntextdsm_build = gdalwarp_os_dep + ' -dstnodata -9999 -q -overwrite -te ' + str(x - r) + ' ' + str(y - r) + \ ' ' + str(x + r) + ' ' + str(y + r) + ' -of GTiff "' + \ filePath_dsm_build + '" "' + self.plugin_dir + '/data/clipdsm.tif"' - # byta till gdal.Warp("aae.tif","aaa.asc", xRes=2.0, yRes=2.0, dstSRS='EPSG:3007') + # gdalruntextdsm_build = 'gdalwarp -dstnodata -9999 -q -overwrite -cutline ' + dir_poly + \ + # ' -crop_to_cutline -of GTiff ' + filePath_dsm_build + \ + # ' ' + self.plugin_dir + '/data/clipdsm.tif' if sys.platform == 'win32': subprocess.call(gdalruntextdsm_build, startupinfo=si) else: @@ -564,6 +562,7 @@ def run(self): gdal.AllRegister() def help(self): - url = 
"http://umep-docs.readthedocs.io/en/latest/pre-processor/Urban%20Morphology%20Morphometric%20" \ - "Calculator%20(Point).html" + # url = "file://" + self.plugin_dir + "/help/Index.html" + url = "http://www.urban-climate.net/umep/UMEP_Manual#Urban_Morphology:" \ + "_Morphometric_Calculator_.28Point.29" webbrowser.open_new_tab(url) \ No newline at end of file diff --git a/ImageMorphParmsPoint/imagemorphparmspoint_v1_dialog.py b/ImageMorphParmsPoint/imagemorphparmspoint_v1_dialog.py index fe7209e..b721c8f 100644 --- a/ImageMorphParmsPoint/imagemorphparmspoint_v1_dialog.py +++ b/ImageMorphParmsPoint/imagemorphparmspoint_v1_dialog.py @@ -22,15 +22,14 @@ """ import os -import webbrowser - -from PyQt4 import QtGui, uic, QtCore +from qgis.PyQt import uic +from qgis.PyQt.QtWidgets import QDialog FORM_CLASS, _ = uic.loadUiType(os.path.join( os.path.dirname(__file__), 'imagemorphparmspoint_v1_dialog_base.ui')) -class ImageMorphParmsPointDialog(QtGui.QDialog, FORM_CLASS): +class ImageMorphParmsPointDialog(QDialog, FORM_CLASS): def __init__(self, parent=None): """Constructor.""" super(ImageMorphParmsPointDialog, self).__init__(parent) @@ -40,4 +39,3 @@ def __init__(self, parent=None): # http://qt-project.org/doc/qt-4.8/designer-using-a-ui-file.html # #widgets-and-dialogs-with-auto-connect self.setupUi(self) - diff --git a/ImageMorphParmsPoint/resources_rc.py b/ImageMorphParmsPoint/resources_rc.py index 102e0d2..7e7bb44 100644 --- a/ImageMorphParmsPoint/resources_rc.py +++ b/ImageMorphParmsPoint/resources_rc.py @@ -7,7 +7,7 @@ # # WARNING! All changes made in this file will be lost! 
-from PyQt4 import QtCore +from qgis.PyQt import QtCore qt_resource_data = "\ \x00\x00\x01\x22\ diff --git a/LCZ_Converter/LCZ_converter.py b/LCZ_Converter/LCZ_converter.py index 3dd4698..760374e 100644 --- a/LCZ_Converter/LCZ_converter.py +++ b/LCZ_Converter/LCZ_converter.py @@ -20,23 +20,31 @@ * * ***************************************************************************/ """ -from PyQt4.QtCore import QThread, QSettings, QTranslator, qVersion -from PyQt4.QtGui import QFileDialog, QIcon, QAction, QMessageBox, QTableWidgetItem +from __future__ import absolute_import +from future import standard_library +standard_library.install_aliases() +from builtins import zip +from builtins import str +from builtins import range +from builtins import object +from qgis.PyQt.QtCore import QThread, QSettings, QTranslator, qVersion +from qgis.PyQt.QtWidgets import QFileDialog, QAction, QMessageBox, QTableWidgetItem +from qgis.PyQt.QtGui import QIcon from qgis.core import * from qgis.gui import * import os import os.path from osgeo import gdal # Initialize Qt resources from file resources.py -import resources +# from . 
import resources import webbrowser -import qgis.analysis +# import qgis.analysis # Import the code for the dialog -from LCZ_converter_dialog import LCZ_testDialog -from LCZworker import Worker +from .LCZ_converter_dialog import LCZ_testDialog +from .LCZworker import Worker import numpy as np -class LCZ_test: +class LCZ_test(object): """QGIS Plugin Implementation.""" def __init__(self, iface): @@ -78,8 +86,10 @@ def __init__(self, iface): self.dlg.colorButton.clicked.connect(self.color) self.dlg.progressBar.setValue(0) self.fileDialog = QFileDialog() - self.fileDialog.setFileMode(4) - self.fileDialog.setAcceptMode(1) # Save + # self.fileDialog.setFileMode(4) + # self.fileDialog.setAcceptMode(1) # Save + self.fileDialog.setFileMode(QFileDialog.Directory) + self.fileDialog.setOption(QFileDialog.ShowDirsOnly, True) if self.dlg.radioButton_2.isChecked(): self.dlg.pushButton_2.clicked.connect(self.updatetable) if self.dlg.radioButton.isChecked(): @@ -654,7 +664,8 @@ def start_progress(self): vlayer = QgsVectorLayer(poly.source(), "polygon", "ogr") prov = vlayer.dataProvider() fields = prov.fields() - idx = vlayer.fieldNameIndex(poly_field) + # idx = vlayer.fieldNameIndex(poly_field) + idx = vlayer.fields().indexFromName(poly_field) typetest = fields.at(idx).type() if typetest == 10: QMessageBox.critical(None, "ID field is sting type", "ID field must be either integer or float") @@ -727,7 +738,7 @@ def workerFinished(self, ret): def workerError(self, errorstring): #strerror = "Worker thread raised an exception: " + str(e) - QgsMessageLog.logMessage(errorstring, level=QgsMessageLog.CRITICAL) + QgsMessageLog.logMessage(errorstring, level=Qgis.Critical) def progress_update(self): self.steps +=1 @@ -754,5 +765,6 @@ def close(self): self.dlg.close() def help(self): - url = 'http://umep-docs.readthedocs.io/en/latest/pre-processor/Spatial%20Data%20LCZ%20Converter.html' + # url = "file://" + self.plugin_dir + "/help/Index.html" + url = 
'http://www.urban-climate.net/umep/UMEP_Manual#Urban_Land_Cover:_LCZ_converter' webbrowser.open_new_tab(url) diff --git a/LCZ_Converter/LCZ_converter_dialog.py b/LCZ_Converter/LCZ_converter_dialog.py index bd20f0d..3ec6e4c 100644 --- a/LCZ_Converter/LCZ_converter_dialog.py +++ b/LCZ_Converter/LCZ_converter_dialog.py @@ -23,13 +23,14 @@ import os -from PyQt4 import QtGui, uic +from qgis.PyQt import uic +from qgis.PyQt.QtWidgets import QDialog FORM_CLASS, _ = uic.loadUiType(os.path.join( os.path.dirname(__file__), 'LCZ_converter_dialog_base.ui')) -class LCZ_testDialog(QtGui.QDialog, FORM_CLASS): +class LCZ_testDialog(QDialog, FORM_CLASS): def __init__(self, parent=None): """Constructor.""" super(LCZ_testDialog, self).__init__(parent) diff --git a/LCZ_Converter/LCZ_fractions.py b/LCZ_Converter/LCZ_fractions.py index 7fa7a66..9351f67 100644 --- a/LCZ_Converter/LCZ_fractions.py +++ b/LCZ_Converter/LCZ_fractions.py @@ -1,3 +1,5 @@ +from builtins import str +from builtins import range import numpy as np from qgis.core import QgsMessageLog diff --git a/LCZ_Converter/LCZworker.py b/LCZ_Converter/LCZworker.py index a0b399b..48819ff 100644 --- a/LCZ_Converter/LCZworker.py +++ b/LCZ_Converter/LCZworker.py @@ -1,8 +1,12 @@ -from PyQt4 import QtCore -from PyQt4.QtCore import QVariant -from PyQt4.QtGui import QMessageBox +from __future__ import print_function +from __future__ import absolute_import +from builtins import str +from builtins import range +from qgis.PyQt import QtCore +from qgis.PyQt.QtCore import QVariant +from qgis.PyQt.QtWidgets import QMessageBox from qgis.core import * # QgsVectorLayer, QgsVectorFileWriter, QgsFeature, QgsRasterLayer, QgsGeometry, QgsMessageLog -from LCZ_fractions import * +from .LCZ_fractions import * import traceback import numpy as np from osgeo import gdal @@ -61,7 +65,7 @@ def run(self): feature.setAttributes(attributes) feature.setGeometry(geometry) - writer = QgsVectorFileWriter(self.dir_poly, "CP1250", self.fields, 
self.prov.geometryType(), + writer = QgsVectorFileWriter(self.dir_poly, "CP1250", self.fields, self.prov.wkbType(), self.prov.crs(), "ESRI shapefile") if writer.hasError() != QgsVectorFileWriter.NoError: @@ -87,7 +91,7 @@ def run(self): nd = dataset.GetRasterBand(1).GetNoDataValue() nodata_test = (lc_grid_array == nd) if np.sum(lc_grid_array) == (lc_grid_array.shape[0] * lc_grid_array.shape[1] * nd): - QgsMessageLog.logMessage("Grid " + str(f.attributes()[self.idx]) + " not calculated. Includes Only NoData Pixels", level=QgsMessageLog.CRITICAL) + QgsMessageLog.logMessage("Grid " + str(f.attributes()[self.idx]) + " not calculated. Includes Only NoData Pixels", level=Qgis.Critical) cal = 0 else: lc_grid_array[lc_grid_array == nd] = 0 @@ -146,7 +150,7 @@ def run(self): self.progress.emit() ret = 1 - except Exception, e: + except Exception as e: ret = 0 #self.error.emit(e, traceback.format_exc()) errorstring = self.print_exception() @@ -221,7 +225,8 @@ def textFileCheck(self, pre): wrote_header = False for line in fileinput.input(file_path, inplace=1): if not wrote_header: - print line, + # fix_print_with_import + print(line, end=' ') wrote_header = True else: line_split = line.split() @@ -231,7 +236,8 @@ def textFileCheck(self, pre): total += float(line_split[x]) if total == 1.0: - print line, + # fix_print_with_import + print(line, end=' ') else: diff = total - 1.0 # QgsMessageLog.logMessage("Diff: " + str(diff), level=QgsMessageLog.CRITICAL) @@ -254,11 +260,12 @@ def textFileCheck(self, pre): string_to_print += str(line_split[-1]) string_to_print += '\n' - print string_to_print, + # fix_print_with_import + print(string_to_print, end=' ') fileinput.close() - except Exception, e: + except Exception as e: errorstring = self.print_exception() - QgsMessageLog.logMessage(errorstring, level=QgsMessageLog.CRITICAL) + QgsMessageLog.logMessage(errorstring, level=Qgis.Critical) fileinput.close() def kill(self): diff --git a/LCZ_Converter/resources.py 
b/LCZ_Converter/resources.py index 17e4cd7..df9a105 100644 --- a/LCZ_Converter/resources.py +++ b/LCZ_Converter/resources.py @@ -6,7 +6,7 @@ # # WARNING! All changes made in this file will be lost! -from PyQt4 import QtCore +from qgis.PyQt import QtCore qt_resource_data = "\ \x00\x00\x04\x0a\ diff --git a/LandCoverFractionGrid/landcoverfraction_grid.py b/LandCoverFractionGrid/landcoverfraction_grid.py index 0717b65..07d3143 100644 --- a/LandCoverFractionGrid/landcoverfraction_grid.py +++ b/LandCoverFractionGrid/landcoverfraction_grid.py @@ -20,21 +20,28 @@ * * ***************************************************************************/ """ -from PyQt4.QtCore import QSettings, QTranslator, qVersion, QThread -from PyQt4.QtGui import QFileDialog, QIcon, QAction, QMessageBox +from __future__ import absolute_import +from future import standard_library +standard_library.install_aliases() +from builtins import str +from builtins import range +from builtins import object +from qgis.PyQt.QtCore import QSettings, QTranslator, qVersion, QThread +from qgis.PyQt.QtWidgets import QFileDialog, QAction, QMessageBox +from qgis.PyQt.QtGui import QIcon from qgis.core import * from qgis.gui import * import os from osgeo import gdal -from landcoverfraction_grid_dialog import LandCoverFractionGridDialog +from .landcoverfraction_grid_dialog import LandCoverFractionGridDialog import os.path -from lcfracworker import Worker +from .lcfracworker import Worker import webbrowser # Initialize Qt resources from file resources.py -import resources_rc +# from . 
import resources_rc -class LandCoverFractionGrid: +class LandCoverFractionGrid(object): """QGIS Plugin Implementation.""" def __init__(self, iface): @@ -71,8 +78,10 @@ def __init__(self, iface): self.dlg.progressBar.setValue(0) self.fileDialog = QFileDialog() - self.fileDialog.setFileMode(4) - self.fileDialog.setAcceptMode(1) # Save + # self.fileDialog.setFileMode(4) + # self.fileDialog.setAcceptMode(1) # Save + self.fileDialog.setFileMode(QFileDialog.Directory) + self.fileDialog.setOption(QFileDialog.ShowDirsOnly, True) for i in range(1, 25): if 360 % i == 0: @@ -209,7 +218,8 @@ def start_progress(self): vlayer = QgsVectorLayer(poly.source(), "polygon", "ogr") prov = vlayer.dataProvider() fields = prov.fields() - idx = vlayer.fieldNameIndex(poly_field) + # idx = vlayer.fieldNameIndex(poly_field) + idx = vlayer.fields().indexFromName(poly_field) typetest = fields.at(idx).type() if typetest == 10: @@ -293,7 +303,7 @@ def workerFinished(self, ret): "process unsuccessful! See the General tab in Log Meassages Panel (speech bubble, lower right) for more information.") def workerError(self, errorstring): - QgsMessageLog.logMessage(errorstring, level=QgsMessageLog.CRITICAL) + QgsMessageLog.logMessage(errorstring, level=Qgis.Critical) def progress_update(self): self.steps += 1 @@ -306,6 +316,5 @@ def run(self): gdal.AllRegister() def help(self): - url = 'http://umep-docs.readthedocs.io/en/latest/pre-processor/Urban%20Land%20Cover%20Land%20Cover%20' \ - 'Fraction%20(Grid).html' + url = 'http://www.urban-climate.net/umep/UMEP_Manual#Urban_Land_Cover:_Land_Cover_Fraction_.28Grid.29' webbrowser.open_new_tab(url) diff --git a/LandCoverFractionGrid/landcoverfraction_grid_dialog.py b/LandCoverFractionGrid/landcoverfraction_grid_dialog.py index 2946fac..d9f6a2a 100644 --- a/LandCoverFractionGrid/landcoverfraction_grid_dialog.py +++ b/LandCoverFractionGrid/landcoverfraction_grid_dialog.py @@ -23,13 +23,14 @@ import os -from PyQt4 import QtGui, uic +from qgis.PyQt import uic +from 
qgis.PyQt.QtWidgets import QDialog FORM_CLASS, _ = uic.loadUiType(os.path.join( os.path.dirname(__file__), 'landcoverfraction_grid_dialog_base.ui')) -class LandCoverFractionGridDialog(QtGui.QDialog, FORM_CLASS): +class LandCoverFractionGridDialog(QDialog, FORM_CLASS): def __init__(self, parent=None): """Constructor.""" super(LandCoverFractionGridDialog, self).__init__(parent) diff --git a/LandCoverFractionGrid/lcfracworker.py b/LandCoverFractionGrid/lcfracworker.py index af38075..862a7fd 100644 --- a/LandCoverFractionGrid/lcfracworker.py +++ b/LandCoverFractionGrid/lcfracworker.py @@ -1,6 +1,10 @@ -from PyQt4 import QtCore -from PyQt4.QtCore import QVariant -from PyQt4.QtGui import QAction, QIcon, QMessageBox, QFileDialog +from __future__ import print_function +from builtins import str +from builtins import range +from qgis.PyQt import QtCore +from qgis.PyQt.QtCore import QVariant +from qgis.PyQt.QtWidgets import QAction, QMessageBox, QFileDialog +from qgis.PyQt.QtGui import QIcon # from qgis.gui import * from qgis.core import * # QgsVectorLayer, QgsVectorFileWriter, QgsFeature, QgsRasterLayer, QgsGeometry, QgsMessageLog import traceback @@ -82,7 +86,7 @@ def run(self): x = f.geometry().centroid().asPoint().x() else: r = 0 - writer = QgsVectorFileWriter(self.dir_poly, "CP1250", self.fields, self.prov.geometryType(), + writer = QgsVectorFileWriter(self.dir_poly, "CP1250", self.fields, self.prov.wkbType(), self.prov.crs(), "ESRI shapefile") if writer.hasError() != QgsVectorFileWriter.NoError: @@ -116,14 +120,14 @@ def run(self): nodata_test = (lc_grid_array == nd) if self.dlg.checkBoxNoData.isChecked(): if np.sum(lc_grid_array) == (lc_grid_array.shape[0] * lc_grid_array.shape[1] * nd): - QgsMessageLog.logMessage("Grid " + str(f.attributes()[self.idx]) + " not calculated. Includes Only NoData Pixels", level=QgsMessageLog.CRITICAL) + QgsMessageLog.logMessage("Grid " + str(f.attributes()[self.idx]) + " not calculated. 
Includes Only NoData Pixels", level=Qgis.Critical) cal = 0 else: lc_grid_array[lc_grid_array == nd] = 6 cal = 1 else: if nodata_test.any(): # == True - QgsMessageLog.logMessage("Grid " + str(f.attributes()[self.idx]) + " not calculated. Includes NoData Pixels", level=QgsMessageLog.CRITICAL) + QgsMessageLog.logMessage("Grid " + str(f.attributes()[self.idx]) + " not calculated. Includes NoData Pixels", level=Qgis.Critical) cal = 0 else: cal = 1 @@ -168,7 +172,7 @@ def run(self): self.progress.emit() ret = 1 - except Exception, e: + except Exception as e: ret = 0 #self.error.emit(e, traceback.format_exc()) errorstring = self.print_exception() @@ -197,6 +201,8 @@ def addattributes(self, vlayer, matdata, header, pre): for x in range(1, len(line_split)): vlayer.dataProvider().addAttributes([QgsField(pre + '_' + line_split[x], QVariant.Double)]) + vlayer.commitChanges() + vlayer.updateFields() attr_dict = {} @@ -205,10 +211,9 @@ def addattributes(self, vlayer, matdata, header, pre): idx = int(matdata[y, 0]) for x in range(1, matdata.shape[1]): attr_dict[current_index_length + x - 1] = float(matdata[y, x]) - #QMessageBox.information(None, "Error", str(line_split[x])) vlayer.dataProvider().changeAttributeValues({idx: attr_dict}) - #vlayer.commitChanges() + vlayer.commitChanges() vlayer.updateFields() else: QMessageBox.critical(None, "Error", "Vector Layer does not support adding attributes") @@ -241,22 +246,21 @@ def textFileCheck(self, pre): wrote_header = False for line in fileinput.input(file_path, inplace=1): if not wrote_header: - print line, + # fix_print_with_import + print(line, end=' ') wrote_header = True else: line_split = line.split() total = 0. 
- # QgsMessageLog.logMessage(str(line), level=QgsMessageLog.CRITICAL) for x in range(1, len(line_split)): total += float(line_split[x]) if total == 1.0: - print line, + # fix_print_with_import + print(line, end=' ') else: diff = total - 1.0 - # QgsMessageLog.logMessage("Diff: " + str(diff), level=QgsMessageLog.CRITICAL) max_number = max(line_split[1:]) - # QgsMessageLog.logMessage("Max number: " + str(max_number), level=QgsMessageLog.CRITICAL) for x in range(1, len(line_split)): if float(max_number) == float(line_split[x]): @@ -274,11 +278,12 @@ def textFileCheck(self, pre): string_to_print += str(line_split[-1]) string_to_print += '\n' - print string_to_print, + # fix_print_with_import + print(string_to_print, end=' ') fileinput.close() - except Exception, e: + except Exception as e: errorstring = self.print_exception() - QgsMessageLog.logMessage(errorstring, level=QgsMessageLog.CRITICAL) + QgsMessageLog.logMessage(errorstring, level=Qgis.Critical) fileinput.close() def kill(self): diff --git a/LandCoverFractionGrid/resources_rc.py b/LandCoverFractionGrid/resources_rc.py index a05b47c..9a79ba9 100644 --- a/LandCoverFractionGrid/resources_rc.py +++ b/LandCoverFractionGrid/resources_rc.py @@ -7,7 +7,7 @@ # # WARNING! All changes made in this file will be lost! 
-from PyQt4 import QtCore +from qgis.PyQt import QtCore qt_resource_data = "\ \x00\x00\x04\x0a\ diff --git a/LandCoverFractionPoint/landcover_fraction_point.py b/LandCoverFractionPoint/landcover_fraction_point.py index f95acec..543ea7b 100644 --- a/LandCoverFractionPoint/landcover_fraction_point.py +++ b/LandCoverFractionPoint/landcover_fraction_point.py @@ -20,10 +20,16 @@ * * ***************************************************************************/ """ -from PyQt4.QtCore import QSettings, QTranslator, qVersion -from PyQt4.QtGui import QAction, QIcon, QFileDialog, QMessageBox -from qgis.gui import * -from qgis.core import * +from __future__ import absolute_import +from builtins import str +from builtins import range +from builtins import object +from qgis.PyQt.QtCore import QSettings, QTranslator, qVersion, QCoreApplication +from qgis.PyQt.QtWidgets import QAction, QFileDialog, QMessageBox +from qgis.PyQt.QtGui import QIcon +from qgis.gui import QgsMapLayerComboBox, QgsMapToolEmitPoint +from qgis.core import QgsMapLayerProxyModel, QgsFeature, QgsGeometry, QgsVectorLayer, QgsPointXY, \ + QgsVectorFileWriter, QgsProject import os from ..Utilities.landCoverFractions_v1 import * from osgeo import gdal @@ -31,12 +37,12 @@ import sys import webbrowser # Initialize Qt resources from file resources.py -import resources_rc +# from . 
import resources_rc # Import the code for the dialog -from landcover_fraction_point_dialog import LandCoverFractionPointDialog +from .landcover_fraction_point_dialog import LandCoverFractionPointDialog -class LandCoverFractionPoint: +class LandCoverFractionPoint(object): """QGIS Plugin Implementation.""" def __init__(self, iface): @@ -75,8 +81,10 @@ def __init__(self, iface): self.dlg.progressBar.setValue(0) self.fileDialog = QFileDialog() - self.fileDialog.setFileMode(4) - self.fileDialog.setAcceptMode(1) + # self.fileDialog.setFileMode(4) + # self.fileDialog.setAcceptMode(1) + self.fileDialog.setFileMode(QFileDialog.Directory) + self.fileDialog.setOption(QFileDialog.ShowDirsOnly, True) for i in range(1, 25): if 360 % i == 0: @@ -86,9 +94,6 @@ def __init__(self, iface): # Declare instance attributes self.actions = [] self.menu = self.tr(u'&Land Cover Fraction Point') - # TODO: We are going to let the user set this up in a future iteration - # self.toolbar = self.iface.addToolBar(u'LandCoverFractionPoint') - # self.toolbar.setObjectName(u'LandCoverFractionPoint') # get reference to the canvas self.canvas = self.iface.mapCanvas() @@ -243,15 +248,15 @@ def create_point(self, point): # Var kommer point ifran??? 
# coords = "{}, {}".format(point.x(), point.y()) # self.iface.messageBar().pushMessage("Coordinate selected", str(coords)) self.dlg.closeButton.setEnabled(1) - QgsMapLayerRegistry.instance().addMapLayer(self.poiLayer) + QgsProject.instance().addMapLayer(self.poiLayer) # create the feature fc = int(self.provider.featureCount()) feature = QgsFeature() - feature.setGeometry(QgsGeometry.fromPoint(point)) + feature.setGeometry(QgsGeometry.fromPointXY(point)) feature.setAttributes([fc, point.x(), point.y()]) self.poiLayer.startEditing() - self.poiLayer.addFeature(feature, True) + self.poiLayer.addFeature(feature) self.poiLayer.commitChanges() self.poiLayer.triggerRepaint() # self.create_poly_layer(point) # Flyttad till generate_area @@ -286,13 +291,13 @@ def generate_area(self): def select_point(self): # Connected to "Secelct Point on Canves" if self.poiLayer is not None: - QgsMapLayerRegistry.instance().removeMapLayer(self.poiLayer.id()) + QgsProject.instance().removeMapLayer(self.poiLayer.id()) if self.polyLayer is not None: self.polyLayer.startEditing() self.polyLayer.selectAll() self.polyLayer.deleteSelectedFeatures() self.polyLayer.commitChanges() - QgsMapLayerRegistry.instance().removeMapLayer(self.polyLayer.id()) + QgsProject.instance().removeMapLayer(self.polyLayer.id()) self.canvas.setMapTool(self.pointTool) # Calls a canvas click and create_point @@ -324,19 +329,18 @@ def create_poly_layer(self): # Assign feature the buffered geometry radius = self.dlg.spinBox.value() - featurepoly.setGeometry(QgsGeometry.fromPoint( - QgsPoint(self.pointx, self.pointy)).buffer(radius, 1000, 1, 1, 1.0)) + featurepoly.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(self.pointx, self.pointy)).buffer(radius, 1000, 1, 1, 1.0)) featurepoly.setAttributes([fc]) self.polyLayer.startEditing() - self.polyLayer.addFeature(featurepoly, True) + self.polyLayer.addFeature(featurepoly) #, True self.polyLayer.commitChanges() - QgsMapLayerRegistry.instance().addMapLayer(self.polyLayer) + 
QgsProject.instance().addMapLayer(self.polyLayer) # props = {'color_border': '255,165,0,125', 'style': 'no', 'style_border': 'solid'} # s = QgsFillSymbolV2.createSimple(props) # self.polyLayer.setRendererV2(QgsSingleSymbolRendererV2(s)) - self.polyLayer.setLayerTransparency(42) + self.polyLayer.setOpacity(0.58) # self.polyLayer.repaintRequested(None) # self.polyLayer.setCacheImage(None) self.polyLayer.triggerRepaint() @@ -374,7 +378,7 @@ def start_process(self): dir_poly = self.plugin_dir + '/data/poly_temp.shp' - writer = QgsVectorFileWriter(dir_poly, "CP1250", fields, prov.geometryType(), + writer = QgsVectorFileWriter(dir_poly, "CP1250", fields, prov.wkbType(), prov.crs(), "ESRI shapefile") if writer.hasError() != QgsVectorFileWriter.NoError: @@ -474,7 +478,7 @@ def run(self): self.dlg.exec_() def help(self): - url = 'http://umep-docs.readthedocs.io/en/latest/pre-processor/Urban%20Land%20Cover%20Land%20Cover%20' \ - 'Fraction%20(Point).html' + # url = "file://" + self.plugin_dir + "/help/Index.html" + url = 'http://www.urban-climate.net/umep/UMEP_Manual#Urban_Land_Cover:_Land_Cover_Fraction_.28Point.29' webbrowser.open_new_tab(url) diff --git a/LandCoverFractionPoint/landcover_fraction_point_dialog.py b/LandCoverFractionPoint/landcover_fraction_point_dialog.py index f7e3ef8..f8f673e 100644 --- a/LandCoverFractionPoint/landcover_fraction_point_dialog.py +++ b/LandCoverFractionPoint/landcover_fraction_point_dialog.py @@ -23,13 +23,14 @@ import os -from PyQt4 import QtGui, uic +from qgis.PyQt import uic +from qgis.PyQt.QtWidgets import QDialog FORM_CLASS, _ = uic.loadUiType(os.path.join( os.path.dirname(__file__), 'landcover_fraction_point_dialog_base.ui')) -class LandCoverFractionPointDialog(QtGui.QDialog, FORM_CLASS): +class LandCoverFractionPointDialog(QDialog, FORM_CLASS): def __init__(self, parent=None): """Constructor.""" super(LandCoverFractionPointDialog, self).__init__(parent) diff --git a/LandCoverFractionPoint/resources_rc.py
b/LandCoverFractionPoint/resources_rc.py index 4c94985..9693191 100644 --- a/LandCoverFractionPoint/resources_rc.py +++ b/LandCoverFractionPoint/resources_rc.py @@ -7,7 +7,7 @@ # # WARNING! All changes made in this file will be lost! -from PyQt4 import QtCore +from qgis.PyQt import QtCore qt_resource_data = "\ \x00\x00\x04\x0a\ diff --git a/LandCoverReclassifier/land_cover_reclassifier.py b/LandCoverReclassifier/land_cover_reclassifier.py index a82c31f..3f6dd5f 100644 --- a/LandCoverReclassifier/land_cover_reclassifier.py +++ b/LandCoverReclassifier/land_cover_reclassifier.py @@ -20,19 +20,24 @@ * * ***************************************************************************/ """ -from PyQt4.QtCore import QSettings, QTranslator, qVersion -from PyQt4.QtGui import QFileDialog, QIcon, QAction, QMessageBox +from __future__ import absolute_import +from builtins import str +from builtins import range +from builtins import object +from qgis.PyQt.QtCore import QSettings, QTranslator, qVersion +from qgis.PyQt.QtWidgets import QFileDialog, QAction, QMessageBox +from qgis.PyQt.QtGui import QIcon from qgis.core import * from qgis.gui import * # Initialize Qt resources from file resources.py -import resources_rc +# from . 
import resources_rc # Import the code for the dialog -from land_cover_reclassifier_dialog import LandCoverReclassifierDialog +from .land_cover_reclassifier_dialog import LandCoverReclassifierDialog import os.path from ..Utilities.misc import * import webbrowser -class LandCoverReclassifier: +class LandCoverReclassifier(object): """QGIS Plugin Implementation.""" def __init__(self, iface): @@ -68,8 +73,8 @@ def __init__(self, iface): self.dlg.helpButton.clicked.connect(self.help) self.fileDialog = QFileDialog() - self.fileDialog.setFileMode(0) - self.fileDialog.setAcceptMode(1) # Save + # self.fileDialog.setFileMode(0) + # self.fileDialog.setAcceptMode(1) # Save self.fileDialog.setNameFilter("(*.tif *.tiff)") # Declare instance attributes @@ -271,7 +276,7 @@ def run(self): self.dlg.exec_() def help(self): - url = 'http://umep-docs.readthedocs.io/en/latest/pre-processor/Urban%20Land%20Cover%20Land%20Cover%20' \ - 'Reclassifier.html' + # url = "file://" + self.plugin_dir + "/help/Index.html" + url = 'http://www.urban-climate.net/umep/UMEP_Manual#Urban_Land_Cover:_Land_Cover_Reclassifier' webbrowser.open_new_tab(url) diff --git a/LandCoverReclassifier/land_cover_reclassifier_dialog.py b/LandCoverReclassifier/land_cover_reclassifier_dialog.py index 98fc668..260492f 100644 --- a/LandCoverReclassifier/land_cover_reclassifier_dialog.py +++ b/LandCoverReclassifier/land_cover_reclassifier_dialog.py @@ -23,13 +23,14 @@ import os -from PyQt4 import QtGui, uic +from qgis.PyQt import uic +from qgis.PyQt.QtWidgets import QDialog FORM_CLASS, _ = uic.loadUiType(os.path.join( os.path.dirname(__file__), 'land_cover_reclassifier_dialog_base.ui')) -class LandCoverReclassifierDialog(QtGui.QDialog, FORM_CLASS): +class LandCoverReclassifierDialog(QDialog, FORM_CLASS): def __init__(self, parent=None): """Constructor.""" super(LandCoverReclassifierDialog, self).__init__(parent) diff --git a/LandCoverReclassifier/resources_rc.py b/LandCoverReclassifier/resources_rc.py index c128628..2c695a3 
100644 --- a/LandCoverReclassifier/resources_rc.py +++ b/LandCoverReclassifier/resources_rc.py @@ -7,7 +7,7 @@ # # WARNING! All changes made in this file will be lost! -from PyQt4 import QtCore +from qgis.PyQt import QtCore qt_resource_data = "\ \x00\x00\x04\x0a\ diff --git a/LucyQF/LQF.py b/LucyQF/LQF.py index a4bd77e..2336a52 100644 --- a/LucyQF/LQF.py +++ b/LucyQF/LQF.py @@ -20,20 +20,24 @@ * * ***************************************************************************/ """ +from __future__ import absolute_import +from builtins import str +from builtins import object -from PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplication, QThread, Qt -from PyQt4.QtGui import QAction, QIcon, QMessageBox, QFileDialog +from qgis.PyQt.QtCore import QSettings, QTranslator, qVersion, QCoreApplication, QThread, Qt +from qgis.PyQt.QtWidgets import QAction, QMessageBox, QFileDialog +from qgis.PyQt.QtGui import QIcon from qgis.gui import QgsMessageBar from qgis.core import QgsVectorLayer, QgsMessageLog -from LQF_dialog import LQFDialog +from .LQF_dialog import LQFDialog import os.path import webbrowser # LUCY specific code -from PythonLUCY.LUCY import Model -from time_displayer import time_displayer +from .PythonLUCY.LUCY import Model +from .time_displayer import time_displayer from datetime import datetime as dt from datetime import timedelta as timedelta -from PythonLUCY.Disaggregate import DisaggregateWorker +from .PythonLUCY.Disaggregate import DisaggregateWorker import traceback try: @@ -43,7 +47,7 @@ pass -class LQF: +class LQF(object): """QGIS Plugin Implementation.""" def __init__(self, iface): @@ -75,7 +79,7 @@ def __init__(self, iface): # Check dependencies try: import pandas - except Exception, e: + except Exception as e: QMessageBox.critical(None, 'Error', 'LQF requires the pandas package to be installed. 
' 'Please consult the manual for further information') @@ -84,7 +88,7 @@ def __init__(self, iface): # Check dependencies try: import netCDF4 - except Exception, e: + except Exception as e: QMessageBox.critical(None, 'Error', 'LQF requires the NetCDF4 package to be installed. ' 'Please consult the manual for further information') @@ -93,7 +97,7 @@ def __init__(self, iface): # Check dependencies try: import matplotlib - except Exception, e: + except Exception as e: QMessageBox.critical(None, 'Error', 'LQF requires the matplotlib package to be installed. ' 'Please consult the manual for further information') @@ -204,7 +208,7 @@ def chooseProcessedDataPath(self): # Check for manifest file or reject try: self.model.setPreProcessedInputFolder(selectedFolder) - except Exception,e: + except Exception as e: QMessageBox.critical(None, 'Error setting processed data path', str(e)) return self.dlg.txtProcessedDataPath.setText(selectedFolder) @@ -287,7 +291,7 @@ def workerError(self, strException): self.dlg.cmdPrepare.setEnabled(True) def help(self): - url = "http://umep-docs.readthedocs.io/en/latest/processor/Urban%20Energy%20Balance%20LQ.html" + url = "http://urban-climate.net/umep/UMEP_Manual#Urban_Energy_Balance:_LQF" webbrowser.open_new_tab(url) def dataSources(self): @@ -299,7 +303,7 @@ def dataSources(self): df = a.selectedFiles() try: self.model.setDataSources(df[0]) - except Exception,e: + except Exception as e: QMessageBox.critical(None, 'Invalid Data Sources file provided', str(e)) return @@ -364,7 +368,7 @@ def loadParams(self): cf = a.selectedFiles() try: self.model.setParameters(cf[0]) - except Exception, e: + except Exception as e: QMessageBox.critical(None, 'Invalid parameters file', str(e)) return @@ -381,7 +385,7 @@ def loadResults(self): # Check for manifest file or reject try: locations = self.model.loadModelResults(selectedFolder) - except Exception,e: + except Exception as e: QMessageBox.critical(None, 'Error loading previous model results', str(e)) return 
@@ -549,7 +553,7 @@ def startWorker(self): self.dlg.cmdLoadResults.setEnabled(True) self.dlg.cmdVisualise.setEnabled(True) - except Exception, e: + except Exception as e: QMessageBox.critical(None, 'Error running LQF', str(e)) QgsMessageLog.logMessage(traceback.format_exc(), level=Qgis.Warning) diff --git a/LucyQF/LQF_dialog.py b/LucyQF/LQF_dialog.py index 2a11e78..bd9c856 100644 --- a/LucyQF/LQF_dialog.py +++ b/LucyQF/LQF_dialog.py @@ -23,14 +23,15 @@ import os -from PyQt4 import QtGui, uic, QtCore +from qgis.PyQt import uic +from qgis.PyQt.QtWidgets import QDialog FORM_CLASS, _ = uic.loadUiType(os.path.join( os.path.dirname(__file__), 'LQF_dialog_base.ui')) -class LQFDialog(QtGui.QDialog, FORM_CLASS): +class LQFDialog(QDialog, FORM_CLASS): def __init__(self, parent=None): """Constructor.""" super(LQFDialog, self).__init__(parent) diff --git a/LucyQF/PythonLUCY/DailyTemperature.py b/LucyQF/PythonLUCY/DailyTemperature.py index 0090cc3..9402ecf 100644 --- a/LucyQF/PythonLUCY/DailyTemperature.py +++ b/LucyQF/PythonLUCY/DailyTemperature.py @@ -1,16 +1,21 @@ +from __future__ import print_function +from __future__ import absolute_import +from builtins import str +from builtins import map +from builtins import object # Object that stores and retrieves temperature each day import os from string import lower import pytz -from DataManagement.DailyLoading import DailyLoading +from .DataManagement.DailyLoading import DailyLoading try: import pandas as pd except: pass -from DataManagement.LookupLogger import LookupLogger +from .DataManagement.LookupLogger import LookupLogger -class DailyTemperature: +class DailyTemperature(object): ''' Manage daily temperature time series ''' @@ -61,12 +66,12 @@ def dealWithInputFile(self, file): # Check file is of correct format dl = pd.read_csv(file,skipinitialspace=True) - dl.columns = map(lower, dl.columns) + dl.columns = list(map(lower, dl.columns)) # Expect certain keywords - if 't_celsius' not in map(lower, dl.keys()): + if
't_celsius' not in list(map(lower, list(dl.keys()))): raise ValueError('One of the column headers in ' + file + ' must be \'T_celsius\'') - rowHeaders = map(lower, dl.data[0:3]) + rowHeaders = list(map(lower, dl.data[0:3])) if 'startdate' != rowHeaders[0]: raise ValueError('First column of second row must be \'StartDate\' in ' + file) @@ -89,7 +94,7 @@ def dealWithInputFile(self, file): try: sd = pd.datetime.strptime(dl.t_celsius[0], '%Y-%m-%d') ed = pd.datetime.strptime(dl.t_celsius[1], '%Y-%m-%d') - except Exception, e: + except Exception as e: raise Exception('The second and third rows of ' + file + ' must be dates in the format YYYY-mm-dd. Got:' + dl.t_celsius[0] + ' and ' + dl.t_celsius[1]) @@ -109,4 +114,5 @@ def test(): a.addTemperatureData('N:\QF_China\Beijing\dailyTemperature_2013_Beijing.csv') #a.addTemperatureData('N:\QF_Heraklion\LUCYConfig\dailyTemperature_2016_Heraklion.csv') for dt in dr: - print str(dt) + str(a.getTemp(dt.to_datetime(), 3600)) + # fix_print_with_import + print(str(dt) + str(a.getTemp(dt.to_datetime(), 3600))) diff --git a/LucyQF/PythonLUCY/DataManagement/DailyLoading.py b/LucyQF/PythonLUCY/DataManagement/DailyLoading.py index 0f80085..e43ca8d 100644 --- a/LucyQF/PythonLUCY/DataManagement/DailyLoading.py +++ b/LucyQF/PythonLUCY/DataManagement/DailyLoading.py @@ -1,10 +1,13 @@ +from __future__ import absolute_import +from builtins import str +from builtins import map import pytz try: import numpy as np except: pass -from GenericAnnualSampler import GenericAnnualSampler -from temporalHelpers import * +from .GenericAnnualSampler import GenericAnnualSampler +from .temporalHelpers import * class DailyLoading(GenericAnnualSampler): # Object to store and retrieve annualised pandas time series @@ -78,7 +81,7 @@ def extractCorrectEntry(self, df, endOfTimestep, timestepDuration, wd): # Within the series, find the most recent occurrence of this day of week # Is the section of data provided to us correct? 
It should be, given earlier stages, but still... - dows_available = map(self.getDOW, [d.to_pydatetime() for d in df.index]) + dows_available = list(map(self.getDOW, [d.to_pydatetime() for d in df.index])) # Return the value and the corresponding date from which it came dateNeeded = (endOfTimestep - timedelta(seconds=timestepDuration-1)) use = np.array(dows_available) == wd @@ -101,6 +104,6 @@ def hasDOW(self, dow, year): # Get days of week present in each startDate's entry. # Each entry must be a pandas timeseries, in which case the day of week is converted from the timestamp dates = [d.to_pydatetime() for d in self.yearContents[startDate]['data'].index] - result[startDate] = (dow in list(np.unique(map(self.getDOW, dates)))) + result[startDate] = (dow in list(np.unique(list(map(self.getDOW, dates))))) return pd.Series(result) \ No newline at end of file diff --git a/LucyQF/PythonLUCY/DataManagement/GenericAnnualSampler.py b/LucyQF/PythonLUCY/DataManagement/GenericAnnualSampler.py index 3ff2792..95c429c 100644 --- a/LucyQF/PythonLUCY/DataManagement/GenericAnnualSampler.py +++ b/LucyQF/PythonLUCY/DataManagement/GenericAnnualSampler.py @@ -1,3 +1,7 @@ +from __future__ import absolute_import +from builtins import str +from builtins import map +from builtins import object # Class to handle temporal profiles for different year, season, day of week and time of day # to make it easy to pull the relevant number out @@ -7,8 +11,8 @@ except: pass from dateutil.relativedelta import * -from temporalHelpers import * -from LookupLogger import LookupLogger +from .temporalHelpers import * +from .LookupLogger import LookupLogger from datetime import datetime as dt from datetime import date as dateType @@ -51,7 +55,7 @@ def specialHolidays(self, holidayDates): self.extraHolidays = holidayDates def niceDate(dateobj): return dateobj.strftime('%Y-%m-%d') if holidayDates not in [None, []]: - self.logger.addEvent('TemporalSampler', None, None, None, 'Special bank holidays added: ' + 
str(map(niceDate, holidayDates))) + self.logger.addEvent('TemporalSampler', None, None, None, 'Special bank holidays added: ' + str(list(map(niceDate, holidayDates)))) def useUKHolidays(self, state): '''Use UK bank holidays: Christmas, Boxing day, New Year's day, Easter Friday and Monday, May day, early and late summer diff --git a/LucyQF/PythonLUCY/DataManagement/LookupLogger.py b/LucyQF/PythonLUCY/DataManagement/LookupLogger.py index a99f1cf..9436c03 100644 --- a/LucyQF/PythonLUCY/DataManagement/LookupLogger.py +++ b/LucyQF/PythonLUCY/DataManagement/LookupLogger.py @@ -1,5 +1,7 @@ +from builtins import str +from builtins import object from collections import OrderedDict -class LookupLogger: +class LookupLogger(object): # Logger object to keep track of data requests and what was provided when the available data didn't match the requested date def __init__(self): @@ -19,10 +21,10 @@ def addEvent(self, eventType, requestedDate, actualDate, paramName, description) :return: None ''' - if eventType not in self.log.keys(): + if eventType not in list(self.log.keys()): self.log[eventType] = OrderedDict() - if requestedDate not in self.log[eventType].keys(): + if requestedDate not in list(self.log[eventType].keys()): self.log[eventType][requestedDate] = [] newEntry = [actualDate, paramName, description] @@ -50,13 +52,13 @@ def writeFile(self, filename): ''' try: f = open(filename, 'w') - except Exception,e: + except Exception as e: raise Exception('Could not write to log file:' + str(filename) + ':' + str(e)) f.write('Requested Date (if applic):: Date returned (if applic) :: Param name :: Description\r\n') - for eventType in self.log.keys(): + for eventType in list(self.log.keys()): f.write('======' + str(eventType) + '=======\r\n') - for requestTime in self.log[eventType].keys(): + for requestTime in list(self.log[eventType].keys()): printReqTime = 'None' if requestTime is None else requestTime.strftime('%Y-%m-%d %H:%M:%S %Z') for logLine in 
self.log[eventType][requestTime]: printActualTime = 'None' if logLine[0] is None else logLine[0].strftime('%Y-%m-%d %H:%M:%S %Z') diff --git a/LucyQF/PythonLUCY/DataManagement/SpatialAttributesSampler.py b/LucyQF/PythonLUCY/DataManagement/SpatialAttributesSampler.py index bc11303..bd37bf7 100644 --- a/LucyQF/PythonLUCY/DataManagement/SpatialAttributesSampler.py +++ b/LucyQF/PythonLUCY/DataManagement/SpatialAttributesSampler.py @@ -1,13 +1,16 @@ -from spatialHelpers import * +from __future__ import absolute_import +from builtins import str +from builtins import map +from .spatialHelpers import * from qgis.core import QgsField, QgsVectorLayer, QgsSpatialIndex, QgsMessageLog, QgsCoordinateReferenceSystem, QgsCoordinateTransform -from PyQt4.QtCore import QVariant +from qgis.PyQt.QtCore import QVariant try: import pandas as pd except: pass -from SpatialTemporalResampler import SpatialTemporalResampler +from .SpatialTemporalResampler import SpatialTemporalResampler def intorstring(x): try: return int(x) @@ -72,7 +75,7 @@ def resampleLayer(self, inputLayer, fieldsToSample, weightby=None, inputIdField= # Get read-across between so feature ID can be ascertained from name according to chosen ID field t = shapefile_attributes(newShapeFile)[self.templateIdField] - readAcross = pd.Series(index=map(intorstring, t.values), data=map(intorstring, t.index)) + readAcross = pd.Series(index=list(map(intorstring, t.values)), data=list(map(intorstring, t.index))) t = None # If the inputLayer and outputLayer spatial units are the same, then disaggregation does not need to happen. 
@@ -93,7 +96,7 @@ def resampleLayer(self, inputLayer, fieldsToSample, weightby=None, inputIdField= # Work out disaggregation factor baed on area intersected # Use "big" totals of weightings if the same attribute present in the input data file - newShapeFile.setSelectedFeatures(list(readAcross[intersectedAreas.keys()])) + newShapeFile.setSelectedFeatures(list(readAcross[list(intersectedAreas.keys())])) selectedOutputFeatures = newShapeFile.selectedFeatures() newShapeFile.startEditing() # Apply disaggregation to features @@ -102,11 +105,11 @@ def resampleLayer(self, inputLayer, fieldsToSample, weightby=None, inputIdField= # Take the input area with the largest intersected area if there is more than one to choose from rawData = intersectedAreas[outputFeat[self.templateIdField]] - numEntries = len(rawData.keys()) + numEntries = len(list(rawData.keys())) if numEntries == 0: continue elif numEntries == 1: - inputValues = rawData.values()[0] + inputValues = list(rawData.values())[0] elif numEntries > 1: area_info = pd.DataFrame(rawData).transpose() diff --git a/LucyQF/PythonLUCY/DataManagement/SpatialTemporalResampler.py b/LucyQF/PythonLUCY/DataManagement/SpatialTemporalResampler.py index 9340902..60a076f 100644 --- a/LucyQF/PythonLUCY/DataManagement/SpatialTemporalResampler.py +++ b/LucyQF/PythonLUCY/DataManagement/SpatialTemporalResampler.py @@ -1,8 +1,12 @@ -from spatialHelpers import * +from __future__ import absolute_import +from builtins import map +from builtins import str +from builtins import object +from .spatialHelpers import * from qgis.core import QgsField, QgsVectorLayer, QgsSpatialIndex, QgsMessageLog, QgsCoordinateReferenceSystem, QgsCoordinateTransform import processing -from PyQt4.QtCore import QVariant, QSettings +from qgis.PyQt.QtCore import QVariant, QSettings try: import pandas as pd import numpy as np @@ -12,10 +16,10 @@ import os from datetime import datetime as dt import tempfile -from LookupLogger import LookupLogger +from .LookupLogger import 
LookupLogger -class SpatialTemporalResampler: +class SpatialTemporalResampler(object): # Class that takes spatial data (QgsVectorLayers), associates them with a time and # allows them to be spatially resampled to output polygons based on attribute values # Also supports a single value for all space via same interface @@ -47,7 +51,7 @@ def setOutputShapefile(self, shapefile, epsgCode, id_field): self.templateIdField = id_field self.templateEpsgCode = int(epsgCode) - if type(shapefile) in [str, unicode]: + if isinstance(shapefile, str): if not os.path.exists(shapefile): raise ValueError('Shapefile: ' + shapefile + ' does not exist') # Try and load a copy of the shapefile into memory. Allow explosion if fails. @@ -67,7 +71,7 @@ def setOutputShapefile(self, shapefile, epsgCode, id_field): # Create mapping from real (numeric) feature ID to desired (string) feature ID a = shapefile_attributes(self.outputLayer)[id_field] - self.featureMapper = pd.Series(index = a.index, data = map(intOrString, a.values)) + self.featureMapper = pd.Series(index = a.index, data = list(map(intOrString, a.values))) # record what was used to label features if self.logger is not None: self.logger.addEvent('Disagg', None, None, None, 'Labelling features using ' + id_field + ' for ' + str(shapefile)) @@ -80,7 +84,7 @@ def getOutputShapefile(self): return self.outputLayer def getOutputFeatureIds(self): - return shapefile_attributes(self.outputLayer).keys() + return list(shapefile_attributes(self.outputLayer).keys()) def dealWithSingleValue(self, value, startTime, attributeToUse): ''' Create a QgsVectorLayer based on self.outputLayer with field attributeToUse the same value all the way through ''' @@ -146,7 +150,7 @@ def dealWithVectorLayer(self, shapefileInput, epsgCode, startTime, attributeToUs # Load the layer try: vectorLayer = openShapeFileInMemory(shapefileInput, targetEPSG=self.templateEpsgCode) - except Exception, e: + except Exception as e: raise ValueError('Could not load shapefile at '
+ shapefileInput) if reprojected: @@ -185,7 +189,7 @@ def injectInput(self, shapefileInput, epsgCode, attributeToUse, startTime): if startTime.tzinfo is None: raise ValueError('Start time must have a timezone attached') - if type(shapefileInput) not in [str, unicode]: + if not isinstance(shapefileInput, str): raise ValueError('Shapefile input (' + str(shapefileInput) + ') is not a string filename') if not os.path.exists(shapefileInput): @@ -194,7 +198,7 @@ def injectInput(self, shapefileInput, epsgCode, attributeToUse, startTime): # Load the layer straight from disk as we won't be making any modifications to it try: vectorLayer = loadShapeFile(shapefileInput) - except Exception, e: + except Exception as e: raise ValueError('Could not load shapefile at ' + shapefileInput) if type(attributeToUse) is not list: @@ -212,7 +216,7 @@ def updateLayers(self, attributeToUse, layer, startTime): # ID field should be the same for both input layer and the one being considered, as both have been disaggregated # using the output layer - satts.index = map(intOrString, satts[self.templateIdField].loc[satts.index.tolist()]) + satts.index = list(map(intOrString, satts[self.templateIdField].loc[satts.index.tolist()])) # Replace any QNullVariants with pd.nan if self.dataLayers is None: @@ -359,7 +363,7 @@ def intorstring(x): except: return str(x) - readAcross = pd.Series(index=map(intorstring, t.values), data=map(intorstring, t.index)) + readAcross = pd.Series(index=list(map(intorstring, t.values)), data=list(map(intorstring, t.index))) t = None # Get areas of input shapefile intersected by output shapefile, and proportions covered, and attribute vals @@ -390,28 +394,28 @@ def intorstring(x): # Select successfully identified output areas - newShapeFile.setSelectedFeatures(list(readAcross[disagg.keys()])) + newShapeFile.setSelectedFeatures(list(readAcross[list(disagg.keys())])) selectedOutputFeatures = newShapeFile.selectedFeatures() newShapeFile.startEditing() # Apply disaggregation
to features for outputFeat in selectedOutputFeatures: # For each output feature # Select the relevant features from the input layer - area_weightings = {inputAreaId: disagg[outputFeat[self.templateIdField]][inputAreaId] for inputAreaId in disagg[outputFeat[self.templateIdField]].keys()} + area_weightings = {inputAreaId: disagg[outputFeat[self.templateIdField]][inputAreaId] for inputAreaId in list(disagg[outputFeat[self.templateIdField]].keys())} # Calculate area-weighted average to get a single value for each output area for field in fieldsToSample: # The values to disaggregate in all regions touching this output feature - input_values = {inputAreaId: intersectedAreas[outputFeat[self.templateIdField]][inputAreaId][field] for inputAreaId in intersectedAreas[outputFeat[self.templateIdField]].keys()} + input_values = {inputAreaId: intersectedAreas[outputFeat[self.templateIdField]][inputAreaId][field] for inputAreaId in list(intersectedAreas[outputFeat[self.templateIdField]].keys())} # If an output area is influenced by multiple input areas, and a subset of these is invalid, # assign them zero - for i in input_values.keys(): + for i in list(input_values.keys()): try: input_values[i] = float(input_values[i]) except: input_values[i] = 0 # Combine values in all input regions touching this output feature. If disagg_weightings missed one out it's because no intersection or NULL data. 
# Any value intersecting an output area with NULL weighting will be excluded - outputAreasToUse = set(input_values.keys()).intersection(area_weightings.keys()) + outputAreasToUse = set(input_values.keys()).intersection(list(area_weightings.keys())) weighted_average = np.sum(np.array([input_values[in_id] * float(area_weightings[in_id]) for in_id in list(outputAreasToUse)])) newShapeFile.changeAttributeValue(outputFeat.id(), fieldIndices[field], float(weighted_average)) @@ -442,7 +446,7 @@ def addInput(self, input, startTime, attributeToUse, inputFieldId, weight_by=Non if type(input) is float: # Assume a single value for all space return self.dealWithSingleValue(input, startTime, 'SINGLEVAL') - if type(input) in ([unicode, str]): # Assume a filename + if isinstance(input, str): # Assume a filename if epsgCode is None: raise ValueError('EPSG code must be provided if a shapefile is input') return self.dealWithVectorLayer(input, epsgCode, startTime, attributeToUse, weight_by, inputFieldId) diff --git a/LucyQF/PythonLUCY/DataManagement/SpatialTemporalResampler_LUCY.py b/LucyQF/PythonLUCY/DataManagement/SpatialTemporalResampler_LUCY.py index 32df77a..5840adc 100644 --- a/LucyQF/PythonLUCY/DataManagement/SpatialTemporalResampler_LUCY.py +++ b/LucyQF/PythonLUCY/DataManagement/SpatialTemporalResampler_LUCY.py @@ -1,8 +1,11 @@ -from spatialHelpers import * +from __future__ import absolute_import +from builtins import map +from builtins import str +from .spatialHelpers import * from qgis.core import QgsField, QgsVectorLayer, QgsSpatialIndex, QgsMessageLog, QgsCoordinateReferenceSystem, QgsCoordinateTransform import processing -from PyQt4.QtCore import QVariant, QSettings +from qgis.PyQt.QtCore import QVariant, QSettings try: import pandas as pd import numpy as np @@ -12,8 +15,8 @@ import os from datetime import datetime as dt import tempfile -from LookupLogger import LookupLogger -from SpatialTemporalResampler import SpatialTemporalResampler +from .LookupLogger import
LookupLogger +from .SpatialTemporalResampler import SpatialTemporalResampler class SpatialTemporalResampler_LUCY(SpatialTemporalResampler): # Class that takes spatial data (QgsVectorLayers), associates them with a time and @@ -74,7 +77,7 @@ def intorstring(x): except: return str(x) - readAcross = pd.Series(index=map(intorstring, t.values), data=map(intorstring, t.index)) + readAcross = pd.Series(index=list(map(intorstring, t.values)), data=list(map(intorstring, t.index))) t = None # Get areas of input shapefile intersected by output shapefile, and proportions covered, and attribute vals @@ -100,26 +103,26 @@ def intorstring(x): disagg = disaggregate_weightings(intersectedAreas, newShapeFile, weight_by, total_weightings, self.templateIdField)[weight_by] # Select successfully identified output areas - newShapeFile.setSelectedFeatures(list(readAcross[disagg.keys()])) + newShapeFile.setSelectedFeatures(list(readAcross[list(disagg.keys())])) selectedOutputFeatures = newShapeFile.selectedFeatures() newShapeFile.startEditing() # Apply disaggregation to features for outputFeat in selectedOutputFeatures: # For each output feature # Select the relevant features from the input layer - area_weightings = {inputAreaId: disagg[outputFeat[self.templateIdField]][inputAreaId] for inputAreaId in disagg[outputFeat[self.templateIdField]].keys()} + area_weightings = {inputAreaId: disagg[outputFeat[self.templateIdField]][inputAreaId] for inputAreaId in list(disagg[outputFeat[self.templateIdField]].keys())} # Calculate area-weighted average to get a single value for each output area for field in fieldsToSample: - input_values = {inputAreaId: intersectedAreas[outputFeat[self.templateIdField]][inputAreaId][field] for inputAreaId in intersectedAreas[outputFeat[self.templateIdField]].keys()} + input_values = {inputAreaId: intersectedAreas[outputFeat[self.templateIdField]][inputAreaId][field] for inputAreaId in list(intersectedAreas[outputFeat[self.templateIdField]].keys())} # If an output 
area is influenced by multiple input areas, and a subset of these is invalid, # assign them zero - for i in input_values.keys(): + for i in list(input_values.keys()): try: input_values[i] = float(input_values[i]) except: input_values[i] = 0 - outputAreasToUse = set(input_values.keys()).intersection(area_weightings.keys()) + outputAreasToUse = set(input_values.keys()).intersection(list(area_weightings.keys())) weighted_average = np.sum(np.array([input_values[in_id] * float(area_weightings[in_id]) for in_id in list(outputAreasToUse)])) newShapeFile.changeAttributeValue(outputFeat.id(), fieldIndices[field], float(weighted_average)) diff --git a/LucyQF/PythonLUCY/DataManagement/TemporalProfileSampler.py b/LucyQF/PythonLUCY/DataManagement/TemporalProfileSampler.py index e1a7619..e27c0c1 100644 --- a/LucyQF/PythonLUCY/DataManagement/TemporalProfileSampler.py +++ b/LucyQF/PythonLUCY/DataManagement/TemporalProfileSampler.py @@ -1,11 +1,13 @@ +from __future__ import absolute_import +from builtins import str # Class to handle temporal profiles for different year, season, day of week and time of day # to make it easy to pull the relevant number out try: import numpy as np except: pass -from temporalHelpers import * -from GenericAnnualSampler import GenericAnnualSampler +from .temporalHelpers import * +from .GenericAnnualSampler import GenericAnnualSampler class TemporalProfileSampler(GenericAnnualSampler): diff --git a/LucyQF/PythonLUCY/DataManagement/samplerHelpers.py b/LucyQF/PythonLUCY/DataManagement/samplerHelpers.py index 922c249..2e4afdd 100644 --- a/LucyQF/PythonLUCY/DataManagement/samplerHelpers.py +++ b/LucyQF/PythonLUCY/DataManagement/samplerHelpers.py @@ -44,7 +44,7 @@ def addPeriod(obj, startDate, endDate, weekSeries, timezone=pytz.timezone('Europ startEndDates[y] = [max(dt(y, 1, 1), startDate), min(dt(y, 12, 31), endDate)] # Go round each year (if necessary), adding entries - for y in startEndDates.keys(): + for y in list(startEndDates.keys()): # Within the year, 
set start date of period as DOY sd = startEndDates[y][0] # start date ed = startEndDates[y][1] # End date @@ -53,7 +53,7 @@ def addPeriod(obj, startDate, endDate, weekSeries, timezone=pytz.timezone('Europ formattedSeries = obj.dealWithSeries(weekSeries) # Add straight to the dict if it's the first entry for the year - if y not in obj.yearContents.keys(): + if y not in list(obj.yearContents.keys()): obj.yearContents[y] = pd.Series(index=[sd], data=[{'isDST': isDST, 'data': formattedSeries.copy(deep=True)}]) diff --git a/LucyQF/PythonLUCY/DataManagement/spatialHelpers.py b/LucyQF/PythonLUCY/DataManagement/spatialHelpers.py index 07c85c9..2e007e7 100644 --- a/LucyQF/PythonLUCY/DataManagement/spatialHelpers.py +++ b/LucyQF/PythonLUCY/DataManagement/spatialHelpers.py @@ -1,3 +1,5 @@ +from builtins import str +from builtins import range # Helper methods to do spatial and shapefile-related manipulations # amg 23/06/2016 import os @@ -8,9 +10,9 @@ pass from qgis.core import QgsVectorFileWriter, QgsVectorLayer, QgsRasterLayer, QgsGeometry, QgsRaster, QgsRectangle, QgsPoint, QgsField, QgsFeature, QgsSpatialIndex -from qgis.core import QgsMapLayerRegistry, QgsSymbolV2, QgsGraduatedSymbolRendererV2, QgsRendererRangeV2, QgsFeatureRequest, QgsExpression, QgsDistanceArea, QgsCoordinateReferenceSystem, QgsCoordinateTransform +from qgis.core import QgsSymbol, QgsGraduatedSymbolRenderer, QgsRendererRange, QgsFeatureRequest, QgsDistanceArea, QgsCoordinateReferenceSystem, QgsCoordinateTransform import processing # qgis processing framework -from PyQt4.QtCore import QVariant, QPyNullVariant +from qgis.PyQt.QtCore import QVariant, QPyNullVariant import tempfile def reprojectVectorLayer_threadSafe(filename, targetEpsgCode): @@ -72,7 +74,7 @@ def reprojectVectorLayer(filename, targetEpsgCode): # Copy features for orig_feat in orig_layer.getFeatures(): orig_id = orig_feat.id() - for fieldName in orig_fieldNames.keys(): + for fieldName in list(orig_fieldNames.keys()): try: new_val = 
float(orig_feat[orig_fieldNames[fieldName]]) reproj_layer.changeAttributeValue(orig_id, reproj_fieldNames[fieldName], new_val) @@ -119,10 +121,10 @@ def calculate_fuel_use(inputLayer, inputIdField, petrolFields = {'motorcycle':'_FC_Pmcyc', 'artic':'_FC_Part', 'rigid':'_FC_Prig', 'taxi':'_FC_Ptaxi', 'car':'_FC_Pcar', 'bus':'_FC_Pbus', 'lgv':'_FC_Plgv'} # Get overall list of new attrib names - consumption_attributes = dieselFields.values() - consumption_attributes.extend(petrolFields.values()) + consumption_attributes = list(dieselFields.values()) + consumption_attributes.extend(list(petrolFields.values())) fieldMap = {'diesel':dieselFields, 'petrol':petrolFields} - modelledTypes = petrolFields.keys() + modelledTypes = list(petrolFields.keys()) # Read-across from our road classes to EuroClass road classes (used in the FuelConsumption object) roadAcross = {'motorway':'motorway', 'primary_road':'urban', 'secondary_road':'urban', 'other':'urban'} @@ -138,7 +140,7 @@ def calculate_fuel_use(inputLayer, inputIdField, for roadType in roadTypes: # For each road type in the file # If we don't explicitly consider this road type as motorway, A road or B road, just consider it "other" - if roadType not in roadTypeLookup.keys(): + if roadType not in list(roadTypeLookup.keys()): roadTypeOfficial = 'other' else: roadTypeOfficial = roadTypeLookup[roadType] @@ -208,7 +210,7 @@ def calculate_fuel_use(inputLayer, inputIdField, for key in modelledTypes: aadtData[key] = np.array(inputLayer.getDoubleValues(vAADTFields[key], selectedOnly = True)[0]) # Populate car, bus and LGV differently to above - for fuelType in fieldMap.keys(): + for fuelType in list(fieldMap.keys()): newValues[fieldMap[fuelType]['car']].loc[ids] = aadtData['total_car'] *\ modelParams.fuelFractions['car'][fuelType] *\ lkm *\ @@ -224,8 +226,8 @@ def calculate_fuel_use(inputLayer, inputIdField, alreadyCalculated = ['car', 'lgv', 'bus'] # The remaining vehicle types all get calculated in the same way so loop over fieldMap 
to save on code... - for fuelType in fieldMap.keys(): - for vehType in fieldMap[fuelType].keys(): + for fuelType in list(fieldMap.keys()): + for vehType in list(fieldMap[fuelType].keys()): if vehType not in alreadyCalculated: newValues[fieldMap[fuelType][vehType]].loc[ids] = aadtData[vehType] * \ modelParams.fuelFractions[vehType][fuelType] * \ @@ -477,9 +479,9 @@ def disaggregate_weightings(intersectedAmounts, output_layer, weightingAttribute # turn the input dictionary inside out to get all the output features touching a given input feature input_features = {} - for out_id in intersectedAmounts.keys(): - for in_id in intersectedAmounts[out_id].keys(): - if in_id not in input_features.keys(): + for out_id in list(intersectedAmounts.keys()): + for in_id in list(intersectedAmounts[out_id].keys()): + if in_id not in list(input_features.keys()): input_features[in_id] = {} # Each entry contains the area intersected, size and values of the input feature # Append the weighting attributes to the input data dict @@ -500,11 +502,11 @@ def disaggregate_weightings(intersectedAmounts, output_layer, weightingAttribute disagg_weightings = {wa:{} for wa in weightingAttributes} # A weighting for each weighting attrib totals_already_available = True - if len(total_weightings.keys()) == 0: + if len(list(total_weightings.keys())) == 0: totals_already_available = False total_weightings = {wa:{} for wa in weightingAttributes} - elif len(total_weightings.keys()) != len(weightingAttributes): + elif len(list(total_weightings.keys())) != len(weightingAttributes): raise ValueError('Total weightings are not present for all weighting attributes') num_outfeats = {} # Keep track of the number of output features (reflects partial overlaps) intersecting each input feature @@ -514,7 +516,7 @@ def disaggregate_weightings(intersectedAmounts, output_layer, weightingAttribute # everything in a partially-covered input area leaping into the output areas that cover it. 
inputAreaCovered = {} - for in_id in input_features.keys(): + for in_id in list(input_features.keys()): num_outfeats[in_id] = 0.0 if not totals_already_available: # Keep a running total of the weightings falling within input feature for normalisation @@ -523,24 +525,24 @@ def disaggregate_weightings(intersectedAmounts, output_layer, weightingAttribute # Add up what proportion of input area has been covered inputAreaCovered[in_id] = 0.0 - for out_id in input_features[in_id].keys(): + for out_id in list(input_features[in_id].keys()): # If not all of the output area intersects the input area, don't use all of the output area's weighting # Use proportion_of_output_area_intersected * weighting as the weighting. This prevents an output area from "stealing" # all of the disaggregated value when only a sliver intersects the input area try: fraction_intersected = input_features[in_id][out_id]['amountIntersected'] / input_features[in_id][out_id]['output_feature_area'] - except ZeroDivisionError,e: + except ZeroDivisionError as e: raise ValueError('The output area with ID %s has an area of zero. This is not allowed'%(str(out_id),)) num_outfeats[in_id] += fraction_intersected if not totals_already_available: try: inputAreaCovered[in_id] += input_features[in_id][out_id]['amountIntersected']/input_features[in_id][out_id]['originalAmount'] - except ZeroDivisionError,e: + except ZeroDivisionError as e: raise ValueError('The input area with ID %s has an area of zero. 
This is not allowed'%(str(in_id),)) for wa in weightingAttributes: - if out_id not in disagg_weightings[wa].keys(): # If none of the output areas in this input area, just allocate empty entries + if out_id not in list(disagg_weightings[wa].keys()): # If none of the output areas in this input area, just allocate empty entries disagg_weightings[wa][out_id] = {} # Dict contains contribution from each input ID intersecting this out_id for wa in weightingAttributes: @@ -558,8 +560,8 @@ def disaggregate_weightings(intersectedAmounts, output_layer, weightingAttribute # in which case the totals are rightly used to downscale the "big" value. # If we find the total is zero over the whole input area, then spread everything evenly across inside that area # to prevent throwing away the quantity to be disaggregated - for in_id in input_features.keys(): - for out_id in input_features[in_id].keys(): + for in_id in list(input_features.keys()): + for out_id in list(input_features[in_id].keys()): for wa in weightingAttributes: # Only use those values that are available (may have been skipped above) try: @@ -890,8 +892,8 @@ def duplicateVectorLayer(inLayer, targetEPSG=None, label=None): def colourRanges(displayLayer, attribute, opacity, range_minima, range_maxima, colours): - from qgis.core import QgsMapLayerRegistry, QgsSymbolV2, QgsGraduatedSymbolRendererV2, QgsRendererRangeV2 - from PyQt4.QtGui import QColor + from qgis.core import QgsSymbol, QgsGraduatedSymbolRenderer, QgsRendererRange + from qgis.PyQt.QtGui import QColor # Colour vector layer according to the value of attribute <>, with ranges set out by <> (list), <> (list) # using <> @@ -899,17 +901,17 @@ def colourRanges(displayLayer, attribute, opacity, range_minima, range_maxima, c rangeList = [] transparent = QColor(QColor(0, 0, 0, 0)) for i in range(0, len(range_minima)): - symbol = QgsSymbolV2.defaultSymbol(displayLayer.geometryType()) + symbol = QgsSymbol.defaultSymbol(displayLayer.geometryType()) 
symbol.setColor(QColor(colours[i])) symbol.setAlpha(opacity) symbol.symbolLayer(0).setOutlineColor(transparent) - valueRange = QgsRendererRangeV2(range_minima[i], range_maxima[i], symbol, + valueRange = QgsRendererRange(range_minima[i], range_maxima[i], symbol, str(range_minima[i]) + ' - ' + str(range_maxima[i])) rangeList.append(valueRange) - renderer = QgsGraduatedSymbolRendererV2('', rangeList) - renderer.setMode(QgsGraduatedSymbolRendererV2.EqualInterval) + renderer = QgsGraduatedSymbolRenderer('', rangeList) + renderer.setMode(QgsGraduatedSymbolRenderer.EqualInterval) renderer.setClassAttribute(attribute) displayLayer.setRendererV2(renderer) @@ -928,9 +930,9 @@ def populateShapefileFromTemplate(dataMatrix, primaryKey, templateShapeFile, :param templateEpsgCode: ''' - from qgis.core import QgsVectorLayer, QgsField, QgsFeature, QgsSpatialIndex, QgsMapLayerRegistry, QgsMessageLog - from PyQt4.QtCore import QVariant - if type(templateShapeFile) in [unicode, str]: + from qgis.core import QgsVectorLayer, QgsField, QgsFeature, QgsSpatialIndex, QgsMessageLog + from qgis.PyQt.QtCore import QVariant + if isinstance(templateShapeFile, str): # Open existing layer and try to set its CRS layer=openShapeFileInMemory(templateShapeFile, templateEpsgCode, label=title) diff --git a/LucyQF/PythonLUCY/DataManagement/temporalHelpers.py b/LucyQF/PythonLUCY/DataManagement/temporalHelpers.py index eabdbd6..d9ef2a5 100644 --- a/LucyQF/PythonLUCY/DataManagement/temporalHelpers.py +++ b/LucyQF/PythonLUCY/DataManagement/temporalHelpers.py @@ -1,3 +1,4 @@ +from builtins import str # Helper methods for temporal calculations and calendar events from datetime import datetime as dt @@ -51,10 +52,10 @@ def holidaysForYear(year): # Christmas day/boxing day falling on weekend isn't included (assumed standard weekend) holidays = [] # New year: - holidays.append(dt(year, 01, 01)) + holidays.append(dt(year, 1, 1)) # If 2 or 3 january is a monday, this is the bank holiday - jan2 = dt(year, 01,
02) - jan3 = dt(year, 01, 03) + jan2 = dt(year, 1, 2) + jan3 = dt(year, 1, 3) if jan2.weekday() == 0: holidays.append(jan2) if jan3.weekday() == 0: @@ -67,7 +68,7 @@ def holidaysForYear(year): holidays.extend([good_fri, easter_mon]) # Early and late may - may1 = dt(year, 05, 01) + may1 = dt(year, 5, 1) may1 = may1 if may1.weekday() is 0 else may1 + timedelta(7 - may1.weekday()) holidays.append(may1) holidays.append(dt(year, 5, 31) - timedelta(dt(year, 5, 31).weekday())) diff --git a/LucyQF/PythonLUCY/DataManagement/utilities.py b/LucyQF/PythonLUCY/DataManagement/utilities.py index 4fc0935..901e58c 100644 --- a/LucyQF/PythonLUCY/DataManagement/utilities.py +++ b/LucyQF/PythonLUCY/DataManagement/utilities.py @@ -1,3 +1,6 @@ +from builtins import str +from builtins import map +from builtins import range import os import re from pytz import timezone @@ -32,35 +35,35 @@ def convert_list_item(x): string_build = '' - for k in input_dict.keys(): + for k in list(input_dict.keys()): string_build += "&" + k + "\n" t = type(input_dict[k]) val = None if t is list: # Convert list to comma separated list - val = map(convert_list_item, input_dict[k]) + val = list(map(convert_list_item, input_dict[k])) val = ",".join(val) string_build += ' ' + k + ' = ' + val + "\n" elif (t is int) or (t is float): # Convert to string val = str(input_dict[k]) string_build += ' ' + k + ' = ' + val + "\n" - elif (t is str) or (t is unicode): + elif t is str: # Take it literally val = encapsulate(input_dict[k]) string_build += ' ' + k + ' = ' + val + "\n" elif (t is dict): # Allow one-deep recursion so groups can exist - for kk in input_dict[k].keys(): + for kk in list(input_dict[k].keys()): t2 = type(input_dict[k][kk]) if t2 is list: # Convert list to comma separated list - val = map(convert_list_item, input_dict[k][kk]) + val = list(map(convert_list_item, input_dict[k][kk])) val = ",".join(val) elif (t2 is int) or (t2 is float): # Convert to string val =
str(input_dict[k][kk]) - elif (t2 is str) or (t2 is unicode): + elif t2 is str: # Take it literally val = encapsulate(input_dict[k][kk]) else: @@ -106,13 +109,13 @@ def results_to_ncdf(results_path): fileList = pd.Series(fileList) firstFile = pd.read_csv(fileList[0], header=0, index_col=0) outputAreas = list(firstFile.index) - outputAreas = range(0, 10000) + outputAreas = list(range(0, 10000)) # Assume each QF value is 4 bytes # Each file contains 3 columns: Total QF (32float), Building QF(32float), index (int) for combination of heating/cooling parameters # - output_x = range(300) # West-east coordinates - output_y = range(300) # south-north coordinates + output_x = list(range(300)) # West-east coordinates + output_y = list(range(300)) # south-north coordinates expectedSize = len(outputAreas) * len(fileList) * 4 * 3 maxSize = 2000000000 * 0.95 # 0.95 gives a bit of space diff --git a/LucyQF/PythonLUCY/Disaggregate.py b/LucyQF/PythonLUCY/Disaggregate.py index 274bb25..21e6b78 100644 --- a/LucyQF/PythonLUCY/Disaggregate.py +++ b/LucyQF/PythonLUCY/Disaggregate.py @@ -1,21 +1,23 @@ -from PyQt4.QtCore import QObject, pyqtSignal +from __future__ import absolute_import +from builtins import map +from qgis.PyQt.QtCore import QObject, pyqtSignal import traceback import os import pickle -from DataManagement.spatialHelpers import saveLayerToFile, loadShapeFile, populateShapefileFromTemplate, openShapeFileInMemory, reprojectVectorLayer_threadSafe -from DataManagement.temporalHelpers import makeUTC +from .DataManagement.spatialHelpers import saveLayerToFile, loadShapeFile, populateShapefileFromTemplate, openShapeFileInMemory, reprojectVectorLayer_threadSafe +from .DataManagement.temporalHelpers import makeUTC try: import pandas as pd except: pass -from Population import Population -from ExtraDisaggregate import ExtraDisaggregate, performDisaggregation, performSampling -from RegionalParameters import RegionalParameters +from .Population import Population +from
.ExtraDisaggregate import ExtraDisaggregate, performDisaggregation, performSampling +from .RegionalParameters import RegionalParameters class DisaggregateWorker(QObject): finished = pyqtSignal(object) update = pyqtSignal(object) - error = pyqtSignal(Exception, basestring) + error = pyqtSignal(Exception, str) def __init__(self, ds, params, outputFolder, UMEPgrid=None, UMEPcoverFractions=None, UMEPgridID=None): QObject.__init__(self) self.killed = False @@ -33,7 +35,7 @@ def run(self): try: outputFolder = disaggregate(self.ds, self.params, self.outputFolder, self.UMEPgrid, self.UMEPcoverFractions, self.UMEPgridID, self.update) self.finished.emit(outputFolder) - except Exception,e: + except Exception as e: self.error.emit(e, traceback.format_exc()) def floatOrNone(x): @@ -70,7 +72,7 @@ def disaggregate(ds, params, outputFolder, UMEPgrid=None, UMEPcoverFractions=Non rp = ds.resPop_spat[0] # Take a look at the residential population data and see if there are any people in it. testPop = loadShapeFile(rp['shapefile'], rp['epsgCode']) - vals = pd.Series(map(floatOrNone, testPop.getValues('Pop')[0])) + vals = pd.Series(list(map(floatOrNone, testPop.getValues('Pop')[0]))) if sum(vals.dropna()) == 0: raise Exception('The input population file has zero population') testPop = None @@ -86,7 +88,7 @@ def disaggregate(ds, params, outputFolder, UMEPgrid=None, UMEPcoverFractions=Non scaledPopFile = os.path.join(outputFolder, filename) saveLayerToFile(lyr, scaledPopFile, pop.getOutputLayer().crs(), 'Res pop scaled') # Test the disaggregated shapefile to make sure it contains people - vals = pd.Series(map(floatOrNone, lyr.getValues('Pop')[0])) + vals = pd.Series(list(map(floatOrNone, lyr.getValues('Pop')[0]))) if sum(vals.dropna()) == 0: raise Exception('The output shapefile did not overlap any of the population data, so the model cannot run') returnDict['resPop'].append({'file':filename, 'EPSG':rp['epsgCode'], 'startDate':rp['startDate'], 'attribute':attrib, 'featureIds':outFeatIds}) 
diff --git a/LucyQF/PythonLUCY/ExtraDisaggregate.py b/LucyQF/PythonLUCY/ExtraDisaggregate.py index bd50283..56fc747 100644 --- a/LucyQF/PythonLUCY/ExtraDisaggregate.py +++ b/LucyQF/PythonLUCY/ExtraDisaggregate.py @@ -1,3 +1,6 @@ +from __future__ import absolute_import +from builtins import map +from builtins import str # Performs supplementary spatial disaggregation of GQF outputs. # Identifies relationships between two layers: A set of gridded land cover fractions and the QGF output grid # Saves dict that summarises this relationship so that disaggregation can occur @@ -7,7 +10,7 @@ except: pass -from DataManagement.spatialHelpers import openShapeFileInMemory, populateShapefileFromTemplate, disaggregate_weightings, intersecting_amounts, feature_areas, shapefile_attributes, intOrString, reprojectVectorLayer_threadSafe +from .DataManagement.spatialHelpers import openShapeFileInMemory, populateShapefileFromTemplate, disaggregate_weightings, intersecting_amounts, feature_areas, shapefile_attributes, intOrString, reprojectVectorLayer_threadSafe from qgis.core import QgsSpatialIndex def ExtraDisaggregate(modelOutAreas, landCoverData, landCoverGrid, landCoverWeights, modelOutputIdField, gridIdField): @@ -88,13 +91,13 @@ def performDisaggregation(layerToDisaggregate, idField, fieldsToDisaggregate, we areas = pd.Series(feature_areas(layerToDisaggregate)) atts = shapefile_attributes(layerToDisaggregate) areaNames = atts[idField] - areas.index = map(intOrString, areaNames[areas.index]) - atts.index = map(intOrString, areaNames[atts.index]) + areas.index = list(map(intOrString, areaNames[areas.index])) + atts.index = list(map(intOrString, areaNames[atts.index])) atts = atts[fieldsToDisaggregate] # Data frame of row:input_id, col:parameter and data = quantity to disagg # Apply disaggregation to features - result = pd.DataFrame(index=weightings[weightingType].keys(), columns=atts.columns) - for oa in weightings[weightingType].keys(): + result = 
pd.DataFrame(index=list(weightings[weightingType].keys()), columns=atts.columns) + for oa in list(weightings[weightingType].keys()): vals = pd.Series(weightings[weightingType][oa]) # Series of input_id:weighting result.loc[oa] = atts.loc[vals.index].transpose().multiply(vals).transpose().sum() # Produces a data frame of the weighted contribution from each input ID, then does col sums @@ -115,19 +118,19 @@ def performSampling(layerToSample, idField, fieldsToSample, intersectedAmounts): # Build data frame of what to actually sample atts = shapefile_attributes(layerToSample) areaNames = atts[idField] - atts.index = map(intOrString, areaNames[atts.index]) + atts.index = list(map(intOrString, areaNames[atts.index])) atts = atts[fieldsToSample] # Data frame of row:input_id, col:parameter and data = quantity to disagg # Sample from features - result = pd.DataFrame(index=intersectedAmounts.keys(), columns=fieldsToSample) - for oa in intersectedAmounts.keys(): + result = pd.DataFrame(index=list(intersectedAmounts.keys()), columns=fieldsToSample) + for oa in list(intersectedAmounts.keys()): rawData =intersectedAmounts[oa] - numEntries = len(rawData.keys()) + numEntries = len(list(rawData.keys())) if numEntries == 0: continue elif numEntries == 1: - inputId = rawData.keys()[0] + inputId = list(rawData.keys())[0] elif numEntries > 1: area_info = pd.DataFrame(rawData).transpose() # Take attribute from input area that most intersects the output area diff --git a/LucyQF/PythonLUCY/LUCY.py b/LucyQF/PythonLUCY/LUCY.py index 9dd4cc4..3c8a2d6 100644 --- a/LucyQF/PythonLUCY/LUCY.py +++ b/LucyQF/PythonLUCY/LUCY.py @@ -1,3 +1,8 @@ +from __future__ import absolute_import +from builtins import str +from builtins import map +from builtins import range +from builtins import object import os import re from datetime import datetime as dt @@ -14,23 +19,23 @@ except: pass -from RegionalParameters import RegionalParameters -from LUCYDiurnalProfile import LUCYDiurnalProfile -from Disaggregate 
import disaggregate -from LUCYfunctions import qm, qb, qt, offset, increasePerHDD, increasePerCDD -from DailyTemperature import DailyTemperature -from LUCYDataSources import LUCYDataSources -from LUCYParams import LUCYParams -from MetabolismProfiles import MetabolismProfiles -from DataManagement.spatialHelpers import intOrString, loadShapeFile -from DataManagement.temporalHelpers import is_holiday -from PyQt4.QtCore import QObject, pyqtSignal +from .RegionalParameters import RegionalParameters +from .LUCYDiurnalProfile import LUCYDiurnalProfile +from .Disaggregate import disaggregate +from .LUCYfunctions import qm, qb, qt, offset, increasePerHDD, increasePerCDD +from .DailyTemperature import DailyTemperature +from .LUCYDataSources import LUCYDataSources +from .LUCYParams import LUCYParams +from .MetabolismProfiles import MetabolismProfiles +from .DataManagement.spatialHelpers import intOrString, loadShapeFile +from .DataManagement.temporalHelpers import is_holiday +from qgis.PyQt.QtCore import QObject, pyqtSignal class LQFWorker(QObject): finished = pyqtSignal(object) update = pyqtSignal(object) - error = pyqtSignal(Exception, basestring) + error = pyqtSignal(Exception, str) def __init__(self, ds, params, outputFolder, UMEPgrid=None, UMEPcoverFractions=None, UMEPgridID=None): QObject.__init__(self) self.killed = False @@ -48,10 +53,10 @@ def run(self): try: outputFolder = disaggregate(self.ds, self.params, self.outputFolder, self.UMEPgrid, self.UMEPcoverFractions, self.UMEPgridID, self.update) self.finished.emit(outputFolder) - except Exception,e: + except Exception as e: self.error.emit(e, traceback.format_exc()) -class Model(): +class Model(object): ''' Class that encapsulates a GreaterQF model instance''' def __init__(self): # Define the subfolders that should be present after each model run @@ -252,7 +257,7 @@ def setupAndRun(self, startDates, endDates): regPar = RegionalParameters() regPar.setWorldDatabase(self.ds.database) # If the user went for extra 
disaggregation, use this. If not, don't - if 'extra_disagg' in self.processedDataList.keys(): + if 'extra_disagg' in list(self.processedDataList.keys()): # Create three copies of the regional parameters object, each using a different "population" to disaggregate buildings, vehicles and metabolism mt = self.processedDataList['extra_disagg']['metabolism'][0] tp = self.processedDataList['extra_disagg']['transport'][0] @@ -310,10 +315,10 @@ def setupAndRun(self, startDates, endDates): other_holidays=combinedHolidays) # Generate 7-day cycle from reference data, with weekends in the right places given local country cycle = pd.concat([weekdayTraffic.loc[country]]*7) - cycle.index = range(0, len(cycle)) + cycle.index = list(range(0, len(cycle))) for wdd in weekendDays[country]: - cycle[range(wdd*24, (1+wdd)*24)] = weekendTraffic.loc[country].values # Overwrite the weekend days with weekend values - trafficProfile[country].addWeeklyCycle(cityTimezone.localize(dt(2015,01,01)), cityTimezone.localize(dt(2015,12,31)), cycle) # 2015 is arbitrary; will work for all years. + cycle[list(range(wdd*24, (1+wdd)*24))] = weekendTraffic.loc[country].values # Overwrite the weekend days with weekend values + trafficProfile[country].addWeeklyCycle(cityTimezone.localize(dt(2015,0o1,0o1)), cityTimezone.localize(dt(2015,12,31)), cycle) # 2015 is arbitrary; will work for all years. 
# Building cycle: Same approach as for Traffic cycle if self.ds.diurnEnergy is not None: @@ -332,10 +337,10 @@ def setupAndRun(self, startDates, endDates): use_uk_holidays=self.parameters.use_uk_hols, other_holidays=combinedHolidays) cycle = pd.concat([weekdayBldg.loc[country]]*7) - cycle.index = range(0, len(cycle)) + cycle.index = list(range(0, len(cycle))) for wdd in weekendDays[country]: - cycle[range(wdd*24, (1+wdd)*24)] = weekendBldg.loc[country].values # Overwrite the weekend days with weekend values - bldgProfile[country].addWeeklyCycle(cityTimezone.localize(dt(2015,01,01)), cityTimezone.localize(dt(2015,12,31)), cycle) # 2015 is arbitrary; will work for all years. + cycle[list(range(wdd*24, (1+wdd)*24))] = weekendBldg.loc[country].values # Overwrite the weekend days with weekend values + bldgProfile[country].addWeeklyCycle(cityTimezone.localize(dt(2015,0o1,0o1)), cityTimezone.localize(dt(2015,12,31)), cycle) # 2015 is arbitrary; will work for all years. # This version of the model doesn't allow a custom metabolic cycle. 
metabCycles = MetabolismProfiles(self.parameters.timezone,workLevel=175, sleepLevel=75) @@ -344,7 +349,7 @@ def setupAndRun(self, startDates, endDates): dates = np.unique(timeBins.date) columns = ['nation_id'] def to_datestring(x): return (x + timedelta(hours=24)).strftime('%Y-%m-%d') - columns.extend(map(to_datestring, dates)) + columns.extend(list(map(to_datestring, dates))) # Data that will be stored to summarise the run so it can be loaded in SUEWS national_attribs_used = [] # Store the unique national attributes used in the model @@ -504,7 +509,7 @@ def makeSUEWSnetcdfFiles(self, national_attribs_used, attrib_indexes, dates, tem self.loadModelResults(self.modelRoot) startTime = dates[0] - if 'extra_disagg' in self.processedDataList.keys(): + if 'extra_disagg' in list(self.processedDataList.keys()): gridIdFieldName = self.processedDataList['extra_disagg']['output_areas']['featureIds'] outLayer = loadShapeFile(self.processedDataList['extra_disagg']['output_areas']['file'], self.processedDataList['extra_disagg']['output_areas']['EPSG']) else: @@ -539,7 +544,7 @@ def makeSUEWSnetcdfFiles(self, national_attribs_used, attrib_indexes, dates, tem times.units = 'hours since ' + startTime.strftime('%Y-%m-%d %H:%M:%S') times.calendar = 'gregorian' def toHoursSinceStart(x): return (int(x.strftime('%j'))-1)*24 + x.hour - times[:] = np.array(map(toHoursSinceStart, timesToUse)) + times[:] = np.array(list(map(toHoursSinceStart, timesToUse))) if is_grid: dataset.createDimension('south_north', len(output_y)) @@ -558,7 +563,7 @@ def toHoursSinceStart(x): return (int(x.strftime('%j'))-1)*24 + x.hour # Sort the grid IDs into an order that allows data to be transformed quickly from series to grid # [y_pos, x_pos] # correctIdOrder = map(intOrString, list(pd.DataFrame(mappings).transpose().sort([1,0]).index)) - correctIdOrder = map(intOrString, list(pd.DataFrame(mappings).transpose().sort_values([1, 0]).index)) + correctIdOrder = list(map(intOrString, 
list(pd.DataFrame(mappings).transpose().sort_values([1, 0]).index))) # Set up data arrays for t in range(len(timesToUse)): # Read output file and add it to netCDF. Re-order each on the fly and reshape to matrix @@ -589,7 +594,7 @@ def toHoursSinceStart(x): return (int(x.strftime('%j'))-1)*24 + x.hour times = dataset.createVariable('time', np.int16, ('time',)) times.units = 'days since ' + dates[0].strftime('%Y-%m-%d %H:%M:%S') times.calendar = 'gregorian' - times[:] = range(len(temperatures)) + times[:] = list(range(len(temperatures))) temps = dataset.createVariable('temperature', np.float32, ('time',)) temps[:] = list(temperatures) @@ -597,7 +602,7 @@ def toHoursSinceStart(x): return (int(x.strftime('%j'))-1)*24 + x.hour dataset.createDimension('regional_id', len(national_attribs_used)) regionId = dataset.createVariable('regional_id', np.int16, ('regional_id',)) regionId.units = 'N/A' - regionId[:] = range(len(national_attribs_used)) + regionId[:] = list(range(len(national_attribs_used))) increasePerCDD = dataset.createVariable('increasePerCDD', np.float32, ('regional_id',)) increasePerCDD[:] = [x['increasePerCDD'] for x in national_attribs_used] increasePerHDD = dataset.createVariable('increasePerHDD', np.float32, ('regional_id',)) @@ -703,7 +708,7 @@ def loadModelResults(self, path): if not os.path.exists(path): raise Exception('Model output directory ' + str(path) + ' not found') - for sub in self.subFolders.values(): + for sub in list(self.subFolders.values()): directory = os.path.join(path, sub) if not os.path.exists(directory): raise Exception('Chosen model output folder ' + str(path) + ' did not contain enough subfolders to be genuine') @@ -778,7 +783,7 @@ def getOutputLayerInfo(self): :return: ''' # Just use one of the disaggregated layers, since these are using same feature mappings - if 'extra_disagg' in self.processedDataList.keys(): + if 'extra_disagg' in list(self.processedDataList.keys()): return self.processedDataList['extra_disagg']['output_areas'] 
else: return self.processedDataList['resPop'][0] diff --git a/LucyQF/PythonLUCY/LUCYDataSources.py b/LucyQF/PythonLUCY/LUCYDataSources.py index a668a59..2924dfb 100644 --- a/LucyQF/PythonLUCY/LUCYDataSources.py +++ b/LucyQF/PythonLUCY/LUCYDataSources.py @@ -1,3 +1,7 @@ +from builtins import str +from builtins import map +from builtins import range +from builtins import object from ...Utilities import f90nml as nml import os from datetime import datetime as dt @@ -24,7 +28,7 @@ def validFile(x): if not os.path.exists(x): raise ValueError('The diurnal input file ' + str(x) + ' was not found') -class LUCYDataSources: +class LUCYDataSources(object): ''' Loads the data sources namelist, conducts validation and structures inputs for use with data management routines ''' def __init__(self, configFile): @@ -40,7 +44,7 @@ def __init__(self, configFile): try: ds = nml.read(configFile) - except Exception, e: + except Exception as e: raise ValueError('Unable to read data sources config file at: ' + str(configFile)) self.inputFile = configFile @@ -53,12 +57,12 @@ def __init__(self, configFile): # Get the database try: self.database = ds['database']['path'] - except Exception,e: + except Exception as e: raise ValueError('Could not set LQF database locations: %s'%str(e)) if not os.path.exists(self.database): raise ValueError('LQF database file (%s) does not exist'%ds['database']) - missing = list(set(expectedKeys_spatial).difference(ds.keys())) + missing = list(set(expectedKeys_spatial).difference(list(ds.keys()))) if len(missing) > 0: raise ValueError('Spatial entries missing from ' + str(configFile) + ' in namelist: ' + str(missing)) @@ -66,7 +70,7 @@ def __init__(self, configFile): for subEntry in expectedKeys_spatial: content = ds[subEntry] # Check it's all lists or no lists - types = np.unique(map(type, content.values())) + types = np.unique(list(map(type, list(content.values())))) # are all required sub-entries present? 
if subEntry == "outputareas": # Special case for output areas expectedNames_spat = ['shapefile', 'epsgCode', 'featureIds'] @@ -75,16 +79,16 @@ def __init__(self, configFile): elif subEntry == "database": expectedNames_spat = ['path'] - missing = list(set(map(upper, expectedNames_spat)).difference(map(upper, content.keys()))) + missing = list(set(map(upper, expectedNames_spat)).difference(list(map(upper, list(content.keys()))))) if len(missing) > 0: raise ValueError('Entries missing from ' + subEntry + ' in namelist: ' + str(missing)) - for k in content.keys(): + for k in list(content.keys()): if content[k] == '': content[k] = None content[k] = [content[k]] - map(validateInput, content[expectedNames_spat[0]]) + list(map(validateInput, content[expectedNames_spat[0]])) # Having gotten this far means the entries are valid, so populate the object field if subEntry == "outputareas": # Special case for output areas @@ -94,8 +98,8 @@ def __init__(self, configFile): self.outputAreas_spat = entries else: try: - content['startDates'] = map(makeTimey, content['startDates']) - except Exception, e: + content['startDates'] = list(map(makeTimey, content['startDates'])) + except Exception as e: raise ValueError('One or more startDate entries is not in YYYY-mm-dd format for ' + subEntry + ':' + str(e)) # Ensure dates within a subentry are unique @@ -118,7 +122,7 @@ def __init__(self, configFile): self.database = content['path'][0] # Mandatory: Get temperature data - if 'dailytemperature' not in ds['temporal'].keys(): + if 'dailytemperature' not in list(ds['temporal'].keys()): raise ValueError('Temperature data file(s) not specified') if type(ds['temporal']['dailyTemperature']) is not list: @@ -138,7 +142,7 @@ def __init__(self, configFile): 'diurnTraffic':'Traffic flow', 'diurnMetab':'Metabolism'} for c in cycles: - if c.lower() not in ds['temporal'].keys(): + if c.lower() not in list(ds['temporal'].keys()): setattr(self, c, None) continue # No cycle(s) specified fileList = [] diff 
--git a/LucyQF/PythonLUCY/LUCYDiurnalProfile.py b/LucyQF/PythonLUCY/LUCYDiurnalProfile.py index 7609174..4d4a1e4 100644 --- a/LucyQF/PythonLUCY/LUCYDiurnalProfile.py +++ b/LucyQF/PythonLUCY/LUCYDiurnalProfile.py @@ -1,3 +1,7 @@ +from __future__ import absolute_import +from builtins import map +from builtins import range +from builtins import object # Object that stores and retrieves diurnal profiles for different seasons and times of day for LUCY # A profile is a week-long template of relative quantity e.g. traffic, energy use or metabolic rate used for scaling later. @@ -9,10 +13,10 @@ except: pass import pytz -from DataManagement.TemporalProfileSampler import TemporalProfileSampler, is_holiday -from DataManagement.LookupLogger import LookupLogger +from .DataManagement.TemporalProfileSampler import TemporalProfileSampler, is_holiday +from .DataManagement.LookupLogger import LookupLogger from datetime import timedelta, datetime -class LUCYDiurnalProfile: +class LUCYDiurnalProfile(object): def __init__(self, areaTimezone, weekendDays, use_uk_holidays, other_holidays = [], logger= LookupLogger()): ''' Instantiate :param areaTimezone: Time zone string defining the study area's time zone @@ -52,7 +56,7 @@ def dealWithInputFile(self, file): dl = pd.read_csv(file,skipinitialspace=True, header=None) # Expect certain keywords - rowHeadings = map(lower, dl[0][0:4]) + rowHeadings = list(map(lower, dl[0][0:4])) if 'season' != rowHeadings[0]: raise ValueError('First column of row 1 must be \'Season\' in ' + file) @@ -68,9 +72,9 @@ def dealWithInputFile(self, file): firstDataLine = 4 # Try to extract the timezone from the file header try: - tz = pytz.timezone(dl[dl.keys()[1]][3]) + tz = pytz.timezone(dl[list(dl.keys())[1]][3]) except Exception: - raise ValueError('Invalid timezone "' + dl[dl.keys()[1]][3] + '" specified in ' + file + + raise ValueError('Invalid timezone "' + dl[list(dl.keys())[1]][3] + '" specified in ' + file + '. 
This should be of the form "UTC" or "Europe/London" as per python timezone documentation') # Go through in triplets gathering up data for a template week @@ -81,7 +85,7 @@ def dealWithInputFile(self, file): try: sd = pd.datetime.strptime(dl[seasonStart][1], '%Y-%m-%d') ed = pd.datetime.strptime(dl[seasonStart][2], '%Y-%m-%d') - except Exception, e: + except Exception as e: raise Exception('Rows 2 and 3 of ' + file + ' must be dates in the format YYYY-mm-dd') sd = tz.localize(sd) @@ -101,7 +105,7 @@ def dealWithInputFile(self, file): week = dl[seasonStart][firstDataLine:].astype('float') # Normalize each day's values by that day's sum so hourly relative variations are derived for day in range(0,7): - hours = range(day*24, (day+1)*24) + hours = list(range(day*24, (day+1)*24)) week.iloc[hours] = week.iloc[hours] / week.iloc[hours].sum() self.addWeeklyCycle(sd, ed, week) diff --git a/LucyQF/PythonLUCY/LUCYParams.py b/LucyQF/PythonLUCY/LUCYParams.py index 09e2735..c1f7017 100644 --- a/LucyQF/PythonLUCY/LUCYParams.py +++ b/LucyQF/PythonLUCY/LUCYParams.py @@ -1,3 +1,7 @@ +from builtins import map +from builtins import str +from builtins import range +from builtins import object import os from ...Utilities import f90nml from datetime import datetime as dt @@ -5,7 +9,7 @@ import string -class LUCYParams: +class LUCYParams(object): def __init__(self, file): ''' Read LUCY parameters file and return object or exception @@ -24,15 +28,15 @@ def __init__(self, file): self.avg_speed = float(nml['params']['avgspeed']) if self.avg_speed > 64000: raise ValueError('Average vehicle speed must not exceed 64 kph') - self.emission_factors = map(float, nml['params']['emissionfactors']) + self.emission_factors = list(map(float, nml['params']['emissionfactors'])) self.BP_temp = float(nml['params']['balance_point_temperature']) self.QV_multfactor = float(nml['params']['QV_multfactor']) self.sleep_metab = float(nml['params']['sleep_metab']) self.work_metab = float(nml['params']['work_metab']) 
- except ValueError, e: + except ValueError as e: raise ValueError('Invalid parameter provided: ' + str(e)) - except KeyError,e: + except KeyError as e: raise KeyError('Entry missing from parameters file: ' + str(e)) # Model date options @@ -47,13 +51,13 @@ def __init__(self, file): self.use_uk_hols = True if nml['params']['use_uk_holidays'] == 1 else False self.use_custom_hols = True if nml['params']['use_custom_holidays'] == 1 else False self.custom_holidays = [] - except KeyError, e: + except KeyError as e: raise KeyError('Entry %s not found in parameters file'%(str(e),)) if self.use_custom_hols: # Only try and deal with custom holidays entry if it's set to 1 def toDate(x): return dt.strptime(x, '%Y-%m-%d').date() try: - self.custom_holidays = map(toDate, nml['params']['custom_holidays']) + self.custom_holidays = list(map(toDate, nml['params']['custom_holidays'])) except Exception: raise ValueError('Custom holidays in parameters file must be in formatted YYYY-mm-dd') @@ -70,7 +74,7 @@ def lcw(self, PARAMS, paramsFile): # Validate land cover weightings (used for additional disaggregation) expectedClasses = ["paved", "buildings", "evergreentrees", "decidioustrees", "grass", "baresoil", "water"] types = ['building', 'transport', 'metabolism'] - missing = set(map(lower,expectedClasses)).difference(map(lower, PARAMS['landCoverWeights'].keys())) + missing = set(map(lower,expectedClasses)).difference(list(map(lower, list(PARAMS['landCoverWeights'].keys())))) if len(missing) > 0: raise ValueError(paramsFile + ' is missing the following entries under "landCoverWeights": ' + string.join(list(missing), ",")) @@ -99,7 +103,7 @@ def tresp(self, PARAMS, paramsFile): expectedEntries = ['Th', 'Tc', 'Ah', 'Ac', 'c', 'Tmax', 'Tmin'] - missing = set(map(lower, expectedEntries)).difference(map(lower, PARAMS['CustomTemperatureResponse'].keys())) + missing = set(map(lower, expectedEntries)).difference(list(map(lower, list(PARAMS['CustomTemperatureResponse'].keys())))) if len(missing) 
> 0: raise ValueError(paramsFile + ' is missing the following entries under "CustomTemperatureResponse": ' + string.join(list(missing), ",")) self.TResponse = {} @@ -108,6 +112,6 @@ def tresp(self, PARAMS, paramsFile): self.TResponse[cl] = float(PARAMS['CustomTemperatureResponse'][cl.lower()]) except ValueError: raise ValueError('Custom temperature response parameters had invalid number in entry: ' + str(cl)) - except Exception, e: + except Exception as e: raise Exception(str(e)) diff --git a/LucyQF/PythonLUCY/LUCYfunctions.py b/LucyQF/PythonLUCY/LUCYfunctions.py index 2b55f74..ef5f760 100644 --- a/LucyQF/PythonLUCY/LUCYfunctions.py +++ b/LucyQF/PythonLUCY/LUCYfunctions.py @@ -1,3 +1,5 @@ +from __future__ import print_function +from __future__ import absolute_import # LUCY core calculations try: import pandas as pd @@ -145,13 +147,14 @@ def qt(avg_speed, vehicle_count, areas, emission_factors, profile): def testIt(): # Run integrated test - from DailyTemperature import DailyTemperature + from .DailyTemperature import DailyTemperature attribs = pd.Series({'summer_cooling':1, 'increasePerCDD':0.0010, 'increasePerHDD':0.00060, 'offset':0.8, 'ecostatus':2}).to_frame() - print attribs + # fix_print_with_import + print(attribs) dr = pd.date_range(pd.datetime.strptime('2013-01-01 12:00', '%Y-%m-%d %H:%M'), pd.datetime.strptime('2013-01-30 12:00', '%Y-%m-%d %H:%M'), tz="UTC") te = DailyTemperature("Asia/Shanghai", use_uk_holidays=False, weekendDays= [], other_holidays=[]) @@ -159,5 +162,6 @@ def testIt(): #a.addTemperatureData('N:\QF_Heraklion\LUCYConfig\dailyTemperature_2016_Heraklion.csv') for dt in dr: a = getTMF(te.getTemp(dt.to_datetime(), 3600)[0], 12, attribs) - print a + # fix_print_with_import + print(a) diff --git a/LucyQF/PythonLUCY/MetabolismProfiles.py b/LucyQF/PythonLUCY/MetabolismProfiles.py index 829e4c3..91514d6 100644 --- a/LucyQF/PythonLUCY/MetabolismProfiles.py +++ b/LucyQF/PythonLUCY/MetabolismProfiles.py @@ -1,3 +1,6 @@ +from __future__ import 
print_function +from builtins import str +from builtins import object # Object that creates diurnal metabolism profiles try: @@ -9,7 +12,7 @@ import pytz from datetime import timedelta -class MetabolismProfiles: +class MetabolismProfiles(object): def __init__(self, timezoneCountry, sleepLevel, workLevel): ''' Instantiate @@ -87,10 +90,14 @@ def getWattPerson(self, timeBinEnd, timeBinDuration, medianAwake, medianAsleep, def testIt(): a = MetabolismProfiles('Europe/Athens', 75, 175) times = pd.date_range(start='2015-01-01 01:00', freq='3600s', periods=96, tz='UTC') - print a.getWattPerson(pd.Timestamp('2015-01-02 12:00:00+0000', offset='H'), 3600, 6.0, 22.0, 2.0) + # fix_print_with_import + print(a.getWattPerson(pd.Timestamp('2015-01-02 12:00:00+0000', offset='H'), 3600, 6.0, 22.0, 2.0)) for t in times: - print str(t) + ' ' +str(a.getWattPerson(t, 1800, 8, 20, 2)) + # fix_print_with_import + print(str(t) + ' ' +str(a.getWattPerson(t, 1800, 8, 20, 2))) - print times[0] - print times[0].to_datetime() + # fix_print_with_import + print(times[0]) + # fix_print_with_import + print(times[0].to_datetime()) diff --git a/LucyQF/PythonLUCY/Population.py b/LucyQF/PythonLUCY/Population.py index f6ec24f..d707768 100644 --- a/LucyQF/PythonLUCY/Population.py +++ b/LucyQF/PythonLUCY/Population.py @@ -1,11 +1,13 @@ +from __future__ import absolute_import +from builtins import object from datetime import datetime -from PyQt4.QtCore import QSettings +from qgis.PyQt.QtCore import QSettings -from DataManagement.SpatialTemporalResampler_LUCY import SpatialTemporalResampler_LUCY -from DataManagement.spatialHelpers import * -from DataManagement.LookupLogger import LookupLogger -class Population: +from .DataManagement.SpatialTemporalResampler_LUCY import SpatialTemporalResampler_LUCY +from .DataManagement.spatialHelpers import * +from .DataManagement.LookupLogger import LookupLogger +class Population(object): # Store spatially and temporally resolved residential population # Provides 
population density for each feateure # Makes heavy use of QGIS API diff --git a/LucyQF/PythonLUCY/RegionalParameters.py b/LucyQF/PythonLUCY/RegionalParameters.py index 2f08ea0..3ba16d0 100644 --- a/LucyQF/PythonLUCY/RegionalParameters.py +++ b/LucyQF/PythonLUCY/RegionalParameters.py @@ -1,15 +1,20 @@ +from __future__ import absolute_import +from builtins import map +from builtins import str +from builtins import range +from builtins import object from datetime import datetime -from PyQt4.QtCore import QSettings -from qgis.core import QgsDataSourceURI, QgsMapLayerRegistry, QgsMessageLog -from DataManagement.SpatialAttributesSampler import SpatialAttributesSampler -from DataManagement.spatialHelpers import * -from DataManagement.LookupLogger import LookupLogger +from qgis.PyQt.QtCore import QSettings +from qgis.core import QgsDataSourceUri +from .DataManagement.SpatialAttributesSampler import SpatialAttributesSampler +from .DataManagement.spatialHelpers import * +from .DataManagement.LookupLogger import LookupLogger import sqlite3 as lite from calendar import isleap def addQuotes(x): return "'" + x + "'" -class RegionalParameters: +class RegionalParameters(object): # Translate spatially and temporally resolved country-specific parameters to model output polygons. Samples values # rather than downscaling them. 
# Parameters: Times of waking, sleeping and duration over which sleep/wake transition occurs; economic status rating; summer cooling @@ -55,7 +60,7 @@ def setWorldDatabase(self, database): if not os.path.exists(database): raise ValueError('LQF Database file ' + database + ' not found') self.databaseLocation = database - self.dburi = QgsDataSourceURI() + self.dburi = QgsDataSourceUri() self.dburi.setDatabase(database) self.dbschema = '' self.dbtable = 'World' @@ -119,7 +124,7 @@ def setOutputShapefile(self, filename, epsgCode, id_field=None): # Assign this population as vehicle, residential and metabolisng population data frames # This can be overriden later to inject specific distributions for the population types df = shapefile_attributes(self.attributedOutputLayer) - df.index = map(intOrString, df[self.worldAttributes.templateIdField]) + df.index = list(map(intOrString, df[self.worldAttributes.templateIdField])) self.countryAssignments = df return self.attributedOutputLayer # This should be saved so it can be used with self.injectSampledLayer to save time later @@ -134,7 +139,7 @@ def injectMetabPopLayer(self, filename, epsgCode): ''' lyr = openShapeFileInMemory(filename, epsgCode, 'temp layer') ser = shapefile_attributes(lyr) - ser.index = map(intOrString, ser[self.worldAttributes.templateIdField]) + ser.index = list(map(intOrString, ser[self.worldAttributes.templateIdField])) self.metabPop = ser['Pop'] lyr = None @@ -148,7 +153,7 @@ def injectVehPopLayer(self, filename, epsgCode): ''' lyr = openShapeFileInMemory(filename, epsgCode, 'temp layer') ser = shapefile_attributes(lyr) - ser.index = map(intOrString, ser[self.worldAttributes.templateIdField]) + ser.index = list(map(intOrString, ser[self.worldAttributes.templateIdField])) self.vehPop = ser['Pop'] lyr = None @@ -162,7 +167,7 @@ def injectResPopLayer(self, filename, epsgCode): ''' lyr = openShapeFileInMemory(filename, epsgCode, 'temp layer') ser = shapefile_attributes(lyr) - ser.index = map(intOrString, 
ser[self.worldAttributes.templateIdField]) + ser.index = list(map(intOrString, ser[self.worldAttributes.templateIdField])) self.resPops = ser['Pop'] lyr = None @@ -191,7 +196,7 @@ def injectAttributedOutputLayer(self, filename, epsgCode, id_field=None): con = lite.connect(self.databaseLocation) self.extractPropertiesForCountries(con, countries) ca = shapefile_attributes(self.attributedOutputLayer) - ca.index = map(intOrString, ca[self.worldAttributes.templateIdField]) + ca.index = list(map(intOrString, ca[self.worldAttributes.templateIdField])) self.countryAssignments = ca def extractPropertiesForCountries(self, con, countries): @@ -298,7 +303,7 @@ def isWeekend(self, featureIds, date): ''' # Get list of 1 or 0 with no index. days = self.countryAssignments.loc[featureIds].join(self.weekendDays[['Mon', 'Tue', 'Wed', 'Thu', 'Fri','Sat', 'Sun']], on='admin')[['Mon', 'Tue', 'Wed', 'Thu', 'Fri','Sat', 'Sun']] - days.columns = range(0,7) + days.columns = list(range(0,7)) return days[date.weekday()] > 0 def getWeekendDaysByRegion(self): @@ -306,7 +311,7 @@ def getWeekendDaysByRegion(self): :return: dict of {country: [int, int]} that shows which days of the week (0-6 = Monday-Sunday) are weekend days ''' tempDays = self.weekendDays[['Mon', 'Tue', 'Wed', 'Thu', 'Fri','Sat', 'Sun']] - tempDays.columns = range(7) + tempDays.columns = list(range(7)) return {idx: tempDays.columns[tempDays.loc[idx] > 0] for idx in tempDays.index} # def getCyclesForFeatureIDs(self, featureIds, weekend): @@ -367,7 +372,7 @@ def getAttribsTable(self, featureId, requestYear): # Country assignments # This can be overriden later to inject specific distributions for the population types countryAssignments = shapefile_attributes(self.attributedOutputLayer) - countryAssignments.index = map(intOrString, countryAssignments[self.worldAttributes.templateIdField]) + countryAssignments.index = list(map(intOrString, countryAssignments[self.worldAttributes.templateIdField])) # Get national attributes for each 
country attrs = self.getNationalAttributes(requestYear) @@ -425,7 +430,7 @@ def getFixedHolidays(self, startDate, endDate): def doyToLeapDatetime(x, year): return datetime.strptime(str(year)+str(x+1 if (x > 59) and isleap(year) else x), '%Y%j').date() output = {} - for c in self.fixedHolidays.keys(): + for c in list(self.fixedHolidays.keys()): output[c] = [] for y in range(startDate.year, endDate.year+1): null = [output[c].append(doyToLeapDatetime(d, y)) for d in self.fixedHolidays[c]] diff --git a/LucyQF/PythonLUCY/RunParamsDialog.py b/LucyQF/PythonLUCY/RunParamsDialog.py index 586e4a7..1cdc2b7 100644 --- a/LucyQF/PythonLUCY/RunParamsDialog.py +++ b/LucyQF/PythonLUCY/RunParamsDialog.py @@ -1,11 +1,13 @@ -from PyQt4 import QtGui, uic -from PyQt4.QtGui import QListWidgetItem, QDialog -from PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplicationm -from PyQt4.QtGui import QAction, QIcon, QMessageBox, QFileDialog +from qgis.PyQt import uic +from qgis.PyQt.QtWidgets import QDialog +# from PyQt4 import QtGui, uic +# from PyQt4.QtGui import QListWidgetItem, QDialog +# from PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplicationm +# from PyQt4.QtGui import QAction, QIcon, QMessageBox, QFileDialog import os FORM_CLASS, _ = uic.loadUiType(os.path.join(os.path.dirname(__file__), 'RunParamsDialog.ui')) -from matplotlib import pyplot -from datetime import datetime as dt +# from matplotlib import pyplot +# from datetime import datetime as dt class RunParamsDialog(QDialog, FORM_CLASS): diff --git a/LucyQF/time_displayer.py b/LucyQF/time_displayer.py index 1a8abb3..9089e19 100644 --- a/LucyQF/time_displayer.py +++ b/LucyQF/time_displayer.py @@ -1,20 +1,25 @@ -from PyQt4 import QtGui, uic -from PyQt4.QtGui import QListWidgetItem -from PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplication -from PyQt4.QtGui import QAction, QIcon, QMessageBox, QFileDialog +from __future__ import absolute_import +from builtins import map +from builtins 
import str +from qgis.PyQt import QtGui, uic +from qgis.PyQt.QtWidgets import QListWidgetItem +from qgis.PyQt.QtCore import QSettings, QTranslator, qVersion, QCoreApplication +from qgis.PyQt.QtWidgets import QAction, QMessageBox, QFileDialog +from qgis.PyQt.QtGui import QIcon import os FORM_CLASS, _ = uic.loadUiType(os.path.join(os.path.dirname(__file__), 'time_displayer.ui')) from qgis.core import QgsMessageLog, QgsMapLayerRegistry, QgsVectorLayer, QgsMapRenderer, QgsRectangle# -from PythonLUCY.DataManagement.spatialHelpers import populateShapefileFromTemplate, colourRanges, openShapeFileInMemory, duplicateVectorLayer +from .PythonLUCY.DataManagement.spatialHelpers import populateShapefileFromTemplate, colourRanges, openShapeFileInMemory, duplicateVectorLayer try: import pandas as pd from matplotlib import pyplot except: pass from datetime import datetime as dt -from PyQt4.QtGui import QImage, QColor, QPainter, QMessageBox -from PyQt4.QtCore import QSize +from qgis.PyQt.QtGui import QImage, QColor, QPainter +from qgis.PyQt.QtWidgets import QMessageBox +from qgis.PyQt.QtCore import QSize # Creates a dialog box that allow different model output time slices to be visualised in QGIS def intOrString(x): @@ -68,7 +73,7 @@ def makeTimeseries(self): :return: None ''' id = self.lstAreas.currentItem().text() - result = self.model.fetchResultsForLocation(intOrString(id), dt(1900,01,01), dt(2200,01,01)) + result = self.model.fetchResultsForLocation(intOrString(id), dt(1900,0o1,0o1), dt(2200,0o1,0o1)) # Are there any valid results here? 
if len(result['Qf'].dropna()) == 0: QMessageBox.critical(None, 'No Data', 'This output area contains no data') @@ -97,7 +102,7 @@ def populateTimeList(self): :return: ''' def toString(x): return x.strftime('%Y-%m-%d %H:%M') - timeLabels = map(toString, self.model.getTimeSteps()) + timeLabels = list(map(toString, self.model.getTimeSteps())) for label in timeLabels: time = QListWidgetItem(label) self.lstTimes.addItem(time) @@ -129,9 +134,9 @@ def updateDisplay(self): range_maxima = [0.000001, 0.1, 1, 10, 100, 1000] colours = ['#CECECE', '#FEE6CE', '#FDAE6B', '#F16913', '#D94801', '#7F2704'] opacity = 1 - for component in self.componentTranslation.values(): + for component in list(self.componentTranslation.values()): layerName = component + t.strftime(' %Y-%m-%d %H:%M UTC') - if component == self.componentTranslation.values()[0]: + if component == list(self.componentTranslation.values())[0]: colourRanges(new_layer, component, opacity, range_minima, range_maxima, colours) new_layer.setLayerName(layerName) layerId = new_layer.id() diff --git a/MetdataProcessor/icon.png b/MetdataProcessor/icon.png new file mode 100644 index 0000000..f696c00 Binary files /dev/null and b/MetdataProcessor/icon.png differ diff --git a/MetdataProcessor/metdata_processor.py b/MetdataProcessor/metdata_processor.py index 891186c..4b7daa3 100644 --- a/MetdataProcessor/metdata_processor.py +++ b/MetdataProcessor/metdata_processor.py @@ -20,15 +20,20 @@ * * ***************************************************************************/ """ -from PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplication -from PyQt4.QtGui import QFileDialog, QMessageBox, QIcon, QAction -from metdata_processor_dialog import MetdataProcessorDialog +from __future__ import absolute_import +from builtins import str +from builtins import range +from builtins import object +from qgis.PyQt.QtCore import QSettings, QTranslator, qVersion, QCoreApplication +from qgis.PyQt.QtWidgets import QFileDialog, QMessageBox, 
QAction +from qgis.PyQt.QtGui import QIcon +from .metdata_processor_dialog import MetdataProcessorDialog import os.path import numpy as np import webbrowser -class MetdataProcessor: +class MetdataProcessor(object): """QGIS Plugin Implementation.""" def __init__(self, iface): @@ -230,7 +235,7 @@ def import_file(self): QMessageBox.information(self.dlg, "File imported", "If invalid data was detected such as strings or " "other non-numrical characters, these data points could " "result in that the MetdataProcessor " - "will fail to create your UMEP-formatted inputdata.", 'Continue') + "will fail to create your UMEP-formatted inputdata.") #, 'Continue' except Exception as e: QMessageBox.critical(self.dlg, "Error: Check the number of columns in each line", str(e)) return @@ -238,8 +243,8 @@ def import_file(self): def start_progress(self): outputfile = self.fileDialog.getSaveFileName(None, "Save File As:", None, "Text Files (*.txt)") - - if not outputfile: + # print(outputfile[0]) + if not outputfile[0]: QMessageBox.critical(None, "Error", "An output text file (.txt) must be specified") return @@ -625,16 +630,19 @@ def start_progress(self): # '%6.2f %6.2f %6.2f %6.2f %6.2f %6.2f %6.2f' numformat = '%d %d %d %d %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f ' \ '%.2f %.2f %.2f %.2f %.2f %.2f %.2f' - np.savetxt(outputfile, met_new, fmt=numformat, header=header, comments='') + np.savetxt(outputfile[0], met_new, fmt=numformat, header=header, comments='') self.dlg.progressBar.setValue(23) QMessageBox.information(None, "Metdata pre-processor", "Input data to UMEP processor generated") def run(self): + """Run method that performs all the real work""" + # show the dialog self.dlg.show() self.dlg.exec_() def help(self): - url = 'http://umep-docs.readthedocs.io/en/latest/pre-processor/Meteorological%20Data%20MetPreprocessor.html' + # url = "file://" + self.plugin_dir + "/help/Index.html" + url = 
'http://www.urban-climate.net/umep/UMEP_Manual#Meteorological_Data:_MetPreprocessor' webbrowser.open_new_tab(url) diff --git a/MetdataProcessor/metdata_processor_dialog.py b/MetdataProcessor/metdata_processor_dialog.py index 222663f..0ec6f7c 100644 --- a/MetdataProcessor/metdata_processor_dialog.py +++ b/MetdataProcessor/metdata_processor_dialog.py @@ -23,13 +23,14 @@ import os -from PyQt4 import QtGui, uic +from qgis.PyQt import uic +from qgis.PyQt.QtWidgets import QDialog FORM_CLASS, _ = uic.loadUiType(os.path.join( os.path.dirname(__file__), 'metdata_processor_dialog_base.ui')) -class MetdataProcessorDialog(QtGui.QDialog, FORM_CLASS): +class MetdataProcessorDialog(QDialog, FORM_CLASS): def __init__(self, parent=None): """Constructor.""" super(MetdataProcessorDialog, self).__init__(parent) diff --git a/MetdataProcessor/pb_tool.cfg b/MetdataProcessor/pb_tool.cfg new file mode 100644 index 0000000..1968f3f --- /dev/null +++ b/MetdataProcessor/pb_tool.cfg @@ -0,0 +1,74 @@ +#/*************************************************************************** +# MetdataProcessor +# +# Configuration file for plugin builder tool (pb_tool) +# ------------------- +# begin : 2015-06-06 +# copyright : (C) 2015 by Fredrik Lindberg +# email : fredrikl@gvc.gu.se +# ***************************************************************************/ +# +#/*************************************************************************** +# * * +# * This program is free software; you can redistribute it and/or modify * +# * it under the terms of the GNU General Public License as published by * +# * the Free Software Foundation; either version 2 of the License, or * +# * (at your option) any later version. * +# * * +# ***************************************************************************/ +# +# +# You can install pb_tool using: +# pip install http://geoapt.net/files/pb_tool.zip +# +# Consider doing your development (and install of pb_tool) in a virtualenv. 
+# +# For details on setting up and using pb_tool, see: +# http://spatialgalaxy.net/qgis-plugin-development-with-pb_tool +# +# Issues and pull requests here: +# https://github.com/g-sherman/plugin_build_tool: +# +# Sane defaults for your plugin generated by the Plugin Builder are +# already set below. +# +# As you add Python source files and UI files to your plugin, add +# them to the appropriate [files] section below. + +[plugin] +# Name of the plugin. This is the name of the directory that will +# be created in .qgis2/python/plugins +name: MetdataProcessor + +[files] +# Python files that should be deployed with the plugin +python_files: __init__.py metdata_processor.py metdata_processor_dialog.py suewsdataprocessing_v4.py + +# The main dialog file that is loaded (not compiled) +main_dialog: metdata_processor_dialog_base.ui + +# Other ui files for dialogs you create (these will be compiled) +compiled_ui_files: + +# Resource file(s) that will be compiled +resource_files: resources.qrc + +# Other files required for the plugin +extras: icon.png metadata.txt + +# Other directories to be deployed with the plugin. +# These must be subdirectories under the plugin directory +extra_dirs: + +# ISO code(s) for any locales (translations), separated by spaces. +# Corresponding .ts files must exist in the i18n directory +locales: + +[help] +# the built help directory that should be deployed with the plugin +dir: help/build/html +# the name of the directory to target in the deployed plugin +target: help + + + diff --git a/MetdataProcessor/resources_rc.py b/MetdataProcessor/resources_rc.py index b2c3cb4..fec1e04 100644 --- a/MetdataProcessor/resources_rc.py +++ b/MetdataProcessor/resources_rc.py @@ -7,7 +7,7 @@ # # WARNING! All changes made in this file will be lost! 
-from PyQt4 import QtCore +from qgis.PyQt import QtCore qt_resource_data = "\ \x00\x00\x04\x0a\ diff --git a/SEBE/SEBEfiles/Perez_v3.py b/SEBE/SEBEfiles/Perez_v3.py index 2b1fa7d..6f1d385 100644 --- a/SEBE/SEBEfiles/Perez_v3.py +++ b/SEBE/SEBEfiles/Perez_v3.py @@ -1,4 +1,6 @@ from __future__ import division +from __future__ import print_function +from builtins import range import numpy as np @@ -181,7 +183,7 @@ def Perez_v3(zen, azimuth, radD, radI, jday, patchchoice): skyvaultazi = np.empty((90, 361)) for j in range(90): skyvaultalt[j, :] = 91-j - skyvaultazi[j, :] = range(361) + skyvaultazi[j, :] = list(range(361)) elif patchchoice == 1: # Creating skyvault of patches of constant radians (Tregeneza and Sharples, 1993) diff --git a/SEBE/SEBEfiles/get_ders.py b/SEBE/SEBEfiles/get_ders.py index b2d999b..bf09698 100644 --- a/SEBE/SEBEfiles/get_ders.py +++ b/SEBE/SEBEfiles/get_ders.py @@ -1,4 +1,5 @@ from __future__ import division +from __future__ import print_function try: from osgeo import gdal, gdal_array @@ -26,7 +27,8 @@ def get_temp_file(suffix=""): os.remove(temp_filename) except: pass - print temp_filename + # fix_print_with_import + print(temp_filename) return temp_filename def get_slope_aspect_gdal(dem_file): diff --git a/SEBE/SEBEfiles/importdata.py b/SEBE/SEBEfiles/importdata.py index ccfe089..fccf357 100644 --- a/SEBE/SEBEfiles/importdata.py +++ b/SEBE/SEBEfiles/importdata.py @@ -40,6 +40,8 @@ @seealso{textscan, dlmread, csvread, load} @end deftypefn """ +from __future__ import print_function +from builtins import str import os from PIL import Image @@ -126,13 +128,13 @@ def importdata(*args): # If there are any empty elements in the output dict, then remove them if isinstance(output, dict) and len(output) == 1: - for key, val in output.copy().iteritems(): # copy() for py3 compatibility or use items() + for key, val in output.copy().items(): # copy() for py3 compatibility or use items() if not val: del output[key] # If only one element is left, replace 
the dict with the element, i.e. output = output['onlyFieldLeft'] # Update the list of fields - fields = output.keys() + fields = list(output.keys()) if len(fields) == 1: output = output[fields[0]] @@ -172,7 +174,7 @@ def importdata_ascii(fileName, delimiter, headerRows): # Put the header rows in output.textdata. if headerRows > 0: for i, el in enumerate(fileContentRows[0:headerRows]): - output['textdata'].append(unicode(el)) # struct in ML is converted to dict in py + output['textdata'].append(str(el)) # struct in ML is converted to dict in py # If space is the delimiter, then remove spaces in the beginning of each data row. if delimiter is ' ': @@ -201,11 +203,13 @@ def importdata_ascii(fileName, delimiter, headerRows): try: output['data'][i, j] = float(el) except ValueError: - output['textdata'].append(unicode(el)) # using tuple (i,j) as key to dict textdata + output['textdata'].append(str(el)) # using tuple (i,j) as key to dict textdata # Check wether rowheaders or colheaders should be used - print "text data" - print output['textdata'] + # fix_print_with_import + print("text data") + # fix_print_with_import + print(output['textdata']) if headerRows == dataColumns and len(output['textdata']) == 1: # getting the col size, assuming # the dict is equivalent of struct in Matlab output['rowheaders'] = output['textdata'] diff --git a/SEBE/SEBEfiles/sunmapcreator_2015a.py b/SEBE/SEBEfiles/sunmapcreator_2015a.py index 04931c1..3c605f0 100644 --- a/SEBE/SEBEfiles/sunmapcreator_2015a.py +++ b/SEBE/SEBEfiles/sunmapcreator_2015a.py @@ -1,7 +1,9 @@ from __future__ import division +from __future__ import absolute_import +from builtins import range import numpy as np from ...Utilities.SEBESOLWEIGCommonFiles.diffusefraction import diffusefraction -from Perez_v3 import Perez_v3 +from .Perez_v3 import Perez_v3 from ...Utilities.SEBESOLWEIGCommonFiles.clearnessindex_2013b import clearnessindex_2013b diff --git a/SEBE/WriteMetaDataSEBE.py b/SEBE/WriteMetaDataSEBE.py new file mode 
100644 index 0000000..dc90d73 --- /dev/null +++ b/SEBE/WriteMetaDataSEBE.py @@ -0,0 +1,67 @@ +from builtins import str +# This file prints out run information used for each specific run +from time import strftime +from osgeo import osr + + +def writeRunInfo(folderPath, filepath_dsm, gdal_dsm, usevegdem, filePath_cdsm, trunkfile, filePath_tdsm, lat, lon, UTC, + filePath_metfile, albedo, onlyglobal, trunkratio, trans, rows, cols): + + with open(folderPath + '/RunInfoSEBE.txt', 'w') as file: + file.write('This file provides run settings for the SEBE run initiated at: ' + + strftime("%a, %d %b %Y %H:%M:%S")) + file.write('\n') + file.write('Version: ' + 'SEBE v2015a') + file.write('\n') + file.write('\n') + file.write('SURFACE DATA') + file.write('\n') + file.write('Digital surface model (DSM): ' + filepath_dsm) + file.write('\n') + file.write('Model domain: rows = ' + str(rows) + ', columns = ' + str(cols)) + file.write('\n') + # get CRS + prj = gdal_dsm.GetProjection() + srs = osr.SpatialReference(wkt=prj) + if srs.IsProjected: + file.write('Projected referece system: ' + srs.GetAttrValue('projcs')) + file.write('\n') + file.write('Geographical coordinate system: ' + srs.GetAttrValue('geogcs')) + file.write('\n') + file.write('Latitude: ' + str(lat)) + file.write('\n') + file.write('Longitude: ' + str(lon)) + file.write('\n') + file.write('UTC: ' + str(UTC)) + file.write('\n') + if usevegdem == 1: + file.write('Transmissivity of light through vegetation: ' + str(trans)) + file.write('\n') + file.write('Digital vegetation canopy model (CDSM): ' + filePath_cdsm) + file.write('\n') + if trunkfile == 1: + file.write('Digital vegetation zrunk zone model (TDSM): ' + filePath_tdsm) + file.write('\n') + else: + file.write('Trunkzone estimated from CDSM') + file.write('\n') + file.write('Trunkzone as percent of canopy height: ' + str(trunkratio)) + file.write('\n') + else: + file.write('Vegetation scheme inactive') + file.write('\n') + file.write('\n') + 
file.write('METEOROLOGICAL FORCING DATA') + file.write('\n') + file.write('Meteorological file: ' + filePath_metfile) + file.write('\n') + if onlyglobal == 1: + file.write('Diffuse and direct shortwave radiation estimated from global radiation') + file.write('\n') + + file.write('\n') + file.write('ENVIRONMENTAL PARAMETERS') + file.write('\n') + file.write('Albedo: ' + str(albedo)) + file.write('\n') + file.close() diff --git a/SEBE/resources_rc.py b/SEBE/resources_rc.py index a7e9c1a..5c4a70f 100644 --- a/SEBE/resources_rc.py +++ b/SEBE/resources_rc.py @@ -7,7 +7,7 @@ # # WARNING! All changes made in this file will be lost! -from PyQt4 import QtCore +from qgis.PyQt import QtCore qt_resource_data = "\ \x00\x00\x04\x0a\ diff --git a/SEBE/sebe.py b/SEBE/sebe.py index e3c53f5..c3c2699 100644 --- a/SEBE/sebe.py +++ b/SEBE/sebe.py @@ -20,22 +20,29 @@ * * ***************************************************************************/ """ -from PyQt4.QtCore import QSettings, QTranslator, qVersion, QThread -from PyQt4.QtGui import QFileDialog, QIcon, QAction, QMessageBox +from __future__ import absolute_import +from future import standard_library +standard_library.install_aliases() +from builtins import str +from builtins import object +from qgis.PyQt.QtCore import QSettings, QTranslator, qVersion, QThread +from qgis.PyQt.QtWidgets import QFileDialog, QAction, QMessageBox +from qgis.PyQt.QtGui import QIcon from qgis.core import * from qgis.gui import * -from sebe_dialog import SEBEDialog +from .sebe_dialog import SEBEDialog import os.path from ..Utilities.misc import * from osgeo import gdal, osr import numpy as np -from sebeworker import Worker +from .sebeworker import Worker from ..Utilities.SEBESOLWEIGCommonFiles.Solweig_v2015_metdata_noload import Solweig_2015a_metdata_noload -from SEBEfiles.sunmapcreator_2015a import sunmapcreator_2015a +from .SEBEfiles.sunmapcreator_2015a import sunmapcreator_2015a import webbrowser +from . 
import WriteMetaDataSEBE -class SEBE: +class SEBE(object): """QGIS Plugin Implementation.""" def __init__(self, iface): @@ -73,8 +80,10 @@ def __init__(self, iface): self.dlg.pushButtonImport.clicked.connect(self.read_metdata) self.dlg.pushButtonSaveIrradiance.clicked.connect(self.save_radmat) self.fileDialog = QFileDialog() - self.fileDialog.setFileMode(4) - self.fileDialog.setAcceptMode(1) + # self.fileDialog.setFileMode(4) + # self.fileDialog.setAcceptMode(1) + self.fileDialog.setFileMode(QFileDialog.Directory) + self.fileDialog.setOption(QFileDialog.ShowDirsOnly, True) self.fileDialogFile = QFileDialog() # Declare instance attributes @@ -235,7 +244,7 @@ def read_metdata(self): if self.metdata.shape[1] == 24: self.iface.messageBar().pushMessage("SEBE", "Meteorological data succefully loaded", - level=QgsMessageBar.INFO, duration=3) + level=Qgis.Info, duration=3) else: QMessageBox.critical(None, "Import Error", "Wrong number of columns in meteorological data. You can " "prepare your data by using 'Prepare Existing Data' in " @@ -318,6 +327,8 @@ def start_progress(self): QMessageBox.warning(None, "Huge grid", "This process will take a very long time. 
" "Go home for the weekend or consider to tile your grid") + trunkfile = 0 + trunkratio = 0 if self.dlg.checkBoxUseVeg.isChecked(): self.usevegdem = 1 self.vegdsm = self.layerComboManagerVEGDSM.currentLayer() @@ -326,11 +337,11 @@ def start_progress(self): QMessageBox.critical(None, "Error", "No valid vegetation DSM selected") return - # load raster + # load vegetation raster gdal.AllRegister() provider = self.vegdsm.dataProvider() - filePathOld = str(provider.dataSourceUri()) - dataSet = gdal.Open(filePathOld) + filePath_cdsm = str(provider.dataSourceUri()) + dataSet = gdal.Open(filePath_cdsm) self.vegdsm = dataSet.ReadAsArray().astype(np.float) vegsizex = self.vegdsm.shape[0] @@ -350,10 +361,12 @@ def start_progress(self): # load raster gdal.AllRegister() provider = self.vegdsm2.dataProvider() - filePathOld = str(provider.dataSourceUri()) - dataSet = gdal.Open(filePathOld) + filePath_tdsm = str(provider.dataSourceUri()) + dataSet = gdal.Open(filePath_tdsm) self.vegdsm2 = dataSet.ReadAsArray().astype(np.float) + trunkfile = 1 else: + filePath_tdsm = None trunkratio = self.dlg.spinBoxTrunkHeight.value() / 100.0 self.vegdsm2 = self.vegdsm * trunkratio @@ -367,6 +380,8 @@ def start_progress(self): self.vegdsm = 0 self.vegdsm2 = 0 self.usevegdem = 0 + filePath_cdsm = None + filePath_tdsm = None UTC = self.dlg.spinBoxUTC.value() psi = self.dlg.spinBoxTrans.value() / 100.0 @@ -438,46 +453,14 @@ def start_progress(self): building_slope, building_aspect = get_ders(self.dsm, self.scale) calc_month = False # TODO: Month not implemented + WriteMetaDataSEBE.writeRunInfo(self.folderPath[0], filepath_dsm, self.gdal_dsm, self.usevegdem, + filePath_cdsm, trunkfile, filePath_tdsm, lat, lon, UTC, + self.folderPathMetdata[0], albedo, onlyglobal, trunkratio, psi, sizex, sizey) + self.startWorker(self.dsm, self.scale, building_slope,building_aspect, voxelheight, sizey, sizex, self.vegdsm, self.vegdsm2, self.wheight, self.waspect, albedo, psi, radmatI, radmatD, radmatR, self.usevegdem, 
calc_month, self.dlg) - ## Den har functionen vill jag ha som en worker med fungerande progressbar. 145 itereringar - # Energyyearroof, Energyyearwall, vegdata = SEBE_2015a_calc.SEBE_2015a_calc(self.dsm, self.scale, building_slope, - # building_aspect, voxelheight, sizey, sizex, self.vegdsm, self.vegdsm2, self.wheight, - # self.waspect, albedo, psi, radmatI, radmatD, radmatR, self.usevegdem, calc_month) - - # filenameroof = self.folderPath[0] + '/Energyyearroof.tif' - # saveraster(self.gdal_dsm, filenameroof, Energyyearroof) - # filenamewall = self.folderPath[0] + '/Energyyearwall.txt' - # header = '%row col irradiance' - # numformat = '%4d %4d %6.2f' - # np.savetxt(filenamewall, Energyyearwall[:, (0, 1, 2)], fmt=numformat, header=header, comments='') - # if self.usevegdem == 1: - # filenamewall = self.folderPath[0] + '/Vegetationdata.txt' - # header = '%row col height' - # numformat = '%4d %4d %6.2f' - # np.savetxt(filenamewall, vegdata, fmt=numformat, header=header, comments='') - # - # self.iface.messageBar().pushMessage("SEBE", "Calculation succesfully completed", - # level=QgsMessageBar.INFO, duration=3) - # - # # load roof irradiance result into map canvas - # if self.dlg.checkBoxIntoCanvas.isChecked(): - # rlayer = self.iface.addRasterLayer(filenameroof) - # - # # Trigger a repaint - # if hasattr(rlayer, "setCacheImage"): - # rlayer.setCacheImage(None) - # rlayer.triggerRepaint() - # - # rlayer.loadNamedStyle(self.plugin_dir + '/SEBE_kwh.qml') - # # self.QgsMapLayerRegistry.instance().addMapLayer(rlayer) - # - # if hasattr(rlayer, "setCacheImage"): - # rlayer.setCacheImage(None) - # rlayer.triggerRepaint() - def startWorker(self, dsm, scale, building_slope,building_aspect, voxelheight, sizey, sizex, vegdsm, vegdsm2, wheight,waspect, albedo, psi, radmatI, radmatD, radmatR, usevegdem, calc_month, dlg): # create a new worker instance @@ -535,7 +518,6 @@ def workerFinished(self, ret): rlayer.triggerRepaint() rlayer.loadNamedStyle(self.plugin_dir + 
'/SEBE_kwh.qml') - # self.QgsMapLayerRegistry.instance().addMapLayer(rlayer) if hasattr(rlayer, "setCacheImage"): rlayer.setCacheImage(None) @@ -548,7 +530,9 @@ def workerFinished(self, ret): self.dlg.pushButtonClose.setEnabled(True) else: # notify the user that something went wrong - self.iface.messageBar().pushMessage('Operations cancelled either by user or error. See the General tab in Log Meassages Panel (speech bubble, lower right) for more information.', level=QgsMessageBar.CRITICAL, duration=3) + self.iface.messageBar().pushMessage('Operations cancelled either by user or error. See the General tab in ' + 'Log Meassages Panel (speech bubble, lower right) for more information.' + , level=Qgis.Critical, duration=3) self.dlg.runButton.setText('Run') self.dlg.runButton.clicked.disconnect() self.dlg.runButton.clicked.connect(self.start_progress) @@ -556,7 +540,7 @@ def workerFinished(self, ret): self.dlg.progressBar.setValue(0) def workerError(self, errorstring): - QgsMessageLog.logMessage(errorstring, level=QgsMessageLog.CRITICAL) + QgsMessageLog.logMessage(errorstring, level=Qgis.Critical) def progress_update(self): self.steps += 1 @@ -567,6 +551,6 @@ def run(self): self.dlg.exec_() def help(self): - url = "http://umep-docs.readthedocs.io/en/latest/processor/Solar%20Radiation%20Solar%20Energy%20on%20" \ - "Building%20Envelopes%20(SEBE).html" + # url = "file://" + self.plugin_dir + "/help/Index.html" + url = "http://www.urban-climate.net/umep/UMEP_Manual#Solar_Radiation:_Solar_Energy_on_Building_Envelopes_.28SEBE.29" webbrowser.open_new_tab(url) diff --git a/SEBE/sebe_dialog.py b/SEBE/sebe_dialog.py index 90afaa0..d9b4b38 100644 --- a/SEBE/sebe_dialog.py +++ b/SEBE/sebe_dialog.py @@ -23,13 +23,14 @@ import os -from PyQt4 import QtGui, uic +from qgis.PyQt import uic +from qgis.PyQt.QtWidgets import QDialog FORM_CLASS, _ = uic.loadUiType(os.path.join( os.path.dirname(__file__), 'sebe_dialog_base.ui')) -class SEBEDialog(QtGui.QDialog, FORM_CLASS): +class 
SEBEDialog(QDialog, FORM_CLASS): def __init__(self, parent=None): """Constructor.""" super(SEBEDialog, self).__init__(parent) diff --git a/SEBE/sebeworker.py b/SEBE/sebeworker.py index 6b57ae9..735119e 100644 --- a/SEBE/sebeworker.py +++ b/SEBE/sebeworker.py @@ -1,10 +1,9 @@ -from PyQt4 import QtCore -# import traceback -# from SEBEfiles import SEBE_2015a_calc +from __future__ import print_function +from builtins import range +from qgis.PyQt import QtCore import numpy as np from ..Utilities.SEBESOLWEIGCommonFiles.shadowingfunction_wallheight_13 import shadowingfunction_wallheight_13 from ..Utilities.SEBESOLWEIGCommonFiles.shadowingfunction_wallheight_23 import shadowingfunction_wallheight_23 -# from ..Utilities import shadowingfunctions as shadow import linecache import sys @@ -266,7 +265,8 @@ def run(self): self.progress.emit() # move progressbar forward # Including radiation from ground on walls as well as removing pixels high than walls - print np.copy(Energyyearwall).shape + # fix_print_with_import + print(np.copy(Energyyearwall).shape) wallmatrixbol = (Energyyearwall > 0).astype(float) Energyyearwall = (Energyyearwall + (np.sum(radmatR[:, 2]) * albedo)/2) * wallmatrixbol diff --git a/SEBEVisual/__init__.py b/SEBEVisual/__init__.py index 211d6ac..30773ba 100644 --- a/SEBEVisual/__init__.py +++ b/SEBEVisual/__init__.py @@ -20,8 +20,9 @@ ***************************************************************************/ This script initializes the plugin, making it known to QGIS. 
""" +from __future__ import absolute_import def classFactory(iface): # load Sun class from file Sun - from sun import Sun + from .sun import Sun return Sun(iface) diff --git a/SEBEVisual/data/temp.tif b/SEBEVisual/data/temp.tif deleted file mode 100644 index ec977f4..0000000 Binary files a/SEBEVisual/data/temp.tif and /dev/null differ diff --git a/SEBEVisual/data/temp_asc.tif b/SEBEVisual/data/temp_asc.tif deleted file mode 100644 index fc1b88a..0000000 Binary files a/SEBEVisual/data/temp_asc.tif and /dev/null differ diff --git a/SEBEVisual/listworker.py b/SEBEVisual/listworker.py index b09b290..eb2214d 100644 --- a/SEBEVisual/listworker.py +++ b/SEBEVisual/listworker.py @@ -1,4 +1,6 @@ -from PyQt4 import QtCore +from builtins import next +from builtins import str +from qgis.PyQt import QtCore import traceback from qgis.core import * @@ -6,7 +8,7 @@ class Worker(QtCore.QObject): finished = QtCore.pyqtSignal(object) - error = QtCore.pyqtSignal(Exception, basestring) + error = QtCore.pyqtSignal(Exception, str) progress = QtCore.pyqtSignal() def __init__(self, minx, maxy, sizex, sizey, point1, point2, xllcorner, ytlcorner, cellsize, dir_path, wall_file): @@ -67,7 +69,7 @@ def run(self): if self.killed is False: self.progress.emit() ret = wall_array - except Exception, e: + except Exception as e: # forward the exception upstream self.error.emit(e, traceback.format_exc()) self.finished.emit(ret) diff --git a/SEBEVisual/resources.py b/SEBEVisual/resources.py index 014b4c7..198f502 100644 --- a/SEBEVisual/resources.py +++ b/SEBEVisual/resources.py @@ -7,7 +7,7 @@ # # WARNING! All changes made in this file will be lost! -from PyQt4 import QtCore +from qgis.PyQt import QtCore qt_resource_data = "\ \x00\x00\x02\xdb\ diff --git a/SEBEVisual/resources_rc.py b/SEBEVisual/resources_rc.py index ff69bc8..28ea928 100644 --- a/SEBEVisual/resources_rc.py +++ b/SEBEVisual/resources_rc.py @@ -7,7 +7,7 @@ # # WARNING! All changes made in this file will be lost! 
-from PyQt4 import QtCore +from qgis.PyQt import QtCore qt_resource_data = "\ \x00\x00\x04\x0a\ diff --git a/SEBEVisual/sun.py b/SEBEVisual/sun.py index 989a191..ca7c8df 100644 --- a/SEBEVisual/sun.py +++ b/SEBEVisual/sun.py @@ -19,14 +19,20 @@ * * ***************************************************************************/ """ +from __future__ import absolute_import +from future import standard_library +standard_library.install_aliases() +from builtins import str +from builtins import object # Import the PyQt and QGIS libraries -from PyQt4.QtCore import QSettings, QTranslator, qVersion, QThread -from PyQt4.QtGui import QIcon, QAction, QFileDialog, QMessageBox, QMovie +from qgis.PyQt.QtCore import QSettings, QTranslator, qVersion, QThread +from qgis.PyQt.QtGui import QIcon +from qgis.PyQt.QtWidgets import QAction, QFileDialog, QMessageBox # , QMovie from qgis.core import * from qgis.utils import * # Initialize Qt resources from file resources.py -import resources +from . import resources import os.path from osgeo import gdal @@ -35,25 +41,25 @@ import math import webbrowser -from listworker import Worker +from .listworker import Worker # Import the code for the GUI dialog -from visualizer_dialog import VisualizerDialog +from .visualizer_dialog import VisualizerDialog #import tools -from tools.areaTool import AreaTool +from .tools.areaTool import AreaTool #3d Model import #import tools.GLWindow as GLWindow # import tools.GLWidget try: # import tools.GLWidget as GLWidget - import tools.GLWidget + from . 
import tools.GLWidget except ImportError: pass -class Sun: +class Sun(object): #Runs when QGis starts up and the plugin is set to be active def __init__(self, iface): @@ -299,10 +305,10 @@ def visualize(self): dataset = gdal.Open(self.plugin_dir + '/data/temp_asc.tif') self.asc_array = dataset.ReadAsArray().astype(np.float) - movie = QMovie(self.plugin_dir + '/loader.gif') - self.visDlg.label.setMovie(movie) - self.visDlg.label.show() - movie.start() + # movie = QMovie(self.plugin_dir + '/loader.gif') + # self.visDlg.label.setMovie(movie) + # self.visDlg.label.show() + # movie.start() self.start_listworker(minx, maxy, sizex, sizey, toplefty) diff --git a/SEBEVisual/visualizer_dialog.py b/SEBEVisual/visualizer_dialog.py index cb1bd8b..6fdf7a2 100644 --- a/SEBEVisual/visualizer_dialog.py +++ b/SEBEVisual/visualizer_dialog.py @@ -23,8 +23,7 @@ import os -from PyQt4 import QtGui, uic - +from qgis.PyQt import QtGui, uic FORM_CLASS, _ = uic.loadUiType(os.path.join( os.path.dirname(__file__), 'visualizer_dialog_base.ui')) diff --git a/SOLWEIG/SOLWEIGpython/Kside_veg_v2015a.py b/SOLWEIG/SOLWEIGpython/Kside_veg_v2015a.py index ca21be4..87ffa0e 100644 --- a/SOLWEIG/SOLWEIGpython/Kside_veg_v2015a.py +++ b/SOLWEIG/SOLWEIGpython/Kside_veg_v2015a.py @@ -1,5 +1,6 @@ +from __future__ import absolute_import import numpy as np -from Kvikt_veg import Kvikt_veg +from .Kvikt_veg import Kvikt_veg def Kside_veg_v2015a(radI,radD,radG,shadow,svfS,svfW,svfN,svfE,svfEveg,svfSveg,svfWveg,svfNveg,azimuth,altitude,psi,t,albedo,F_sh,KupE,KupS,KupW,KupN,cyl): diff --git a/SOLWEIG/SOLWEIGpython/Lside_veg_v2015a.py b/SOLWEIG/SOLWEIGpython/Lside_veg_v2015a.py index a819ab9..3aecfb3 100644 --- a/SOLWEIG/SOLWEIGpython/Lside_veg_v2015a.py +++ b/SOLWEIG/SOLWEIGpython/Lside_veg_v2015a.py @@ -1,5 +1,6 @@ +from __future__ import absolute_import import numpy as np -from Lvikt_veg import Lvikt_veg +from .Lvikt_veg import Lvikt_veg def 
Lside_veg_v2015a(svfS,svfW,svfN,svfE,svfEveg,svfSveg,svfWveg,svfNveg,svfEaveg,svfSaveg,svfWaveg,svfNaveg,azimuth,altitude,Ta,Tw,SBC,ewall,Ldown,esky,t,F_sh,CI,LupE,LupS,LupW,LupN): diff --git a/SOLWEIG/SOLWEIGpython/Solweig_2015a_calc.py b/SOLWEIG/SOLWEIGpython/Solweig_2015a_calc.py index 15fdf5b..f658e6d 100644 --- a/SOLWEIG/SOLWEIGpython/Solweig_2015a_calc.py +++ b/SOLWEIG/SOLWEIGpython/Solweig_2015a_calc.py @@ -1,16 +1,17 @@ +from __future__ import absolute_import import numpy as np -from daylen import daylen +from .daylen import daylen from ...Utilities.SEBESOLWEIGCommonFiles.clearnessindex_2013b import clearnessindex_2013b from ...Utilities.SEBESOLWEIGCommonFiles.diffusefraction import diffusefraction from ...Utilities.SEBESOLWEIGCommonFiles.shadowingfunction_wallheight_13 import shadowingfunction_wallheight_13 from ...Utilities.SEBESOLWEIGCommonFiles.shadowingfunction_wallheight_23 import shadowingfunction_wallheight_23 -from gvf_2015a import gvf_2015a -from cylindric_wedge import cylindric_wedge -from TsWaveDelay_2015a import TsWaveDelay_2015a -from Kup_veg_2015a import Kup_veg_2015a -from Lside_veg_v2015a import Lside_veg_v2015a -from Kside_veg_v2015a import Kside_veg_v2015a +from .gvf_2015a import gvf_2015a +from .cylindric_wedge import cylindric_wedge +from .TsWaveDelay_2015a import TsWaveDelay_2015a +from .Kup_veg_2015a import Kup_veg_2015a +from .Lside_veg_v2015a import Lside_veg_v2015a +from .Kside_veg_v2015a import Kside_veg_v2015a def Solweig_2015a_calc(i, dsm, scale, rows, cols, svf, svfN, svfW, svfE, svfS, svfveg, svfNveg, svfEveg, svfSveg, svfWveg, svfaveg, svfEaveg, svfSaveg, svfWaveg, svfNaveg, vegdem, vegdem2, albedo_b, absK, absL, diff --git a/SOLWEIG/SOLWEIGpython/gvf_2015a.py b/SOLWEIG/SOLWEIGpython/gvf_2015a.py index 4f28716..8f87320 100644 --- a/SOLWEIG/SOLWEIGpython/gvf_2015a.py +++ b/SOLWEIG/SOLWEIGpython/gvf_2015a.py @@ -1,5 +1,6 @@ +from __future__ import absolute_import import numpy as np -from sunonsurface_2015a import 
sunonsurface_2015a +from .sunonsurface_2015a import sunonsurface_2015a def gvf_2015a(wallsun,walls,buildings,scale,shadow,first,second,dirwalls,Tg,Tgwall,Ta,emis_grid,ewall,alb_grid,SBC,albedo_b,rows, cols,Twater,lc_grid,landcover): diff --git a/SOLWEIG/WriteMetadataSOLWEIG.py b/SOLWEIG/WriteMetadataSOLWEIG.py index 43976f4..26567e7 100644 --- a/SOLWEIG/WriteMetadataSOLWEIG.py +++ b/SOLWEIG/WriteMetadataSOLWEIG.py @@ -1,3 +1,4 @@ +from builtins import str # This file prints out run information used for each specific run from time import strftime from osgeo import osr diff --git a/SOLWEIG/resources_rc.py b/SOLWEIG/resources_rc.py index 14b08b3..cfce86e 100644 --- a/SOLWEIG/resources_rc.py +++ b/SOLWEIG/resources_rc.py @@ -7,7 +7,7 @@ # # WARNING! All changes made in this file will be lost! -from PyQt4 import QtCore +from qgis.PyQt import QtCore qt_resource_data = "\ \x00\x00\x04\x0a\ diff --git a/SOLWEIG/solweig.py b/SOLWEIG/solweig.py index aa5436c..4cf854f 100644 --- a/SOLWEIG/solweig.py +++ b/SOLWEIG/solweig.py @@ -20,24 +20,31 @@ * * ***************************************************************************/ """ -from PyQt4.QtCore import QSettings, QTranslator, qVersion, QThread, QCoreApplication -from PyQt4.QtGui import QIcon, QAction, QFileDialog, QMessageBox +from __future__ import absolute_import +from future import standard_library +standard_library.install_aliases() +from builtins import str +from builtins import range +from builtins import object +from qgis.PyQt.QtCore import QSettings, QTranslator, qVersion, QThread, QCoreApplication +from qgis.PyQt.QtGui import QIcon +from qgis.PyQt.QtWidgets import QAction, QFileDialog, QMessageBox from qgis.core import * from qgis.gui import * -from solweig_dialog import SOLWEIGDialog +from .solweig_dialog import SOLWEIGDialog import numpy as np from osgeo import gdal, osr import os.path import zipfile import webbrowser from osgeo.gdalconst import * -from solweigworker import Worker -import WriteMetadataSOLWEIG +from 
.solweigworker import Worker +from . import WriteMetadataSOLWEIG from ..Utilities.SEBESOLWEIGCommonFiles import Solweig_v2015_metdata_noload as metload -from SOLWEIGpython.Tgmaps_v1 import Tgmaps_v1 +from .SOLWEIGpython.Tgmaps_v1 import Tgmaps_v1 -class SOLWEIG: +class SOLWEIG(object): """QGIS Plugin Implementation.""" def __init__(self, iface): @@ -72,8 +79,10 @@ def __init__(self, iface): self.dlg.runButton.clicked.connect(self.start_progress) self.dlg.pushButtonSave.clicked.connect(self.folder_path_out) self.fileDialog = QFileDialog() - self.fileDialog.setFileMode(4) - self.fileDialog.setAcceptMode(1) + # self.fileDialog.setFileMode(4) + # self.fileDialog.setAcceptMode(1) + self.fileDialog.setFileMode(QFileDialog.Directory) + self.fileDialog.setOption(QFileDialog.ShowDirsOnly, True) self.dlg.pushButtonImportMetData.clicked.connect(self.met_file) self.fileDialogMet = QFileDialog() @@ -90,51 +99,34 @@ def __init__(self, iface): # self.toolbar = self.iface.addToolBar(u'SOLWEIG') # self.toolbar.setObjectName(u'SOLWEIG') - # self.layerComboManagerDSM = RasterLayerCombo(self.dlg.comboBox_dsm) - # RasterLayerCombo(self.dlg.comboBox_dsm, initLayer="") self.layerComboManagerDSM = QgsMapLayerComboBox(self.dlg.widgetDSM) self.layerComboManagerDSM.setFilters(QgsMapLayerProxyModel.RasterLayer) self.layerComboManagerDSM.setFixedWidth(175) self.layerComboManagerDSM.setCurrentIndex(-1) - # self.layerComboManagerVEGDSM = RasterLayerCombo(self.dlg.comboBox_vegdsm) - # RasterLayerCombo(self.dlg.comboBox_vegdsm, initLayer="") self.layerComboManagerVEGDSM = QgsMapLayerComboBox(self.dlg.widgetCDSM) self.layerComboManagerVEGDSM.setFilters(QgsMapLayerProxyModel.RasterLayer) self.layerComboManagerVEGDSM.setFixedWidth(175) self.layerComboManagerVEGDSM.setCurrentIndex(-1) - # self.layerComboManagerVEGDSM2 = RasterLayerCombo(self.dlg.comboBox_vegdsm2) - # RasterLayerCombo(self.dlg.comboBox_vegdsm2, initLayer="") self.layerComboManagerVEGDSM2 = QgsMapLayerComboBox(self.dlg.widgetTDSM) 
self.layerComboManagerVEGDSM2.setFilters(QgsMapLayerProxyModel.RasterLayer) self.layerComboManagerVEGDSM2.setFixedWidth(175) self.layerComboManagerVEGDSM2.setCurrentIndex(-1) - # self.layerComboManagerDEM = RasterLayerCombo(self.dlg.comboBox_dem) - # RasterLayerCombo(self.dlg.comboBox_dem, initLayer="") self.layerComboManagerDEM = QgsMapLayerComboBox(self.dlg.widgetDEM) self.layerComboManagerDEM.setFilters(QgsMapLayerProxyModel.RasterLayer) self.layerComboManagerDEM.setFixedWidth(175) self.layerComboManagerDEM.setCurrentIndex(-1) - # self.layerComboManagerLC = RasterLayerCombo(self.dlg.comboBox_landcover) - # RasterLayerCombo(self.dlg.comboBox_landcover, initLayer="") self.layerComboManagerLC = QgsMapLayerComboBox(self.dlg.widgetLC) self.layerComboManagerLC.setFilters(QgsMapLayerProxyModel.RasterLayer) self.layerComboManagerLC.setFixedWidth(175) self.layerComboManagerLC.setCurrentIndex(-1) - # self.layerComboManagerWH = RasterLayerCombo(self.dlg.comboBox_wallheight) - # RasterLayerCombo(self.dlg.comboBox_wallheight, initLayer="") self.layerComboManagerWH = QgsMapLayerComboBox(self.dlg.widgetWH) self.layerComboManagerWH.setFilters(QgsMapLayerProxyModel.RasterLayer) self.layerComboManagerWH.setFixedWidth(175) self.layerComboManagerWH.setCurrentIndex(-1) - # self.layerComboManagerWA = RasterLayerCombo(self.dlg.comboBox_wallaspect) - # RasterLayerCombo(self.dlg.comboBox_wallaspect, initLayer="") self.layerComboManagerWA = QgsMapLayerComboBox(self.dlg.widgetWA) self.layerComboManagerWA.setFilters(QgsMapLayerProxyModel.RasterLayer) self.layerComboManagerWA.setFixedWidth(175) self.layerComboManagerWA.setCurrentIndex(-1) - # self.layerComboManagerPOI = VectorLayerCombo(self.dlg.comboBox_POIlayer) - # fieldgen = VectorLayerCombo(self.dlg.comboBox_POIlayer, initLayer="", options={"geomType": QGis.Point}) - # self.layerComboManagerPOIfield = FieldCombo(self.dlg.comboBox_POIfield, fieldgen, initField="") #, options={"fieldType": int} self.layerComboManagerPOI = 
QgsMapLayerComboBox(self.dlg.widgetPointLayer) self.layerComboManagerPOI.setCurrentIndex(-1) self.layerComboManagerPOI.setFilters(QgsMapLayerProxyModel.PointLayer) @@ -261,7 +253,7 @@ def read_metdata(self): if self.metdata.shape[1] == 24: self.iface.messageBar().pushMessage("SOLWEIG", "Meteorological data succesfully loaded", - level=QgsMessageBar.INFO, duration=3) + level=Qgis.Info, duration=3) else: QMessageBox.critical(self.dlg, "Import Error", "Wrong number of columns in meteorological data. You can " @@ -716,9 +708,6 @@ def start_progress(self): YYYY, altitude, azimuth, zen, jday, leafon, dectime, altmax = \ metload.Solweig_2015a_metdata_noload(self.metdata, location, UTC) - # QMessageBox.critical(self.dlg, "Error", str(location)) - # return - # %Creating vectors from meteorological input DOY = self.metdata[:, 1] hours = self.metdata[:, 2] @@ -750,7 +739,8 @@ def start_progress(self): vlayer = QgsVectorLayer(poilyr.source(), "point", "ogr") prov = vlayer.dataProvider() #fields = prov.fields() - idx = vlayer.fieldNameIndex(poi_field) + # idx = vlayer.fieldNameIndex(poi_field) + idx = vlayer.fields().indexFromName(poi_field) numfeat = vlayer.featureCount() self.poiname = [] self.poisxy = np.zeros((numfeat, 3)) - 999 @@ -774,14 +764,9 @@ def start_progress(self): return for k in range(0, self.poisxy.shape[0]): - poi_save = [] # np.zeros((1, 33)) - #data_out = self.folderPath[0] + '/POI_' + str(self.poisxy[k, 0]) + '.txt' + poi_save = [] # np.zeros((1, 33)) data_out = self.folderPath[0] + '/POI_' + str(self.poiname[k]) + '.txt' np.savetxt(data_out, poi_save, delimiter=' ', header=header, comments='') # fmt=numformat, - #f_handle = file(data_out, 'a') - #endoffile = [-9, -9] - #np.savetxt(f_handle, endoffile, fmt='%2d') - #f_handle.close() self.dlg.progressBar.setRange(0, Ta.__len__()) @@ -804,15 +789,6 @@ def start_progress(self): amaxvalue = self.dsm.max() - self.dsm.min() amaxvalue = np.maximum(amaxvalue, vegmax) - # # Elevation vegdsms if buildingDEM includes 
ground heights - # vegdem = self.vegdsm + self.dsm - # vegdem[vegdem == self.dsm] = 0 - # vegdem2 = self.vegdsm2 + self.dsm - # vegdem2[vegdem2 == self.dsm] = 0 - # - # # % Bush separation - # bush = np.logical_not((vegdem2 * vegdem)) * vegdem - # Elevation vegdsms if buildingDEM includes ground heights self.vegdsm = self.vegdsm + self.dsm self.vegdsm[self.vegdsm == self.dsm] = 0 @@ -829,9 +805,6 @@ def start_progress(self): bush = np.zeros([rows, cols]) amaxvalue = 0 - # QMessageBox.critical(None, "Test", str(amaxvalue)) - # return - # % Ts parameterisation maps if self.landcover == 1.: if np.max(self.lcgrid) > 7 or np.min(self.lcgrid) < 1: @@ -864,10 +837,6 @@ def start_progress(self): timeaddN = 0. firstdaytime = 1. - # self.iface.messageBar().pushMessage("Ta.__len__ ", str(int(Ta.__len__()))) - #self.iface.messageBar().pushMessage("__len__", str(buildings.shape[0])) - #self.iface.messageBar().pushMessage("__len__", str(self.lcgrid.shape[0])) - WriteMetadataSOLWEIG.writeRunInfo(self.folderPath[0], filepath_dsm, self.gdal_dsm, self.usevegdem, filePath_cdsm, trunkfile, filePath_tdsm, lat, lon, UTC, self.landcover, filePath_lc, metfileexist, PathMet, self.metdata, self.plugin_dir, @@ -876,8 +845,6 @@ def start_progress(self): # If metfile starts at night CI = 1. 
- # self.iface.messageBar().pushMessage("Ta", self.folderPath[0] + '/Tmrt_' + str(int(YYYY[0, i])) + '_' + str(int(DOY[0, i])) + '_' + str(int(hours[0, i])) + str(int(minu[0, i])) + '.tif') - # self.iface.messageBar().pushMessage("hour", str(hours[5])) self.startWorker(self.dsm, self.scale, rows, cols, svf, svfN, svfW, svfE, svfS, svfveg, svfNveg, svfEveg, svfSveg, svfWveg, svfaveg, svfEaveg, svfSaveg, svfWaveg, svfNaveg, self.vegdsm, self.vegdsm2, albedo_b, absK, absL, ewall, Fside, Fup, altitude, @@ -963,11 +930,12 @@ def start_progress(self): # self.iface.messageBar().pushMessage("SOLWEIG", "Model calculations successful.") def run(self): + """This methods is needed for QGIS to start the plugin""" self.dlg.show() self.dlg.exec_() def help(self): - url = 'http://umep-docs.readthedocs.io/en/latest/processor/Outdoor%20Thermal%20Comfort%20SOLWEIG.html' + url = 'http://urban-climate.net/umep/UMEP_Manual#Outdoor_Thermal_Comfort:_SOLWEIG' webbrowser.open_new_tab(url) def saveraster(self, gdal_data, filename, raster): @@ -1034,7 +1002,6 @@ def workerFinished(self, ret): self.thread.deleteLater() # remove widget from message bar if ret is not None: - # report the result # load result into canvas if self.dlg.checkBoxIntoCanvas.isChecked(): tmrtplot = ret["tmrtplot"] @@ -1047,20 +1014,11 @@ def workerFinished(self, ret): rlayer.triggerRepaint() rlayer.loadNamedStyle(self.plugin_dir + '/tmrt.qml') - # self.QgsMapLayerRegistry.instance().addMapLayer(rlayer) if hasattr(rlayer, "setCacheImage"): rlayer.setCacheImage(None) rlayer.triggerRepaint() - # Set opacity - # rlayer.renderer().setOpacity(0.5) - - # Trigger a repaint - # if hasattr(rlayer, "setCacheImage"): - # rlayer.setCacheImage(None) - # rlayer.triggerRepaint() - QMessageBox.information(self.dlg,"SOLWEIG", "Model calculations successful!\r\n" "Setting for this calculation is found in RunInfoSOLWEIG.txt located in " "the output folder specified.") @@ -1074,7 +1032,7 @@ def workerFinished(self, ret): # notify the user 
that something went wrong self.iface.messageBar().pushMessage( 'Operations cancelled either by user or error. See the General tab in Log Meassages Panel (speech bubble, lower right) for more information.', - level=QgsMessageBar.CRITICAL, duration=3) + level=Qgis.Critical, duration=3) self.dlg.runButton.setText('Run') self.dlg.runButton.clicked.disconnect() self.dlg.runButton.clicked.connect(self.start_progress) @@ -1082,4 +1040,4 @@ def workerFinished(self, ret): self.dlg.progressBar.setValue(0) def workerError(self, errorstring): - QgsMessageLog.logMessage(errorstring, level=QgsMessageLog.CRITICAL) + QgsMessageLog.logMessage(errorstring, level=Qgis.Critical) diff --git a/SOLWEIG/solweig_dialog.py b/SOLWEIG/solweig_dialog.py index 850855f..c226e4f 100644 --- a/SOLWEIG/solweig_dialog.py +++ b/SOLWEIG/solweig_dialog.py @@ -23,13 +23,14 @@ import os -from PyQt4 import QtGui, uic +from qgis.PyQt import uic +from qgis.PyQt.QtWidgets import QDialog FORM_CLASS, _ = uic.loadUiType(os.path.join( os.path.dirname(__file__), 'solweig_dialog_base.ui')) -class SOLWEIGDialog(QtGui.QDialog, FORM_CLASS): +class SOLWEIGDialog(QDialog, FORM_CLASS): def __init__(self, parent=None): """Constructor.""" super(SOLWEIGDialog, self).__init__(parent) diff --git a/SOLWEIG/solweigworker.py b/SOLWEIG/solweigworker.py index f4c1f52..c01f971 100644 --- a/SOLWEIG/solweigworker.py +++ b/SOLWEIG/solweigworker.py @@ -1,10 +1,15 @@ -from PyQt4 import QtCore, QtGui +from __future__ import absolute_import +from builtins import str +from builtins import range +from qgis.PyQt import QtCore, QtGui import traceback import numpy as np import linecache import sys +from qgis.core import QgsFeature, QgsVectorFileWriter, QgsVectorDataProvider, QgsField, Qgis, QgsMessageLog -from SOLWEIGpython import Solweig_2015a_calc as so + +from .SOLWEIGpython import Solweig_2015a_calc as so # from SOLWEIGpython.clearnessindex_2013b import clearnessindex_2013b from ..Utilities.SEBESOLWEIGCommonFiles.clearnessindex_2013b 
import clearnessindex_2013b from osgeo.gdalconst import * @@ -215,7 +220,10 @@ def run(self): tmrtplot = np.zeros((rows, cols)) TgOut1 = np.zeros((rows, cols)) - numformat = '%3d %2d %3d %2d %6.5f ' + '%6.2f ' * 28 + numformat = '%d %d %d %d %.5f ' + '%.2f ' * 28 + + numformat = '%d %d %d %d %.5f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f ' \ + '%.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f' for i in np.arange(0, Ta.__len__()): self.progress.emit() # move progressbar forward @@ -299,7 +307,8 @@ def run(self): poi_save[0, 32] = KsideI[int(poisxy[k, 2]), int(poisxy[k, 1])] data_out = self.folderPath[0] + '/POI_' + str(self.poiname[k]) + '.txt' - f_handle = file(data_out, 'a') + # f_handle = file(data_out, 'a') + f_handle = open(data_out, 'ab') np.savetxt(f_handle, poi_save, fmt=numformat) f_handle.close() diff --git a/SUEWS/resources_rc.py b/SUEWS/resources_rc.py index 5fcedf0..c0a2bd2 100644 --- a/SUEWS/resources_rc.py +++ b/SUEWS/resources_rc.py @@ -7,7 +7,7 @@ # # WARNING! All changes made in this file will be lost! 
-from PyQt4 import QtCore +from qgis.PyQt import QtCore qt_resource_data = "\ \x00\x00\x04\x0a\ diff --git a/SUEWS/suews.py b/SUEWS/suews.py index 04efb47..6bc2086 100644 --- a/SUEWS/suews.py +++ b/SUEWS/suews.py @@ -20,20 +20,26 @@ * * ***************************************************************************/ """ -from PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplication -from PyQt4.QtGui import QAction, QIcon, QFileDialog, QMessageBox +from __future__ import absolute_import +from future import standard_library +standard_library.install_aliases() +from builtins import str +from builtins import object +from qgis.PyQt.QtCore import QSettings, QTranslator, qVersion, QCoreApplication +from qgis.PyQt.QtWidgets import QAction, QFileDialog, QMessageBox +from qgis.PyQt.QtGui import QIcon from qgis.gui import QgsMessageBar -from suews_dialog import SUEWSDialog +from .suews_dialog import SUEWSDialog import os import shutil import sys import webbrowser -import urllib +import urllib.request, urllib.parse, urllib.error from ..Utilities import f90nml from ..suewsmodel import Suews_wrapper_v2018a -class SUEWS: +class SUEWS(object): """QGIS Plugin Implementation.""" def __init__(self, iface): @@ -69,8 +75,10 @@ def __init__(self, iface): self.dlg.pushButtonSave.clicked.connect(self.folder_path_out) self.dlg.helpButton.clicked.connect(self.help) self.fileDialog = QFileDialog() - self.fileDialog.setFileMode(4) - self.fileDialog.setAcceptMode(1) + # self.fileDialog.setFileMode(4) + # self.fileDialog.setAcceptMode(1) + self.fileDialog.setFileMode(QFileDialog.Directory) + self.fileDialog.setOption(QFileDialog.ShowDirsOnly, True) # Declare instance attributes self.actions = [] @@ -155,28 +163,46 @@ def unload(self): def run(self): if not (os.path.isfile(self.model_dir + os.sep + 'SUEWS_V2018a') or os.path.isfile(self.model_dir + os.sep + 'SUEWS_V2018a.exe')): if QMessageBox.question(self.iface.mainWindow(), "OS specific binaries missing", - "Before you start to use 
this plugin for the very first time, the OS specific suews\r\n" - "program (4Mb) must be be download from the UMEP repository and stored\r\n" - "in your plugin directory: " - "(" + self.model_dir + ").\r\n" - "\r\n" - "Join the email-list for updates and other information:\r\n" - "http://www.lists.rdg.ac.uk/mailman/listinfo/met-umep.\r\n" - "\r\n" - "UMEP on the web:\r\n" - "http://www.urban-climate.net/umep/\r\n" - "\r\n" - "\r\n" - "Do you want to contiune with the download?", QMessageBox.Ok | QMessageBox.Cancel) == QMessageBox.Ok: + "Before you start to use this plugin for the very first time, the OS specific suews\r\n" + "program (4Mb) must be be download from the UMEP repository and stored\r\n" + "in your plugin directory: " + "(" + self.model_dir + ").\r\n" + "\r\n" + "Join the email-list for updates and other information:\r\n" + "http://www.lists.rdg.ac.uk/mailman/listinfo/met-umep.\r\n" + "\r\n" + "UMEP on the web:\r\n" + "http://www.urban-climate.net/umep/\r\n" + "\r\n" + "\r\n" + "Do you want to contiune with the download?", + QMessageBox.Ok | QMessageBox.Cancel) == QMessageBox.Ok: + # testfile = urllib.URLopener() if sys.platform == 'win32': - urllib.urlretrieve('https://gvc.gu.se/digitalAssets/1695/1695894_suews_v2018a.exe', - self.model_dir + os.sep + 'SUEWS_V2018a.exe') + urllib.request.urlretrieve('https://gvc.gu.se/digitalAssets/1695/1695894_suews_v2018a.exe', + self.model_dir + os.sep + 'SUEWS_V2018a.exe') + # testfile2 = urllib.URLopener() + # testfile2.retrieve('http://www.urban-climate.net/umep/repo/nib/win/cyggcc_s-seh-1.dll', + # self.model_dir + os.sep + 'cyggcc_s-seh-1.dll') + # testfile3 = urllib.URLopener() + # testfile3.retrieve('http://www.urban-climate.net/umep/repo/nib/win/cyggfortran-3.dll', + # self.model_dir + os.sep + 'cyggfortran-3.dll') + # testfile4 = urllib.URLopener() + # testfile4.retrieve('http://www.urban-climate.net/umep/repo/nib/win/cygquadmath-0.dll', + # self.model_dir + os.sep + 'cygquadmath-0.dll') + # testfile5 = 
urllib.URLopener() + # testfile5.retrieve('http://www.urban-climate.net/umep/repo/nib/win/cygwin1.dll', + # self.model_dir + os.sep + 'cygwin1.dll') if sys.platform == 'linux2': - urllib.urlretrieve('https://gvc.gu.se/digitalAssets/1695/1695887_suews_v2018a', self.model_dir + os.sep + 'SUEWS_V2018a') + urllib.request.urlretrieve('https://gvc.gu.se/digitalAssets/1695/1695887_suews_v2018a', + self.model_dir + os.sep + 'SUEWS_V2018a') if sys.platform == 'darwin': - urllib.urlretrieve('https://gvc.gu.se/digitalAssets/1695/1695886_suews_v2018a', self.model_dir + os.sep + 'SUEWS_V2018a') + urllib.request.urlretrieve('https://gvc.gu.se/digitalAssets/1695/1695886_suews_v2018a', + self.model_dir + os.sep + 'SUEWS_V2018a') + else: - QMessageBox.critical(self.iface.mainWindow(), "Binaries not downloaded", "This plugin will not be able to start before binaries are downloaded") + QMessageBox.critical(self.iface.mainWindow(), "Binaries not downloaded", + "This plugin will not be able to start before binaries are downloaded") return self.dlg.show() @@ -300,7 +326,7 @@ def start_progress(self): minperyear = 525600 count = 0 - for line in open(infolder + "/" + filecode + "_" + str(yyyy) + "_data_" + inputRes + ".txt").xreadlines(): count += 1 + for line in open(infolder + "/" + filecode + "_" + str(yyyy) + "_data_" + inputRes + ".txt"): count += 1 linesperyear = int(minperyear / 60.) 
diff --git a/SUEWS/suews_dialog.py b/SUEWS/suews_dialog.py index 9b19a57..106c8ca 100644 --- a/SUEWS/suews_dialog.py +++ b/SUEWS/suews_dialog.py @@ -23,13 +23,14 @@ import os -from PyQt4 import QtGui, uic +from qgis.PyQt import uic +from qgis.PyQt.QtWidgets import QDialog FORM_CLASS, _ = uic.loadUiType(os.path.join( os.path.dirname(__file__), 'suews_dialog_base.ui')) -class SUEWSDialog(QtGui.QDialog, FORM_CLASS): +class SUEWSDialog(QDialog, FORM_CLASS): def __init__(self, parent=None): """Constructor.""" super(SUEWSDialog, self).__init__(parent) diff --git a/SUEWSAnalyzer/resources_rc.py b/SUEWSAnalyzer/resources_rc.py index 127f54e..3961a0b 100644 --- a/SUEWSAnalyzer/resources_rc.py +++ b/SUEWSAnalyzer/resources_rc.py @@ -7,7 +7,7 @@ # # WARNING! All changes made in this file will be lost! -from PyQt4 import QtCore +from qgis.PyQt import QtCore qt_resource_data = "\ \x00\x00\x03\xd1\ diff --git a/SUEWSAnalyzer/suews_analyzer.py b/SUEWSAnalyzer/suews_analyzer.py index 01fe3ed..86bb9b8 100644 --- a/SUEWSAnalyzer/suews_analyzer.py +++ b/SUEWSAnalyzer/suews_analyzer.py @@ -20,15 +20,21 @@ * * ***************************************************************************/ """ -from PyQt4.QtCore import QSettings, QTranslator, qVersion, QVariant, QCoreApplication -from PyQt4.QtGui import QAction, QIcon, QFileDialog, QMessageBox -from PyQt4 import QtGui +from __future__ import print_function +from __future__ import absolute_import +from builtins import str +from builtins import range +from builtins import object +from qgis.PyQt.QtCore import QSettings, QTranslator, qVersion, QVariant, QCoreApplication +from qgis.PyQt.QtWidgets import QAction, QFileDialog, QMessageBox +from qgis.PyQt.QtGui import QIcon +from qgis.PyQt import QtGui from qgis.core import * from qgis.gui import * # Initialize Qt resources from file resources.py -import resources_rc +# from . 
import resources_rc # Import the code for the dialog -from suews_analyzer_dialog import SUEWSAnalyzerDialog +from .suews_analyzer_dialog import SUEWSAnalyzerDialog import os.path import webbrowser from osgeo import gdal @@ -49,7 +55,7 @@ pass -class SUEWSAnalyzer: +class SUEWSAnalyzer(object): """QGIS Plugin Implementation.""" def __init__(self, iface): @@ -355,7 +361,7 @@ def changeYearSP(self): def geotiff_save(self): self.outputfile = self.fileDialog.getSaveFileName(None, "Save File As:", None, "GeoTIFF (*.tif)") - self.dlg.textOutput.setText(self.outputfile) + self.dlg.textOutput.setText(self.outputfile[0]) def get_unit(self): uni = self.lineunit[self.id] @@ -462,7 +468,8 @@ def spatial(self): vlayer = QgsVectorLayer(poly.source(), "polygon", "ogr") prov = vlayer.dataProvider() fields = prov.fields() - idx = vlayer.fieldNameIndex(poly_field) + # idx = vlayer.fieldNameIndex(poly_field) + idx = vlayer.fields().indexFromName(poly_field) typetest = fields.at(idx).type() if typetest == 10: QMessageBox.critical(self.dlg, "ID field is sting type", "ID field must be either integer or float") @@ -522,7 +529,7 @@ def spatial(self): idvec = np.vstack((idvec, int(gid))) statvector = statvectemp[1:, :] - print idvec + # fix_print_with_import statmat = np.hstack((idvec[1:, :], statvector)) numformat2 = '%8d %5.3f' @@ -545,11 +552,11 @@ def spatial(self): resx = self.dlg.doubleSpinBoxRes.value() else: for f in vlayer.getFeatures(): # Taking first polygon. 
Could probably be done nicer - geom = f.geometry().asPolygon() + # geom = f.geometry().asPolygon() + geom = f.geometry().asMultiPolygon() break - - resx = np.abs(geom[0][0].x() - geom[0][2].x()) - resy = np.abs(geom[0][0].y() - geom[0][2].y()) + resx = np.abs(geom[0][0][0][0] - geom[0][0][2][0]) # x + resy = np.abs(geom[0][0][0][1] - geom[0][0][2][1]) # y if not resx == resy: QMessageBox.critical(self.dlg, "Error", "Polygons not squared in current CRS") @@ -601,7 +608,7 @@ def spatial(self): # # Set colors s = QgsRasterShader() c = QgsColorRampShader() - c.setColorRampType(QgsColorRampShader.INTERPOLATED) + c.setColorRampType(QgsColorRampShader.Interpolated) i = [] i.append(QgsColorRampShader.ColorRampItem(np.nanmin(gridout), QtGui.QColor('#2b83ba'), str(np.nanmin(gridout)))) i.append(QgsColorRampShader.ColorRampItem(np.nanmedian(gridout), QtGui.QColor('#ffffbf'), str(np.nanmedian(gridout)))) diff --git a/SUEWSAnalyzer/suews_analyzer_dialog.py b/SUEWSAnalyzer/suews_analyzer_dialog.py index 8fca475..58f8e27 100644 --- a/SUEWSAnalyzer/suews_analyzer_dialog.py +++ b/SUEWSAnalyzer/suews_analyzer_dialog.py @@ -23,13 +23,14 @@ import os -from PyQt4 import QtGui, uic +from qgis.PyQt import uic +from qgis.PyQt.QtWidgets import QDialog FORM_CLASS, _ = uic.loadUiType(os.path.join( os.path.dirname(__file__), 'suews_analyzer_dialog_base.ui')) -class SUEWSAnalyzerDialog(QtGui.QDialog, FORM_CLASS): +class SUEWSAnalyzerDialog(QDialog, FORM_CLASS): def __init__(self, parent=None): """Constructor.""" super(SUEWSAnalyzerDialog, self).__init__(parent) diff --git a/SUEWSPrepare/Modules/jdcal.py b/SUEWSPrepare/Modules/jdcal.py deleted file mode 100644 index e0fd31b..0000000 --- a/SUEWSPrepare/Modules/jdcal.py +++ /dev/null @@ -1,445 +0,0 @@ -# -*- coding:utf-8 -*- -"""Functions for converting between Julian dates and calendar dates. 
- -A function for converting Gregorian calendar dates to Julian dates, and -another function for converting Julian calendar dates to Julian dates -are defined. Two functions for the reverse calculations are also -defined. - -Different regions of the world switched to Gregorian calendar from -Julian calendar on different dates. Having separate functions for Julian -and Gregorian calendars allow maximum flexibility in choosing the -relevant calendar. - -All the above functions are "proleptic". This means that they work for -dates on which the concerned calendar is not valid. For example, -Gregorian calendar was not used prior to around October 1582. - -Julian dates are stored in two floating point numbers (double). Julian -dates, and Modified Julian dates, are large numbers. If only one number -is used, then the precision of the time stored is limited. Using two -numbers, time can be split in a manner that will allow maximum -precision. For example, the first number could be the Julian date for -the beginning of a day and the second number could be the fractional -day. Calculations that need the latter part can now work with maximum -precision. - -A function to test if a given Gregorian calendar year is a leap year is -defined. - -Zero point of Modified Julian Date (MJD) and the MJD of 2000/1/1 -12:00:00 are also given. - -This module is based on the TPM C library, by Jeffery W. Percival. The -idea for splitting Julian date into two floating point numbers was -inspired by the IAU SOFA C library. 
- -:author: Prasanth Nair -:contact: prasanthhn@gmail.com -:license: BSD (http://www.opensource.org/licenses/bsd-license.php) -""" -from __future__ import division -from __future__ import print_function -import math - -__version__ = "1.2" - -MJD_0 = 2400000.5 -MJD_JD2000 = 51544.5 - - -def fpart(x): - """Return fractional part of given number.""" - return math.modf(x)[0] - - -def ipart(x): - """Return integer part of given number.""" - return math.modf(x)[1] - - -def is_leap(year): - """Leap year or not in the Gregorian calendar.""" - x = math.fmod(year, 4) - y = math.fmod(year, 100) - z = math.fmod(year, 400) - - # Divisible by 4 and, - # either not divisible by 100 or divisible by 400. - return not x and (y or not z) - - -def gcal2jd(year, month, day): - """Gregorian calendar date to Julian date. - - The input and output are for the proleptic Gregorian calendar, - i.e., no consideration of historical usage of the calendar is - made. - - Parameters - ---------- - year : int - Year as an integer. - month : int - Month as an integer. - day : int - Day as an integer. - - Returns - ------- - jd1, jd2: 2-element tuple of floats - When added together, the numbers give the Julian date for the - given Gregorian calendar date. The first number is always - MJD_0 i.e., 2451545.5. So the second is the MJD. 
- - Examples - -------- - >>> gcal2jd(2000,1,1) - (2400000.5, 51544.0) - >>> 2400000.5 + 51544.0 + 0.5 - 2451545.0 - >>> year = [-4699, -2114, -1050, -123, -1, 0, 1, 123, 1678.0, 2000, - ....: 2012, 2245] - >>> month = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] - >>> day = [1, 12, 23, 14, 25, 16, 27, 8, 9, 10, 11, 31] - >>> x = [gcal2jd(y, m, d) for y, m, d in zip(year, month, day)] - >>> for i in x: print i - (2400000.5, -2395215.0) - (2400000.5, -1451021.0) - (2400000.5, -1062364.0) - (2400000.5, -723762.0) - (2400000.5, -679162.0) - (2400000.5, -678774.0) - (2400000.5, -678368.0) - (2400000.5, -633797.0) - (2400000.5, -65812.0) - (2400000.5, 51827.0) - (2400000.5, 56242.0) - (2400000.5, 141393.0) - - Negative months and days are valid. For example, 2000/-2/-4 => - 1999/+12-2/-4 => 1999/10/-4 => 1999/9/30-4 => 1999/9/26. - - >>> gcal2jd(2000, -2, -4) - (2400000.5, 51447.0) - >>> gcal2jd(1999, 9, 26) - (2400000.5, 51447.0) - - >>> gcal2jd(2000, 2, -1) - (2400000.5, 51573.0) - >>> gcal2jd(2000, 1, 30) - (2400000.5, 51573.0) - - >>> gcal2jd(2000, 3, -1) - (2400000.5, 51602.0) - >>> gcal2jd(2000, 2, 28) - (2400000.5, 51602.0) - - Month 0 becomes previous month. - - >>> gcal2jd(2000, 0, 1) - (2400000.5, 51513.0) - >>> gcal2jd(1999, 12, 1) - (2400000.5, 51513.0) - - Day number 0 becomes last day of previous month. - - >>> gcal2jd(2000, 3, 0) - (2400000.5, 51603.0) - >>> gcal2jd(2000, 2, 29) - (2400000.5, 51603.0) - - If `day` is greater than the number of days in `month`, then it - gets carried over to the next month. - - >>> gcal2jd(2000,2,30) - (2400000.5, 51604.0) - >>> gcal2jd(2000,3,1) - (2400000.5, 51604.0) - - >>> gcal2jd(2001,2,30) - (2400000.5, 51970.0) - >>> gcal2jd(2001,3,2) - (2400000.5, 51970.0) - - Notes - ----- - The returned Julian date is for mid-night of the given date. To - find the Julian date for any time of the day, simply add time as a - fraction of a day. 
For example Julian date for mid-day can be - obtained by adding 0.5 to either the first part or the second - part. The latter is preferable, since it will give the MJD for the - date and time. - - BC dates should be given as -(BC - 1) where BC is the year. For - example 1 BC == 0, 2 BC == -1, and so on. - - Negative numbers can be used for `month` and `day`. For example - 2000, -1, 1 is the same as 1999, 11, 1. - - The Julian dates are proleptic Julian dates, i.e., values are - returned without considering if Gregorian dates are valid for the - given date. - - The input values are truncated to integers. - - """ - year = int(year) - month = int(month) - day = int(day) - - a = ipart((month - 14) / 12.0) - jd = ipart((1461 * (year + 4800 + a)) / 4.0) - jd += ipart((367 * (month - 2 - 12 * a)) / 12.0) - x = ipart((year + 4900 + a) / 100.0) - jd -= ipart((3 * x) / 4.0) - jd += day - 2432075.5 # was 32075; add 2400000.5 - - jd -= 0.5 # 0 hours; above JD is for midday, switch to midnight. - - return MJD_0, jd - - -def jd2gcal(jd1, jd2): - """Julian date to Gregorian calendar date and time of day. - - The input and output are for the proleptic Gregorian calendar, - i.e., no consideration of historical usage of the calendar is - made. - - Parameters - ---------- - jd1, jd2: int - Sum of the two numbers is taken as the given Julian date. For - example `jd1` can be the zero point of MJD (MJD_0) and `jd2` - can be the MJD of the date and time. But any combination will - work. - - Returns - ------- - y, m, d, f : int, int, int, float - Four element tuple containing year, month, day and the - fractional part of the day in the Gregorian calendar. The first - three are integers, and the last part is a float. - - Examples - -------- - >>> jd2gcal(*gcal2jd(2000,1,1)) - (2000, 1, 1, 0.0) - >>> jd2gcal(*gcal2jd(1950,1,1)) - (1950, 1, 1, 0.0) - - Out of range months and days are carried over to the next/previous - year or next/previous month. See gcal2jd for more examples. 
- - >>> jd2gcal(*gcal2jd(1999,10,12)) - (1999, 10, 12, 0.0) - >>> jd2gcal(*gcal2jd(2000,2,30)) - (2000, 3, 1, 0.0) - >>> jd2gcal(*gcal2jd(-1999,10,12)) - (-1999, 10, 12, 0.0) - >>> jd2gcal(*gcal2jd(2000, -2, -4)) - (1999, 9, 26, 0.0) - - >>> gcal2jd(2000,1,1) - (2400000.5, 51544.0) - >>> jd2gcal(2400000.5, 51544.0) - (2000, 1, 1, 0.0) - >>> jd2gcal(2400000.5, 51544.5) - (2000, 1, 1, 0.5) - >>> jd2gcal(2400000.5, 51544.245) - (2000, 1, 1, 0.24500000000261934) - >>> jd2gcal(2400000.5, 51544.1) - (2000, 1, 1, 0.099999999998544808) - >>> jd2gcal(2400000.5, 51544.75) - (2000, 1, 1, 0.75) - - Notes - ----- - The last element of the tuple is the same as - - (hh + mm / 60.0 + ss / 3600.0) / 24.0 - - where hh, mm, and ss are the hour, minute and second of the day. - - See Also - -------- - gcal2jd - - """ - from math import modf - - jd1_f, jd1_i = modf(jd1) - jd2_f, jd2_i = modf(jd2) - - jd_i = jd1_i + jd2_i - - f = jd1_f + jd2_f - - # Set JD to noon of the current date. Fractional part is the - # fraction from midnight of the current date. - if -0.5 < f < 0.5: - f += 0.5 - elif f >= 0.5: - jd_i += 1 - f -= 0.5 - elif f <= -0.5: - jd_i -= 1 - f += 1.5 - - l = jd_i + 68569 - n = ipart((4 * l) / 146097.0) - l -= ipart(((146097 * n) + 3) / 4.0) - i = ipart((4000 * (l + 1)) / 1461001) - l -= ipart((1461 * i) / 4.0) - 31 - j = ipart((80 * l) / 2447.0) - day = l - ipart((2447 * j) / 80.0) - l = ipart(j / 11.0) - month = j + 2 - (12 * l) - year = 100 * (n - 49) + i + l - - return int(year), int(month), int(day), f - - -def jcal2jd(year, month, day): - """Julian calendar date to Julian date. - - The input and output are for the proleptic Julian calendar, - i.e., no consideration of historical usage of the calendar is - made. - - Parameters - ---------- - year : int - Year as an integer. - month : int - Month as an integer. - day : int - Day as an integer. 
- - Returns - ------- - jd1, jd2: 2-element tuple of floats - When added together, the numbers give the Julian date for the - given Julian calendar date. The first number is always - MJD_0 i.e., 2451545.5. So the second is the MJD. - - Examples - -------- - >>> jcal2jd(2000, 1, 1) - (2400000.5, 51557.0) - >>> year = [-4699, -2114, -1050, -123, -1, 0, 1, 123, 1678, 2000, - ...: 2012, 2245] - >>> month = [1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12] - >>> day = [1, 12, 23, 14, 25, 16, 27, 8, 9, 10, 11, 31] - >>> x = [jcal2jd(y, m, d) for y, m, d in zip(year, month, day)] - >>> for i in x: print i - (2400000.5, -2395252.0) - (2400000.5, -1451039.0) - (2400000.5, -1062374.0) - (2400000.5, -723765.0) - (2400000.5, -679164.0) - (2400000.5, -678776.0) - (2400000.5, -678370.0) - (2400000.5, -633798.0) - (2400000.5, -65772.0) - (2400000.5, 51871.0) - (2400000.5, 56285.0) - - Notes - ----- - Unlike `gcal2jd`, negative months and days can result in incorrect - Julian dates. - - """ - year = int(year) - month = int(month) - day = int(day) - - jd = 367 * year - x = ipart((month - 9) / 7.0) - jd -= ipart((7 * (year + 5001 + x)) / 4.0) - jd += ipart((275 * month) / 9.0) - jd += day - jd += 1729777 - 2400000.5 # Return 240000.5 as first part of JD. - - jd -= 0.5 # Convert midday to midnight. - - return MJD_0, jd - - -def jd2jcal(jd1, jd2): - """Julian calendar date for the given Julian date. - - The input and output are for the proleptic Julian calendar, - i.e., no consideration of historical usage of the calendar is - made. - - Parameters - ---------- - jd1, jd2: int - Sum of the two numbers is taken as the given Julian date. For - example `jd1` can be the zero point of MJD (MJD_0) and `jd2` - can be the MJD of the date and time. But any combination will - work. - - Returns - ------- - y, m, d, f : int, int, int, float - Four element tuple containing year, month, day and the - fractional part of the day in the Julian calendar. The first - three are integers, and the last part is a float. 
- - Examples - -------- - >>> jd2jcal(*jcal2jd(2000, 1, 1)) - (2000, 1, 1, 0.0) - >>> jd2jcal(*jcal2jd(-4000, 10, 11)) - (-4000, 10, 11, 0.0) - - >>> jcal2jd(2000, 1, 1) - (2400000.5, 51557.0) - >>> jd2jcal(2400000.5, 51557.0) - (2000, 1, 1, 0.0) - >>> jd2jcal(2400000.5, 51557.5) - (2000, 1, 1, 0.5) - >>> jd2jcal(2400000.5, 51557.245) - (2000, 1, 1, 0.24500000000261934) - >>> jd2jcal(2400000.5, 51557.1) - (2000, 1, 1, 0.099999999998544808) - >>> jd2jcal(2400000.5, 51557.75) - (2000, 1, 1, 0.75) - - """ - from math import modf - - jd1_f, jd1_i = modf(jd1) - jd2_f, jd2_i = modf(jd2) - - jd_i = jd1_i + jd2_i - - f = jd1_f + jd2_f - - # Set JD to noon of the current date. Fractional part is the - # fraction from midnight of the current date. - if -0.5 < f < 0.5: - f += 0.5 - elif f >= 0.5: - jd_i += 1 - f -= 0.5 - elif f <= -0.5: - jd_i -= 1 - f += 1.5 - - j = jd_i + 1402.0 - k = ipart((j - 1) / 1461.0) - l = j - (1461.0 * k) - n = ipart((l - 1) / 365.0) - ipart(l / 1461.0) - i = l - (365.0 * n) + 30.0 - j = ipart((80.0 * i) / 2447.0) - day = i - ipart((2447.0 * j) / 80.0) - i = ipart(j / 11.0) - month = j + 2 - (12.0 * i) - year = (4 * k) + n + i - 4716.0 - - return int(year), int(month), int(day), f diff --git a/SUEWSPrepare/Modules/outdated_code.py b/SUEWSPrepare/Modules/outdated_code.py deleted file mode 100644 index 3005fe5..0000000 --- a/SUEWSPrepare/Modules/outdated_code.py +++ /dev/null @@ -1,398 +0,0 @@ - #OLD IMPORTS - from tabs.cond_tab import CondTab - from tabs.heat_tab import HeatTab - from tabs.imp_tab import ImpTab - from tabs.irr_tab import IrrTab - from tabs.OHMcoef_tab import OHMCoefTab - from tabs.prof_tab import ProfTab - from tabs.snow_tab import Snow - from tabs.soil_tab import SoilTab - from tabs.veg_tab import VegTab - from tabs.water_tab import Water - from tabs.waterdist_tab import WaterDistTab - - # self.conductance = CondTab() - # self.widgetlist.append(self.conductance) - # self.sheetlist.append(self.condsheet) - # 
self.titlelist.append("Surface conductance parameters") - # self.heat_tab = HeatTab() - # self.widgetlist.append(self.heat_tab) - # self.sheetlist.append(self.heatsheet) - # self.titlelist.append("Modelling anthropogenic heat flux ") - # self.imp_paved = ImpTab() - # self.widgetlist.append(self.imp_paved) - # self.sheetlist.append(self.impsheet) - # self.titlelist.append("Paved surface characteristics") - # self.imp_buildings = ImpTab() - # self.widgetlist.append(self.imp_buildings) - # self.sheetlist.append(self.impsheet) - # self.titlelist.append("Building surface characteristics") - # self.irr_tab = IrrTab() - # self.widgetlist.append(self.irr_tab) - # self.sheetlist.append(self.irrsheet) - # self.titlelist.append("Modelling irrigation") - # self.imp_baresoil = ImpTab() - # self.widgetlist.append(self.imp_baresoil) - # self.sheetlist.append(self.impsheet) - # self.titlelist.append("Bare soil surface characteristics") - # #self.OHM_tab = OHMCoefTab() - # self.prof_snow1 = ProfTab() - # self.widgetlist.append(self.prof_snow1) - # self.sheetlist.append(self.profsheet) - # self.titlelist.append("Snow clearing profile (Weekdays)") - # self.prof_snow2 = ProfTab() - # self.widgetlist.append(self.prof_snow2) - # self.sheetlist.append(self.profsheet) - # self.titlelist.append("Snow clearing profile (Weekends)") - # self.prof_energy1 = ProfTab() - # self.widgetlist.append(self.prof_energy1) - # self.sheetlist.append(self.profsheet) - # self.titlelist.append("Energy use profile (Weekdays)") - # self.prof_energy2 = ProfTab() - # self.widgetlist.append(self.prof_energy2) - # self.sheetlist.append(self.profsheet) - # self.titlelist.append("Energy use profile (Weekends)") - # self.prof_wateruse1 = ProfTab() - # self.widgetlist.append(self.prof_wateruse1) - # self.sheetlist.append(self.profsheet) - # self.titlelist.append("Water use profile (Manual irrigation, Weekdays)") - # self.prof_wateruse2 = ProfTab() - # self.widgetlist.append(self.prof_wateruse2) - # 
self.sheetlist.append(self.profsheet) - # self.titlelist.append("Water use profile (Manual irrigation, Weekends)") - # self.prof_wateruse3 = ProfTab() - # self.widgetlist.append(self.prof_wateruse3) - # self.sheetlist.append(self.profsheet) - # self.titlelist.append("Water use profile (Automatic irrigation, Weekdays)") - # self.prof_wateruse4 = ProfTab() - # self.widgetlist.append(self.prof_wateruse4) - # self.sheetlist.append(self.profsheet) - # self.titlelist.append("Water use profile (Automatic irrigation, Weekends)") - # self.snow = Snow() - # self.widgetlist.append(self.snow) - # self.sheetlist.append(self.snowsheet) - # self.titlelist.append("Snow surface characteristics") - # #self.soil_tab = SoilTab() - # self.water = Water() - # self.widgetlist.append(self.water) - # self.sheetlist.append(self.watersheet) - # self.titlelist.append("Water surface characteristics") - # #self.waterdist_tab = WaterDistTab() - # self.veg_evergreen = VegTab() - # self.widgetlist.append(self.veg_evergreen) - # self.sheetlist.append(self.vegsheet) - # self.titlelist.append("Evergreen surface characteristics") - # self.veg_decidious = VegTab() - # self.widgetlist.append(self.veg_decidious) - # self.sheetlist.append(self.vegsheet) - # self.titlelist.append("Decidious surface characteristics") - # self.veg_grass = VegTab() - # self.widgetlist.append(self.veg_grass) - # self.sheetlist.append(self.vegsheet) - # self.titlelist.append("Grass surface characteristics") - - self.conductance = TemplateWidget() - self.widgetlist.append(self.conductance) - self.sheetlist.append(self.condsheet) - self.titlelist.append("Surface conductance parameters") - self.heat_tab = TemplateWidget() - self.widgetlist.append(self.heat_tab) - self.sheetlist.append(self.heatsheet) - self.titlelist.append("Modelling anthropogenic heat flux ") - self.imp_paved = TemplateWidget() - self.widgetlist.append(self.imp_paved) - self.sheetlist.append(self.impsheet) - self.titlelist.append("Paved surface characteristics") 
- self.imp_buildings = TemplateWidget() - self.widgetlist.append(self.imp_buildings) - self.sheetlist.append(self.impsheet) - self.titlelist.append("Building surface characteristics") - self.irr_tab = TemplateWidget() - self.widgetlist.append(self.irr_tab) - self.sheetlist.append(self.irrsheet) - self.titlelist.append("Modelling irrigation") - self.imp_baresoil = TemplateWidget() - self.widgetlist.append(self.imp_baresoil) - self.sheetlist.append(self.impsheet) - self.titlelist.append("Bare soil surface characteristics") - #self.OHM_tab = OHMCoefTab() - self.prof_snow1 = TemplateWidget() - self.widgetlist.append(self.prof_snow1) - self.sheetlist.append(self.profsheet) - self.titlelist.append("Snow clearing profile (Weekdays)") - self.prof_snow2 = TemplateWidget() - self.widgetlist.append(self.prof_snow2) - self.sheetlist.append(self.profsheet) - self.titlelist.append("Snow clearing profile (Weekends)") - self.prof_energy1 = TemplateWidget() - self.widgetlist.append(self.prof_energy1) - self.sheetlist.append(self.profsheet) - self.titlelist.append("Energy use profile (Weekdays)") - self.prof_energy2 = TemplateWidget() - self.widgetlist.append(self.prof_energy2) - self.sheetlist.append(self.profsheet) - self.titlelist.append("Energy use profile (Weekends)") - self.prof_wateruse1 = TemplateWidget() - self.widgetlist.append(self.prof_wateruse1) - self.sheetlist.append(self.profsheet) - self.titlelist.append("Water use profile (Manual irrigation, Weekdays)") - self.prof_wateruse2 = TemplateWidget() - self.widgetlist.append(self.prof_wateruse2) - self.sheetlist.append(self.profsheet) - self.titlelist.append("Water use profile (Manual irrigation, Weekends)") - self.prof_wateruse3 = TemplateWidget() - self.widgetlist.append(self.prof_wateruse3) - self.sheetlist.append(self.profsheet) - self.titlelist.append("Water use profile (Automatic irrigation, Weekdays)") - self.prof_wateruse4 = TemplateWidget() - self.widgetlist.append(self.prof_wateruse4) - 
self.sheetlist.append(self.profsheet) - self.titlelist.append("Water use profile (Automatic irrigation, Weekends)") - self.snow = TemplateWidget() - self.widgetlist.append(self.snow) - self.sheetlist.append(self.snowsheet) - self.titlelist.append("Snow surface characteristics") - #self.soil_tab = SoilTab() - self.water = TemplateWidget() - self.widgetlist.append(self.water) - self.sheetlist.append(self.watersheet) - self.titlelist.append("Water surface characteristics") - #self.waterdist_tab = WaterDistTab() - self.veg_evergreen = TemplateWidget() - self.widgetlist.append(self.veg_evergreen) - self.sheetlist.append(self.vegsheet) - self.titlelist.append("Evergreen surface characteristics") - self.veg_decidious = TemplateWidget() - self.widgetlist.append(self.veg_decidious) - self.sheetlist.append(self.vegsheet) - self.titlelist.append("Decidious surface characteristics") - self.veg_grass = TemplateWidget() - self.widgetlist.append(self.veg_grass) - self.sheetlist.append(self.vegsheet) - self.titlelist.append("Grass surface characteristics") - - self.paved_tab = PavedTab() - self.tablist.append(self.paved_tab) - self.buildings_tab = BuildingsTab() - self.tablist.append(self.buildings_tab) - self.baresoil_tab = BareSoilTab() - self.tablist.append(self.baresoil_tab) - self.evergreen_tab = EvergreenTab() - self.tablist.append(self.evergreen_tab) - self.decidious_tab = DecidiousTab() - self.tablist.append(self.decidious_tab) - self.grass_tab = GrassTab() - self.tablist.append(self.grass_tab) - self.water_tab = WaterTab() - self.tablist.append(self.water_tab) - self.conductance_tab = ConductanceTab() - self.tablist.append(self.conductance_tab) - self.snow_tab = SnowTab() - self.tablist.append(self.snow_tab) - self.anthro_tab = AnthroTab() - self.tablist.append(self.anthro_tab) - self.energy_tab = EnergyTab() - self.tablist.append(self.energy_tab) - self.irrigation_tab = IrrigationTab() - self.tablist.append(self.irrigation_tab) - self.wateruse_tab = WaterUseTab() - 
self.tablist.append(self.wateruse_tab) - - self.main_tab = MainTab() - sm.setup_maintab(self.main_tab, self.iface) - - self.paved_tab.Layout.addWidget(self.imp_paved) - - self.buildings_tab.Layout.addWidget(self.imp_buildings) - - self.baresoil_tab.Layout.addWidget(self.imp_baresoil) - - self.evergreen_tab.Layout.addWidget(self.veg_evergreen) - - self.decidious_tab.Layout.addWidget(self.veg_decidious) - - self.grass_tab.Layout.addWidget(self.veg_grass) - - self.water_tab.Layout.addWidget(self.water) - - self.conductance_tab.Layout.addWidget(self.conductance) - - self.snow_tab.Layout.addWidget(self.snow) - self.snow_tab.Layout2.addWidget(self.prof_snow1) - self.snow_tab.Layout2.addWidget(self.prof_snow2) - - self.anthro_tab.Layout.addWidget(self.heat_tab) - - self.energy_tab.Layout.addWidget(self.prof_energy1) - self.energy_tab.Layout.addWidget(self.prof_energy2) - - self.irrigation_tab.Layout.addWidget(self.irr_tab) - - self.wateruse_tab.Layout.addWidget(self.prof_wateruse1) - self.wateruse_tab.Layout.addWidget(self.prof_wateruse2) - self.wateruse_tab.Layout2.addWidget(self.prof_wateruse3) - self.wateruse_tab.Layout2.addWidget(self.prof_wateruse4) - - self.dlg.tabWidget.addTab(self.main_tab, "Main settings") - self.dlg.tabWidget.addTab(self.paved_tab, "Paved") - self.dlg.tabWidget.addTab(self.buildings_tab, "Building") - self.dlg.tabWidget.addTab(self.baresoil_tab, "Bare Soil") - self.dlg.tabWidget.addTab(self.evergreen_tab, "Evergreen") - self.dlg.tabWidget.addTab(self.decidious_tab, "Decidious") - self.dlg.tabWidget.addTab(self.grass_tab, "Grass") - self.dlg.tabWidget.addTab(self.water_tab, "Water") - self.dlg.tabWidget.addTab(self.conductance_tab, "Conductance") - self.dlg.tabWidget.addTab(self.snow_tab, "Snow") - self.dlg.tabWidget.addTab(self.anthro_tab, "Anthropogenic") - self.dlg.tabWidget.addTab(self.energy_tab, "Energy") - self.dlg.tabWidget.addTab(self.irrigation_tab, "Irrigation") - self.dlg.tabWidget.addTab(self.wateruse_tab, "Water Use") - - - - - #OLD 
METHODS - def make_editable(self, widget, sheet): - code = widget.comboBox.currentText() - #QgsMessageLog.logMessage("code:" + str(code) + " " + str(type(code)), level=QgsMessageLog.CRITICAL) - code = int(code) - for row in range(3, sheet.nrows): - val = sheet.cell_value(row, 0) - #QgsMessageLog.logMessage("val:" + str(val) + " " + str(type(val)), level=QgsMessageLog.CRITICAL) - if val == code: - values = sheet.row_values(row, 1) - #QgsMessageLog.logMessage(str(values), level=QgsMessageLog.CRITICAL) - for x in range(0,len(values)): - if values[x] == "!": - break - exec "widget.lineEdit_" + str(x+1) + ".setEnabled(1)" - break - - def make_non_editable(self, widget, sheet): - code = widget.comboBox.currentText() - #QgsMessageLog.logMessage("code:" + str(code) + " " + str(type(code)), level=QgsMessageLog.CRITICAL) - code = int(code) - for row in range(3, sheet.nrows): - val = sheet.cell_value(row, 0) - #QgsMessageLog.logMessage("val:" + str(val) + " " + str(type(val)), level=QgsMessageLog.CRITICAL) - if val == code: - values = sheet.row_values(row, 1) - #QgsMessageLog.logMessage(str(values), level=QgsMessageLog.CRITICAL) - for x in range(0,len(values)): - if values[x] == "!": - break - exec "widget.lineEdit_" + str(x+1) + ".setEnabled(0)" - break - - - def setup_values_outdated(self, widget, filename): - file_path = self.plugin_dir + '/Input/' + filename - code = int(widget.comboBox.currentText()) - if os.path.isfile(file_path): - with open(file_path) as file: - next(file) - next(file) - for line in file: - split = line.split() - code_file = split[0] - if int(code_file) == code: - for x in range(1, len(split)): - if split[x] == "!": - explanation = "" - for y in range(x+1, len(split)): - explanation += str(split[y]) - explanation += " " - widget.exp_label.setText(explanation) - break - exec "widget.lineEdit_" + str(x) + ".setText(str(" + split[x] + "))" - break - else: - QMessageBox.critical(None, "Error", "Could not find the file:" + filename) - - - def 
setup_combo_outdated(self, widget, filename): - file_path = self.plugin_dir + '/Input/' + filename - if os.path.isfile(file_path): - with open(file_path) as file: - #QgsMessageLog.logMessage(file_path, level=QgsMessageLog.CRITICAL) - next(file) - next(file) - for line in file: - #QgsMessageLog.logMessage(line, level=QgsMessageLog.CRITICAL) - split = line.split() - code = split[0] - if int(code) == -9: - break - #elif isinstance(code, int): - else: - widget.comboBox.addItem(code) - else: - QMessageBox.critical(None, "Error", "Could not find the file:" + filename) - - - def test_excel(self): - #wb = openpyxl.load_workbook(self.file_path) - wb = copy(self.data) - sheet = self.get_sheet_by_name(wb, self.impsheet.name) - sheet.write(35, 0, "test") - wb.save(self.output_path + '/test.xls') - - def test_shapefile(self): - #skapar referens till vektorlagret, jag gr det genom skvgen fr filen men du kommer antagligen gra det genom combomanager eller liknande - vlayer = QgsVectorLayer(self.input_path + "grid_barb.shp", "vector layer", "ogr") - #Tar reda p antalet kollumner i attributformulret innan ngra lggs till - current_index_length = len(vlayer.dataProvider().attributeIndexes()) - #skvg till textfilen med inputs - file_path = self.input_path + self.test_file - #kollar om textfilen finns i skvgen - if os.path.isfile(file_path): - with open(file_path) as file: - #lser frsta raden i filen - line = file.readline() - #gr raden till en lista med varje "ord" som ett inlgg - line_split = line.split() - #hmtar vektorlagrets kapaciteter (Typ ndra attributdata, geometri osv) - caps = vlayer.dataProvider().capabilities() - #kollar om vektorlagret kan antera ndringar av attributdata - if caps & QgsVectorDataProvider.AddAttributes: - #Lgger till nya kollumner utifrn frsta raden i textfilen. Brjar p 1 och inte 0 eftersom frsta inlgget r id - for x in range(1, len(line_split)): - #Lgger till varje flt som fljer efter id i frsta raden (Typ pai). 
- #Den frsta variabeln i QgsField r en string, det andra definierar den data som ska sparas i kollumnen. - #Tror att det kanske r hr det gtt fel fr dig tidigare. - #Det som fr tillfllet r acceptabelt r String, Int eller Double och inlgg i kollumnen MSTE matcha den valda definitionen. - #Om man till exempel skapar en kollumn fr integers och sedan frsker lgga in en stringvariabel kommer inget inlgg gras. - #Eftersom det mesta i textfilen r decimaltal valde jag Double men String hade ocks fungerat - # lnge som det man vljer att skicka in till kollumnen r variabler i det valda formatet. str(0.5) funkar med andra ord - #fr en kollumn av formatet String men inte enbart 0.5. - vlayer.dataProvider().addAttributes([QgsField(line_split[x], QVariant.Double)]) - #Skapar en tom python dictionary, det r genom de hr som attributdata lggs till i attributformulret. - attr_dict = {} - #Lser varje rad som fljer efter den frsta i textfilen. - for line in file: - #Rensar dict:en. - attr_dict.clear() - #delar upp den lsta raden - split = line.split() - #frsta vrdet i raden r id fr den "Feature" som radens information ska lggas till i. - idx = int(split[0]) - #Ittererar ver varje vrde i raden efter id - for x in range(1, len(split)): - #Lgger till "vrdepar" till dict:en, formatet ser ut som {Nyckel: Vrde, Nyckel: Vrde} kan man sga. - #fr att Qgis ska lgga till vrdena korrekt krvs formatet {Kollumn Index: Vrde, Kollumn Index: Vrde} - #T ex: {1: "detta lggs till i kollumn med index 1 fr en senare definerad "Feature", 2: "detta lggs till i kollumn 2 osv"} - #Eftersom detta r en lite "klumpig" implementation kommer alltid nya kollumner lggas till nr metoden krs. Drfr hmtar vi - #kollumn index INNAN ngra kollumner lagts till. Utifrn tidigare index kan vi sedan rkna ut vad index kommer vara - # fr alla kollumner vi lagt till. Med varje kollumn idex parar vi ihop det vrde frn raden vi lst. 
- attr_dict[current_index_length + x - 1] = float(split[x]) - #Lgger till alla kollumnvrden i den skapade dict:en till "Feature" med id: idx - vlayer.dataProvider().changeAttributeValues({idx: attr_dict}) - #uppdaterar flt - vlayer.updateFields() - else: - QMessageBox.critical(None, "Error", "Vector Layer does not support adding attributes") - else: - QMessageBox.critical(None, "Error", "Could not find the file:" + self.test_file) - - - diff --git a/SUEWSPrepare/Modules/xlrd/__init__.py b/SUEWSPrepare/Modules/xlrd/__init__.py index 5b9274e..078c211 100644 --- a/SUEWSPrepare/Modules/xlrd/__init__.py +++ b/SUEWSPrepare/Modules/xlrd/__init__.py @@ -1,326 +1,23 @@ -from os import path - -from .info import __VERSION__ - -#

Copyright (c) 2005-2012 Stephen John Machin, Lingfo Pty Ltd

-#

This module is part of the xlrd package, which is released under a -# BSD-style licence.

- -from . import licences - -## -#

A Python module for extracting data from MS Excel (TM) spreadsheet files. -#

-# Version 0.7.4 -- April 2012 -#

-# -#

General information

-# -#

Acknowledgements

-# -#

-# Development of this module would not have been possible without the document -# "OpenOffice.org's Documentation of the Microsoft Excel File Format" -# ("OOo docs" for short). -# The latest version is available from OpenOffice.org in -# PDF format -# and -# ODT format. -# Small portions of the OOo docs are reproduced in this -# document. A study of the OOo docs is recommended for those who wish a -# deeper understanding of the Excel file layout than the xlrd docs can provide. -#

-# -#

Backporting to Python 2.1 was partially funded by -# -# Journyx - provider of timesheet and project accounting solutions. -# -#

-# -#

Provision of formatting information in version 0.6.1 was funded by -# -# Simplistix Ltd. -# -#

-# -#

Unicode

-# -#

This module presents all text strings as Python unicode objects. -# From Excel 97 onwards, text in Excel spreadsheets has been stored as Unicode. -# Older files (Excel 95 and earlier) don't keep strings in Unicode; -# a CODEPAGE record provides a codepage number (for example, 1252) which is -# used by xlrd to derive the encoding (for same example: "cp1252") which is -# used to translate to Unicode.

-# -#

If the CODEPAGE record is missing (possible if the file was created -# by third-party software), xlrd will assume that the encoding is ascii, and keep going. -# If the actual encoding is not ascii, a UnicodeDecodeError exception will be raised and -# you will need to determine the encoding yourself, and tell xlrd: -#

-#     book = xlrd.open_workbook(..., encoding_override="cp1252")
-# 

-#

If the CODEPAGE record exists but is wrong (for example, the codepage -# number is 1251, but the strings are actually encoded in koi8_r), -# it can be overridden using the same mechanism. -# The supplied runxlrd.py has a corresponding command-line argument, which -# may be used for experimentation: -#

-#     runxlrd.py -e koi8_r 3rows myfile.xls
-# 

-#

The first place to look for an encoding ("codec name") is -# -# the Python documentation. -#

-#
-# -#

Dates in Excel spreadsheets

-# -#

In reality, there are no such things. What you have are floating point -# numbers and pious hope. -# There are several problems with Excel dates:

-# -#

(1) Dates are not stored as a separate data type; they are stored as -# floating point numbers and you have to rely on -# (a) the "number format" applied to them in Excel and/or -# (b) knowing which cells are supposed to have dates in them. -# This module helps with (a) by inspecting the -# format that has been applied to each number cell; -# if it appears to be a date format, the cell -# is classified as a date rather than a number. Feedback on this feature, -# especially from non-English-speaking locales, would be appreciated.

-# -#

(2) Excel for Windows stores dates by default as the number of -# days (or fraction thereof) since 1899-12-31T00:00:00. Excel for -# Macintosh uses a default start date of 1904-01-01T00:00:00. The date -# system can be changed in Excel on a per-workbook basis (for example: -# Tools -> Options -> Calculation, tick the "1904 date system" box). -# This is of course a bad idea if there are already dates in the -# workbook. There is no good reason to change it even if there are no -# dates in the workbook. Which date system is in use is recorded in the -# workbook. A workbook transported from Windows to Macintosh (or vice -# versa) will work correctly with the host Excel. When using this -# module's xldate_as_tuple function to convert numbers from a workbook, -# you must use the datemode attribute of the Book object. If you guess, -# or make a judgement depending on where you believe the workbook was -# created, you run the risk of being 1462 days out of kilter.

-# -#

Reference: -# http://support.microsoft.com/default.aspx?scid=KB;EN-US;q180162

-# -# -#

(3) The Excel implementation of the Windows-default 1900-based date system works on the -# incorrect premise that 1900 was a leap year. It interprets the number 60 as meaning 1900-02-29, -# which is not a valid date. Consequently any number less than 61 is ambiguous. Example: is 59 the -# result of 1900-02-28 entered directly, or is it 1900-03-01 minus 2 days? The OpenOffice.org Calc -# program "corrects" the Microsoft problem; entering 1900-02-27 causes the number 59 to be stored. -# Save as an XLS file, then open the file with Excel -- you'll see 1900-02-28 displayed.

-# -#

Reference: http://support.microsoft.com/default.aspx?scid=kb;en-us;214326

-# -#

(4) The Macintosh-default 1904-based date system counts 1904-01-02 as day 1 and 1904-01-01 as day zero. -# Thus any number such that (0.0 <= number < 1.0) is ambiguous. Is 0.625 a time of day (15:00:00), -# independent of the calendar, -# or should it be interpreted as an instant on a particular day (1904-01-01T15:00:00)? -# The xldate_* functions in this module -# take the view that such a number is a calendar-independent time of day (like Python's datetime.time type) for both -# date systems. This is consistent with more recent Microsoft documentation -# (for example, the help file for Excel 2002 which says that the first day -# in the 1904 date system is 1904-01-02). -# -#

(5) Usage of the Excel DATE() function may leave strange dates in a spreadsheet. Quoting the help file, -# in respect of the 1900 date system: "If year is between 0 (zero) and 1899 (inclusive), -# Excel adds that value to 1900 to calculate the year. For example, DATE(108,1,2) returns January 2, 2008 (1900+108)." -# This gimmick, semi-defensible only for arguments up to 99 and only in the pre-Y2K-awareness era, -# means that DATE(1899, 12, 31) is interpreted as 3799-12-31.

-# -#

For further information, please refer to the documentation for the xldate_* functions.

-# -#

Named references, constants, formulas, and macros

-# -#

-# A name is used to refer to a cell, a group of cells, a constant -# value, a formula, or a macro. Usually the scope of a name is global -# across the whole workbook. However it can be local to a worksheet. -# For example, if the sales figures are in different cells in -# different sheets, the user may define the name "Sales" in each -# sheet. There are built-in names, like "Print_Area" and -# "Print_Titles"; these two are naturally local to a sheet. -#

-# To inspect the names with a user interface like MS Excel, OOo Calc, -# or Gnumeric, click on Insert/Names/Define. This will show the global -# names, plus those local to the currently selected sheet. -#

-# A Book object provides two dictionaries (name_map and -# name_and_scope_map) and a list (name_obj_list) which allow various -# ways of accessing the Name objects. There is one Name object for -# each NAME record found in the workbook. Name objects have many -# attributes, several of which are relevant only when obj.macro is 1. -#

-# In the examples directory you will find namesdemo.xls which -# showcases the many different ways that names can be used, and -# xlrdnamesAPIdemo.py which offers 3 different queries for inspecting -# the names in your files, and shows how to extract whatever a name is -# referring to. There is currently one "convenience method", -# Name.cell(), which extracts the value in the case where the name -# refers to a single cell. More convenience methods are planned. The -# source code for Name.cell (in __init__.py) is an extra source of -# information on how the Name attributes hang together. -#

-# -#

Name information is not extracted from files older than -# Excel 5.0 (Book.biff_version < 50)

-# -#

Formatting

-# -#

Introduction

-# -#

This collection of features, new in xlrd version 0.6.1, is intended -# to provide the information needed to (1) display/render spreadsheet contents -# (say) on a screen or in a PDF file, and (2) copy spreadsheet data to another -# file without losing the ability to display/render it.

-# -#

The Palette; Colour Indexes

-# -#

A colour is represented in Excel as a (red, green, blue) ("RGB") tuple -# with each component in range(256). However it is not possible to access an -# unlimited number of colours; each spreadsheet is limited to a palette of 64 different -# colours (24 in Excel 3.0 and 4.0, 8 in Excel 2.0). Colours are referenced by an index -# ("colour index") into this palette. -# -# Colour indexes 0 to 7 represent 8 fixed built-in colours: black, white, red, green, blue, -# yellow, magenta, and cyan.

-# -# The remaining colours in the palette (8 to 63 in Excel 5.0 and later) -# can be changed by the user. In the Excel 2003 UI, Tools/Options/Color presents a palette -# of 7 rows of 8 colours. The last two rows are reserved for use in charts.
-# The correspondence between this grid and the assigned -# colour indexes is NOT left-to-right top-to-bottom.
-# Indexes 8 to 15 correspond to changeable -# parallels of the 8 fixed colours -- for example, index 7 is forever cyan; -# index 15 starts off being cyan but can be changed by the user.
-# -# The default colour for each index depends on the file version; tables of the defaults -# are available in the source code. If the user changes one or more colours, -# a PALETTE record appears in the XLS file -- it gives the RGB values for *all* changeable -# indexes.
-# Note that colours can be used in "number formats": "[CYAN]...." and "[COLOR8]...." refer -# to colour index 7; "[COLOR16]...." will produce cyan -# unless the user changes colour index 15 to something else.
-# -#

In addition, there are several "magic" colour indexes used by Excel:
-# 0x18 (BIFF3-BIFF4), 0x40 (BIFF5-BIFF8): System window text colour for border lines -# (used in XF, CF, and WINDOW2 records)
-# 0x19 (BIFF3-BIFF4), 0x41 (BIFF5-BIFF8): System window background colour for pattern background -# (used in XF and CF records )
-# 0x43: System face colour (dialogue background colour)
-# 0x4D: System window text colour for chart border lines
-# 0x4E: System window background colour for chart areas
-# 0x4F: Automatic colour for chart border lines (seems to be always Black)
-# 0x50: System ToolTip background colour (used in note objects)
-# 0x51: System ToolTip text colour (used in note objects)
-# 0x7FFF: System window text colour for fonts (used in FONT and CF records)
-# Note 0x7FFF appears to be the *default* colour index. It appears quite often in FONT -# records.
-# -#

Default Formatting

-# -# Default formatting is applied to all empty cells (those not described by a cell record). -# Firstly row default information (ROW record, Rowinfo class) is used if available. -# Failing that, column default information (COLINFO record, Colinfo class) is used if available. -# As a last resort the worksheet/workbook default cell format will be used; this -# should always be present in an Excel file, -# described by the XF record with the fixed index 15 (0-based). By default, it uses the -# worksheet/workbook default cell style, described by the very first XF record (index 0). -# -#

Formatting features not included in xlrd version 0.6.1

-#
    -#
  • Rich text i.e. strings containing partial bold italic -# and underlined text, change of font inside a string, etc. -# See OOo docs s3.4 and s3.2. -# Rich text is included in version 0.7.2
  • -#
  • Asian phonetic text (known as "ruby"), used for Japanese furigana. See OOo docs -# s3.4.2 (p15)
  • -#
  • Conditional formatting. See OOo docs -# s5.12, s6.21 (CONDFMT record), s6.16 (CF record)
  • -#
  • Miscellaneous sheet-level and book-level items e.g. printing layout, screen panes.
  • -#
  • Modern Excel file versions don't keep most of the built-in -# "number formats" in the file; Excel loads formats according to the -# user's locale. Currently xlrd's emulation of this is limited to -# a hard-wired table that applies to the US English locale. This may mean -# that currency symbols, date order, thousands separator, decimals separator, etc -# are inappropriate. Note that this does not affect users who are copying XLS -# files, only those who are visually rendering cells.
  • -#
-# -#

Loading worksheets on demand

-# -#

This feature, new in version 0.7.1, is governed by the on_demand argument -# to the open_workbook() function and allows saving memory and time by loading -# only those sheets that the caller is interested in, and releasing sheets -# when no longer required.

-# -#

on_demand=False (default): No change. open_workbook() loads global data -# and all sheets, releases resources no longer required (principally the -# str or mmap object containing the Workbook stream), and returns.

-# -#

on_demand=True and BIFF version < 5.0: A warning message is emitted, -# on_demand is recorded as False, and the old process is followed.

-# -#

on_demand=True and BIFF version >= 5.0: open_workbook() loads global -# data and returns without releasing resources. At this stage, the only -# information available about sheets is Book.nsheets and Book.sheet_names().

-# -#

Book.sheet_by_name() and Book.sheet_by_index() will load the requested -# sheet if it is not already loaded.

-# -#

Book.sheets() will load all/any unloaded sheets.

-# -#

The caller may save memory by calling -# Book.unload_sheet(sheet_name_or_index) when finished with the sheet. -# This applies irrespective of the state of on_demand.

-# -#

The caller may re-load an unloaded sheet by calling Book.sheet_by_xxxx() -# -- except if those required resources have been released (which will -# have happened automatically when on_demand is false). This is the only -# case where an exception will be raised.

-# -#

The caller may query the state of a sheet: -# Book.sheet_loaded(sheet_name_or_index) -> a bool

-# -#

Book.release_resources() may used to save memory and close -# any memory-mapped file before proceding to examine already-loaded -# sheets. Once resources are released, no further sheets can be loaded.

-# -#

When using on-demand, it is advisable to ensure that -# Book.release_resources() is always called even if an exception -# is raised in your own code; otherwise if the input file has been -# memory-mapped, the mmap.mmap object will not be closed and you will -# not be able to access the physical file until your Python process -# terminates. This can be done by calling Book.release_resources() -# explicitly in the finally suite of a try/finally block. -# New in xlrd 0.7.2: the Book object is a "context manager", so if -# using Python 2.5 or later, you can wrap your code in a "with" -# statement.

-## - -import sys, zipfile, pprint +# Copyright (c) 2005-2012 Stephen John Machin, Lingfo Pty Ltd +# This module is part of the xlrd package, which is released under a +# BSD-style licence. +import os +import pprint +import sys +import zipfile + from . import timemachine from .biffh import ( - XLRDError, - biff_text_from_num, + XL_CELL_BLANK, XL_CELL_BOOLEAN, XL_CELL_DATE, XL_CELL_EMPTY, XL_CELL_ERROR, + XL_CELL_NUMBER, XL_CELL_TEXT, XLRDError, biff_text_from_num, error_text_from_code, - XL_CELL_BLANK, - XL_CELL_TEXT, - XL_CELL_BOOLEAN, - XL_CELL_ERROR, - XL_CELL_EMPTY, - XL_CELL_DATE, - XL_CELL_NUMBER - ) -from .formula import * # is constrained by __all__ -from .book import Book, colname #### TODO #### formula also has `colname` (restricted to 256 cols) +) +from .book import Book, colname +from .formula import * # is constrained by __all__ +from .info import __VERSION__, __version__ from .sheet import empty_cell -from .xldate import XLDateError, xldate_as_tuple +from .xldate import XLDateError, xldate_as_datetime, xldate_as_tuple +from .xlsx import X12Book if sys.version.startswith("IronPython"): # print >> sys.stderr, "...importing encodings" @@ -333,67 +30,86 @@ MMAP_AVAILABLE = 0 USE_MMAP = MMAP_AVAILABLE -## -# -# Open a spreadsheet file for data extraction. -# -# @param filename The path to the spreadsheet file to be opened. -# -# @param logfile An open file to which messages and diagnostics are written. -# -# @param verbosity Increases the volume of trace material written to the logfile. -# -# @param use_mmap Whether to use the mmap module is determined heuristically. -# Use this arg to override the result. Current heuristic: mmap is used if it exists. -# -# @param file_contents ... as a string or an mmap.mmap object or some other behave-alike object. -# If file_contents is supplied, filename will not be used, except (possibly) in messages. -# -# @param encoding_override Used to overcome missing or bad codepage information -# in older-version files. 
Refer to discussion in the Unicode section above. -#
-- New in version 0.6.0 -# -# @param formatting_info Governs provision of a reference to an XF (eXtended Format) object -# for each cell in the worksheet. -#
Default is False. This is backwards compatible and saves memory. -# "Blank" cells (those with their own formatting information but no data) are treated as empty -# (by ignoring the file's BLANK and MULBLANK records). -# It cuts off any bottom "margin" of rows of empty (and blank) cells and -# any right "margin" of columns of empty (and blank) cells. -# Only cell_value and cell_type are available. -#
True provides all cells, including empty and blank cells. -# XF information is available for each cell. -#
-- New in version 0.6.1 -# -# @param on_demand Governs whether sheets are all loaded initially or when demanded -# by the caller. Please refer back to the section "Loading worksheets on demand" for details. -#
-- New in version 0.7.1 -# -# @param ragged_rows False (the default) means all rows are padded out with empty cells so that all -# rows have the same size (Sheet.ncols). True means that there are no empty cells at the ends of rows. -# This can result in substantial memory savings if rows are of widely varying sizes. See also the -# Sheet.row_len() method. -#
-- New in version 0.7.2 -# -# @return An instance of the Book class. - def open_workbook(filename=None, - logfile=sys.stdout, - verbosity=0, - use_mmap=USE_MMAP, - file_contents=None, - encoding_override=None, - formatting_info=False, - on_demand=False, - ragged_rows=False, - ): + logfile=sys.stdout, + verbosity=0, + use_mmap=USE_MMAP, + file_contents=None, + encoding_override=None, + formatting_info=False, + on_demand=False, + ragged_rows=False): + """ + Open a spreadsheet file for data extraction. + + :param filename: The path to the spreadsheet file to be opened. + + :param logfile: An open file to which messages and diagnostics are written. + + :param verbosity: Increases the volume of trace material written to the + logfile. + + :param use_mmap: + + Whether to use the mmap module is determined heuristically. + Use this arg to override the result. + + Current heuristic: mmap is used if it exists. + + :param file_contents: + + A string or an :class:`mmap.mmap` object or some other behave-alike + object. If ``file_contents`` is supplied, ``filename`` will not be used, + except (possibly) in messages. + + :param encoding_override: + + Used to overcome missing or bad codepage information + in older-version files. See :doc:`unicode`. + + :param formatting_info: + + The default is ``False``, which saves memory. + In this case, "Blank" cells, which are those with their own formatting + information but no data, are treated as empty by ignoring the file's + ``BLANK`` and ``MULBLANK`` records. + This cuts off any bottom or right "margin" of rows of empty or blank + cells. + Only :meth:`~xlrd.sheet.Sheet.cell_value` and + :meth:`~xlrd.sheet.Sheet.cell_type` are available. + + When ``True``, formatting information will be read from the spreadsheet + file. This provides all cells, including empty and blank cells. + Formatting information is available for each cell. + + Note that this will raise a NotImplementedError when used with an + xlsx file. 
+ + :param on_demand: + + Governs whether sheets are all loaded initially or when demanded + by the caller. See :doc:`on_demand`. + + :param ragged_rows: + + The default of ``False`` means all rows are padded out with empty cells so + that all rows have the same size as found in + :attr:`~xlrd.sheet.Sheet.ncols`. + + ``True`` means that there are no empty cells at the ends of rows. + This can result in substantial memory savings if rows are of widely + varying sizes. See also the :meth:`~xlrd.sheet.Sheet.row_len` method. + + :returns: An instance of the :class:`~xlrd.book.Book` class. + """ + peeksz = 4 if file_contents: peek = file_contents[:peeksz] else: - f = open(filename, "rb") - peek = f.read(peeksz) - f.close() + filename = os.path.expanduser(filename) + with open(filename, "rb") as f: + peek = f.read(peeksz) if peek == b"PK\x03\x04": # a ZIP file if file_contents: zf = zipfile.ZipFile(timemachine.BYTES_IO(file_contents)) @@ -403,7 +119,7 @@ def open_workbook(filename=None, # Workaround for some third party files that use forward slashes and # lower case names. We map the expected name in lowercase to the # actual filename in the zip container. - component_names = dict([(name.replace('\\', '/').lower(), name) + component_names = dict([(X12Book.convert_filename(name), name) for name in zf.namelist()]) if verbosity: @@ -420,7 +136,7 @@ def open_workbook(filename=None, formatting_info=formatting_info, on_demand=on_demand, ragged_rows=ragged_rows, - ) + ) return bk if 'xl/workbook.bin' in component_names: raise XLRDError('Excel 2007 xlsb file; not supported') @@ -439,28 +155,32 @@ def open_workbook(filename=None, formatting_info=formatting_info, on_demand=on_demand, ragged_rows=ragged_rows, - ) + ) return bk -## -# For debugging: dump an XLS file's BIFF records in char & hex. -# @param filename The path to the file to be dumped. -# @param outfile An open file, to which the dump is written. -# @param unnumbered If true, omit offsets (for meaningful diffs). 
def dump(filename, outfile=sys.stdout, unnumbered=False): + """ + For debugging: dump an XLS file's BIFF records in char & hex. + + :param filename: The path to the file to be dumped. + :param outfile: An open file, to which the dump is written. + :param unnumbered: If true, omit offsets (for meaningful diffs). + """ from .biffh import biff_dump bk = Book() bk.biff2_8_load(filename=filename, logfile=outfile, ) biff_dump(bk.mem, bk.base, bk.stream_len, 0, outfile, unnumbered) -## -# For debugging and analysis: summarise the file's BIFF records. -# I.e. produce a sorted file of (record_name, count). -# @param filename The path to the file to be summarised. -# @param outfile An open file, to which the summary is written. def count_records(filename, outfile=sys.stdout): + """ + For debugging and analysis: summarise the file's BIFF records. + ie: produce a sorted file of ``(record_name, count)``. + + :param filename: The path to the file to be summarised. + :param outfile: An open file, to which the summary is written. + """ from .biffh import biff_count_records bk = Book() bk.biff2_8_load(filename=filename, logfile=outfile, ) diff --git a/SUEWSPrepare/Modules/xlrd/biffh.py b/SUEWSPrepare/Modules/xlrd/biffh.py index f3a6d4d..07ac629 100644 --- a/SUEWSPrepare/Modules/xlrd/biffh.py +++ b/SUEWSPrepare/Modules/xlrd/biffh.py @@ -1,47 +1,40 @@ -# -*- coding: cp1252 -*- - -## -# Support module for the xlrd package. -# -#

Portions copyright 2005-2010 Stephen John Machin, Lingfo Pty Ltd

-#

This module is part of the xlrd package, which is released under a BSD-style licence.

-## - -# 2010-03-01 SJM Reading SCL record -# 2010-03-01 SJM Added more record IDs for biff_dump & biff_count -# 2008-02-10 SJM BIFF2 BLANK record -# 2008-02-08 SJM Preparation for Excel 2.0 support -# 2008-02-02 SJM Added suffixes (_B2, _B2_ONLY, etc) on record names for biff_dump & biff_count -# 2007-12-04 SJM Added support for Excel 2.x (BIFF2) files. -# 2007-09-08 SJM Avoid crash when zero-length Unicode string missing options byte. -# 2007-04-22 SJM Remove experimental "trimming" facility. - +# -*- coding: utf-8 -*- +# Portions copyright © 2005-2010 Stephen John Machin, Lingfo Pty Ltd +# This module is part of the xlrd package, which is released under a +# BSD-style licence. from __future__ import print_function -DEBUG = 0 - -from struct import unpack import sys +from struct import unpack + from .timemachine import * +DEBUG = 0 + + + class XLRDError(Exception): - pass + """ + An exception indicating problems reading data from an Excel file. + """ -## -# Parent of almost all other classes in the package. Defines a common "dump" method -# for debugging. class BaseObject(object): + """ + Parent of almost all other classes in the package. Defines a common + :meth:`dump` method for debugging. 
+ """ _repr_these = [] - ## - # @param f open file object, to which the dump is written - # @param header text to write before the dump - # @param footer text to write after the dump - # @param indent number of leading spaces (for recursive calls) def dump(self, f=None, header=None, footer=None, indent=0): + """ + :param f: open file object, to which the dump is written + :param header: text to write before the dump + :param footer: text to write after the dump + :param indent: number of leading spaces (for recursive calls) + """ if f is None: f = sys.stderr if hasattr(self, "__slots__"): @@ -60,9 +53,8 @@ def dump(self, f=None, header=None, footer=None, indent=0): value.dump(f, header="%s%s (%s object):" % (pad, attr, value.__class__.__name__), indent=indent+4) - elif attr not in self._repr_these and ( - isinstance(value, list_type) or isinstance(value, dict_type) - ): + elif (attr not in self._repr_these and + (isinstance(value, list_type) or isinstance(value, dict_type))): print("%s%s: %s, len = %d" % (pad, attr, type(value), len(value)), file=f) else: fprintf(f, "%s%s: %r\n", pad, attr, value) @@ -93,21 +85,10 @@ def dump(self, f=None, header=None, footer=None, indent=0): 70: "7", 80: "8", 85: "8X", - } - -## -#

This dictionary can be used to produce a text version of the internal codes -# that Excel uses for error cells. Here are its contents: -#

-# 0x00: '#NULL!',  # Intersection of two cell ranges is empty
-# 0x07: '#DIV/0!', # Division by zero
-# 0x0F: '#VALUE!', # Wrong type of operand
-# 0x17: '#REF!',   # Illegal or deleted cell reference
-# 0x1D: '#NAME?',  # Wrong function or range name
-# 0x24: '#NUM!',   # Value range overflow
-# 0x2A: '#N/A',    # Argument or function not available
-# 

+} +#: This dictionary can be used to produce a text version of the internal codes +#: that Excel uses for error cells. error_text_from_code = { 0x00: '#NULL!', # Intersection of two cell ranges is empty 0x07: '#DIV/0!', # Division by zero @@ -247,7 +228,7 @@ def dump(self, f=None, header=None, footer=None, indent=0): XL_NUMBER, XL_RK, XL_RSTRING, - ] +] _cell_opcode_dict = {} for _cell_opcode in _cell_opcode_list: _cell_opcode_dict[_cell_opcode] = 1 @@ -352,8 +333,7 @@ def unpack_unicode_update_pos(data, pos, lenlen=2, known_len=None): pos += sz return (strg, pos) -def unpack_cell_range_address_list_update_pos( - output_list, data, pos, biff_version, addr_size=6): +def unpack_cell_range_address_list_update_pos(output_list, data, pos, biff_version, addr_size=6): # output_list is updated in situ assert addr_size in (6, 8) # Used to assert size == 6 if not BIFF8, but pyWLWriter writes @@ -553,8 +533,8 @@ def hex_char_dump(strg, ofs, dlen, base=0, fout=sys.stdout, unnumbered=False): '??? hex_char_dump: ofs=%d dlen=%d base=%d -> endpos=%d pos=%d endsub=%d substrg=%r\n', ofs, dlen, base, endpos, pos, endsub, substrg) break - hexd = ''.join(["%02x " % BYTES_ORD(c) for c in substrg]) - + hexd = ''.join("%02x " % BYTES_ORD(c) for c in substrg) + chard = '' for c in substrg: c = chr(BYTES_ORD(c)) @@ -565,7 +545,7 @@ def hex_char_dump(strg, ofs, dlen, base=0, fout=sys.stdout, unnumbered=False): chard += c if numbered: num_prefix = "%5d: " % (base+pos-ofs) - + fprintf(fout, "%s %-48s %s\n", num_prefix, hexd, chard) pos = endsub @@ -648,7 +628,7 @@ def biff_count_records(mem, stream_offset, stream_len, fout=sys.stdout): 10081: 'mac_turkish', # guess 32768: 'mac_roman', 32769: 'cp1252', - } +} # some more guessing, for Indic scripts # codepage 57000 range: # 2 Devanagari [0] diff --git a/SUEWSPrepare/Modules/xlrd/book.py b/SUEWSPrepare/Modules/xlrd/book.py index 7bb01b4..6e6e8ea 100644 --- a/SUEWSPrepare/Modules/xlrd/book.py +++ b/SUEWSPrepare/Modules/xlrd/book.py @@ -4,18 
+4,16 @@ from __future__ import print_function -from .timemachine import * -from .biffh import * -import struct; unpack = struct.unpack +import gc import sys import time -from . import sheet -from . import compdoc + +from . import compdoc, formatting, sheet +from .biffh import * from .formula import * -from . import formatting -if sys.version.startswith("IronPython"): - # print >> sys.stderr, "...importing encodings" - import encodings +from .timemachine import * + +import struct; unpack = struct.unpack empty_cell = sheet.empty_cell # for exposure to the world ... @@ -24,7 +22,6 @@ USE_FANCY_CD = 1 TOGGLE_GC = 0 -import gc # gc.set_debug(gc.DEBUG_STATS) try: @@ -55,7 +52,7 @@ "Auto_Deactivate": "\x0B", "Sheet_Title": "\x0C", "_FilterDatabase": "\x0D", - } +} builtin_name_from_code = {} code_from_builtin_name = {} for _bin, _bic in _code_from_builtin_name.items(): @@ -66,11 +63,10 @@ del _bin, _bic, _code_from_builtin_name def open_workbook_xls(filename=None, - logfile=sys.stdout, verbosity=0, use_mmap=USE_MMAP, - file_contents=None, - encoding_override=None, - formatting_info=False, on_demand=False, ragged_rows=False, - ): + logfile=sys.stdout, verbosity=0, use_mmap=USE_MMAP, + file_contents=None, + encoding_override=None, + formatting_info=False, on_demand=False, ragged_rows=False): t0 = time.clock() if TOGGLE_GC: orig_gc_enabled = gc.isenabled() @@ -85,7 +81,7 @@ def open_workbook_xls(filename=None, formatting_info=formatting_info, on_demand=on_demand, ragged_rows=ragged_rows, - ) + ) t1 = time.clock() bk.load_time_stage_1 = t1 - t0 biff_version = bk.getbof(XL_WORKBOOK_GLOBALS) @@ -95,7 +91,7 @@ def open_workbook_xls(filename=None, raise XLRDError( "BIFF version %s is not supported" % biff_text_from_num[biff_version] - ) + ) bk.biff_version = biff_version if biff_version <= 40: # no workbook globals, only 1 worksheet @@ -119,11 +115,12 @@ def open_workbook_xls(filename=None, bk.get_sheets() bk.nsheets = len(bk._sheet_list) if biff_version == 45 and bk.nsheets > 
1: - fprintf(bk.logfile, + fprintf( + bk.logfile, "*** WARNING: Excel 4.0 workbook (.XLW) file contains %d worksheets.\n" "*** Book-level data will be that of the last worksheet.\n", bk.nsheets - ) + ) if TOGGLE_GC: if orig_gc_enabled: gc.enable() @@ -137,107 +134,87 @@ def open_workbook_xls(filename=None, bk.release_resources() return bk -## -# For debugging: dump the file's BIFF records in char & hex. -# @param filename The path to the file to be dumped. -# @param outfile An open file, to which the dump is written. -# @param unnumbered If true, omit offsets (for meaningful diffs). - -def dump(filename, outfile=sys.stdout, unnumbered=False): - bk = Book() - bk.biff2_8_load(filename=filename, logfile=outfile, ) - biff_dump(bk.mem, bk.base, bk.stream_len, 0, outfile, unnumbered) - -## -# For debugging and analysis: summarise the file's BIFF records. -# I.e. produce a sorted file of (record_name, count). -# @param filename The path to the file to be summarised. -# @param outfile An open file, to which the summary is written. - -def count_records(filename, outfile=sys.stdout): - bk = Book() - bk.biff2_8_load(filename=filename, logfile=outfile, ) - biff_count_records(bk.mem, bk.base, bk.stream_len, outfile) - -## -# Information relating to a named reference, formula, macro, etc. -#
-- New in version 0.6.0 -#
-- Name information is not extracted from files older than -# Excel 5.0 (Book.biff_version < 50) class Name(BaseObject): + """ + Information relating to a named reference, formula, macro, etc. + .. note:: + + Name information is **not** extracted from files older than + Excel 5.0 (``Book.biff_version < 50``) + """ _repr_these = ['stack'] book = None # parent - ## - # 0 = Visible; 1 = Hidden + #: 0 = Visible; 1 = Hidden hidden = 0 - ## - # 0 = Command macro; 1 = Function macro. Relevant only if macro == 1 + #: 0 = Command macro; 1 = Function macro. Relevant only if macro == 1 func = 0 - ## - # 0 = Sheet macro; 1 = VisualBasic macro. Relevant only if macro == 1 + #: 0 = Sheet macro; 1 = VisualBasic macro. Relevant only if macro == 1 vbasic = 0 - ## - # 0 = Standard name; 1 = Macro name + #: 0 = Standard name; 1 = Macro name macro = 0 - ## - # 0 = Simple formula; 1 = Complex formula (array formula or user defined)
- # No examples have been sighted. + #: 0 = Simple formula; 1 = Complex formula (array formula or user defined). + #: + #: .. note:: No examples have been sighted. complex = 0 - ## - # 0 = User-defined name; 1 = Built-in name - # (common examples: Print_Area, Print_Titles; see OOo docs for full list) + #: 0 = User-defined name; 1 = Built-in name + #: + #: Common examples: ``Print_Area``, ``Print_Titles``; see OOo docs for + #: full list builtin = 0 - ## - # Function group. Relevant only if macro == 1; see OOo docs for values. + #: Function group. Relevant only if macro == 1; see OOo docs for values. funcgroup = 0 - ## - # 0 = Formula definition; 1 = Binary data
No examples have been sighted. + #: 0 = Formula definition; 1 = Binary data + #: + #: .. note:: No examples have been sighted. binary = 0 - ## - # The index of this object in book.name_obj_list + #: The index of this object in book.name_obj_list name_index = 0 - ## # A Unicode string. If builtin, decoded as per OOo docs. name = UNICODE_LITERAL("") - ## - # An 8-bit string. + #: An 8-bit string. raw_formula = b'' - ## - # -1: The name is global (visible in all calculation sheets).
- # -2: The name belongs to a macro sheet or VBA sheet.
- # -3: The name is invalid.
- # 0 <= scope < book.nsheets: The name is local to the sheet whose index is scope. + #: ``-1``: + #: The name is global (visible in all calculation sheets). + #: ``-2``: + #: The name belongs to a macro sheet or VBA sheet. + #: ``-3``: + #: The name is invalid. + #: ``0 <= scope < book.nsheets``: + #: The name is local to the sheet whose index is scope. scope = -1 - ## - # The result of evaluating the formula, if any. - # If no formula, or evaluation of the formula encountered problems, - # the result is None. Otherwise the result is a single instance of the - # Operand class. + #: The result of evaluating the formula, if any. + #: If no formula, or evaluation of the formula encountered problems, + #: the result is ``None``. Otherwise the result is a single instance of the + #: :class:`~xlrd.formula.Operand` class. # result = None - ## - # This is a convenience method for the frequent use case where the name - # refers to a single cell. - # @return An instance of the Cell class. - # @throws XLRDError The name is not a constant absolute reference - # to a single cell. def cell(self): + """ + This is a convenience method for the frequent use case where the name + refers to a single cell. + + :returns: An instance of the :class:`~xlrd.sheet.Cell` class. + + :raises xlrd.biffh.XLRDError: + The name is not a constant absolute reference + to a single cell. 
+ """ res = self.result if res: # result should be an instance of the Operand class @@ -245,28 +222,36 @@ def cell(self): value = res.value if kind == oREF and len(value) == 1: ref3d = value[0] - if (0 <= ref3d.shtxlo == ref3d.shtxhi - 1 - and ref3d.rowxlo == ref3d.rowxhi - 1 - and ref3d.colxlo == ref3d.colxhi - 1): + if (0 <= ref3d.shtxlo == ref3d.shtxhi - 1 and + ref3d.rowxlo == ref3d.rowxhi - 1 and + ref3d.colxlo == ref3d.colxhi - 1): sh = self.book.sheet_by_index(ref3d.shtxlo) return sh.cell(ref3d.rowxlo, ref3d.colxlo) - self.dump(self.book.logfile, + self.dump( + self.book.logfile, header="=== Dump of Name object ===", footer="======= End of dump =======", - ) + ) raise XLRDError("Not a constant absolute reference to a single cell") - ## - # This is a convenience method for the use case where the name - # refers to one rectangular area in one worksheet. - # @param clipped If true (the default), the returned rectangle is clipped - # to fit in (0, sheet.nrows, 0, sheet.ncols) -- it is guaranteed that - # 0 <= rowxlo <= rowxhi <= sheet.nrows and that the number of usable rows - # in the area (which may be zero) is rowxhi - rowxlo; likewise for columns. - # @return a tuple (sheet_object, rowxlo, rowxhi, colxlo, colxhi). - # @throws XLRDError The name is not a constant absolute reference - # to a single area in a single sheet. def area2d(self, clipped=True): + """ + This is a convenience method for the use case where the name + refers to one rectangular area in one worksheet. + + :param clipped: + If ``True``, the default, the returned rectangle is clipped + to fit in ``(0, sheet.nrows, 0, sheet.ncols)``. + it is guaranteed that ``0 <= rowxlo <= rowxhi <= sheet.nrows`` and + that the number of usable rows in the area (which may be zero) is + ``rowxhi - rowxlo``; likewise for columns. + + :returns: a tuple ``(sheet_object, rowxlo, rowxhi, colxlo, colxhi)``. 
+ + :raises xlrd.biffh.XLRDError: + The name is not a constant absolute reference + to a single area in a single sheet. + """ res = self.result if res: # result should be an instance of the Operand class @@ -285,173 +270,223 @@ def area2d(self, clipped=True): assert 0 <= rowxlo <= rowxhi <= sh.nrows assert 0 <= colxlo <= colxhi <= sh.ncols return sh, rowxlo, rowxhi, colxlo, colxhi - self.dump(self.book.logfile, + self.dump( + self.book.logfile, header="=== Dump of Name object ===", footer="======= End of dump =======", - ) + ) raise XLRDError("Not a constant absolute reference to a single area in a single sheet") -## -# Contents of a "workbook". -#

WARNING: You don't call this class yourself. You use the Book object that -# was returned when you called xlrd.open_workbook("myfile.xls").

class Book(BaseObject): + """ + Contents of a "workbook". - ## - # The number of worksheets present in the workbook file. - # This information is available even when no sheets have yet been loaded. - nsheets = 0 + .. warning:: - ## - # Which date system was in force when this file was last saved.
- # 0 => 1900 system (the Excel for Windows default).
- # 1 => 1904 system (the Excel for Macintosh default).
- datemode = 0 # In case it's not specified in the file. + You should not instantiate this class yourself. You use the :class:`Book` + object that was returned when you called :func:`~xlrd.open_workbook`. + """ - ## - # Version of BIFF (Binary Interchange File Format) used to create the file. - # Latest is 8.0 (represented here as 80), introduced with Excel 97. - # Earliest supported by this module: 2.0 (represented as 20). + #: The number of worksheets present in the workbook file. + #: This information is available even when no sheets have yet been loaded. + nsheets = 0 + + #: Which date system was in force when this file was last saved. + #: + #: 0: + #: 1900 system (the Excel for Windows default). + #: + #: 1: + #: 1904 system (the Excel for Macintosh default). + #: + #: Defaults to 0 in case it's not specified in the file. + datemode = 0 + + #: Version of BIFF (Binary Interchange File Format) used to create the file. + #: Latest is 8.0 (represented here as 80), introduced with Excel 97. + #: Earliest supported by this module: 2.0 (represented as 20). biff_version = 0 - ## - # List containing a Name object for each NAME record in the workbook. - #
-- New in version 0.6.0 + #: List containing a :class:`Name` object for each ``NAME`` record in the + #: workbook. + #: + #: .. versionadded:: 0.6.0 name_obj_list = [] - ## - # An integer denoting the character set used for strings in this file. - # For BIFF 8 and later, this will be 1200, meaning Unicode; more precisely, UTF_16_LE. - # For earlier versions, this is used to derive the appropriate Python encoding - # to be used to convert to Unicode. - # Examples: 1252 -> 'cp1252', 10000 -> 'mac_roman' + #: An integer denoting the character set used for strings in this file. + #: For BIFF 8 and later, this will be 1200, meaning Unicode; + #: more precisely, UTF_16_LE. + #: For earlier versions, this is used to derive the appropriate Python + #: encoding to be used to convert to Unicode. + #: Examples: ``1252 -> 'cp1252'``, ``10000 -> 'mac_roman'`` codepage = None - ## - # The encoding that was derived from the codepage. + #: The encoding that was derived from the codepage. encoding = None - ## - # A tuple containing the (telephone system) country code for:
- # [0]: the user-interface setting when the file was created.
- # [1]: the regional settings.
- # Example: (1, 61) meaning (USA, Australia). - # This information may give a clue to the correct encoding for an unknown codepage. - # For a long list of observed values, refer to the OpenOffice.org documentation for - # the COUNTRY record. + #: A tuple containing the telephone country code for: + #: + #: ``[0]``: + #: the user-interface setting when the file was created. + #: + #: ``[1]``: + #: the regional settings. + #: + #: Example: ``(1, 61)`` meaning ``(USA, Australia)``. + #: + #: This information may give a clue to the correct encoding for an + #: unknown codepage. For a long list of observed values, refer to the + #: OpenOffice.org documentation for the ``COUNTRY`` record. countries = (0, 0) - ## - # What (if anything) is recorded as the name of the last user to save the file. + #: What (if anything) is recorded as the name of the last user to + #: save the file. user_name = UNICODE_LITERAL('') - ## - # A list of Font class instances, each corresponding to a FONT record. - #
-- New in version 0.6.1 + #: A list of :class:`~xlrd.formatting.Font` class instances, + #: each corresponding to a FONT record. + #: + #: .. versionadded:: 0.6.1 font_list = [] - ## - # A list of XF class instances, each corresponding to an XF record. - #
-- New in version 0.6.1 + #: A list of :class:`~xlrd.formatting.XF` class instances, + #: each corresponding to an ``XF`` record. + #: + #: .. versionadded:: 0.6.1 xf_list = [] - ## - # A list of Format objects, each corresponding to a FORMAT record, in - # the order that they appear in the input file. - # It does not contain builtin formats. - # If you are creating an output file using (for example) pyExcelerator, - # use this list. - # The collection to be used for all visual rendering purposes is format_map. - #
-- New in version 0.6.1 + #: A list of :class:`~xlrd.formatting.Format` objects, each corresponding to + #: a ``FORMAT`` record, in the order that they appear in the input file. + #: It does *not* contain builtin formats. + #: + #: If you are creating an output file using (for example) :mod:`xlwt`, + #: use this list. + #: + #: The collection to be used for all visual rendering purposes is + #: :attr:`format_map`. + #: + #: .. versionadded:: 0.6.1 format_list = [] ## - # The mapping from XF.format_key to Format object. - #
-- New in version 0.6.1 + #: The mapping from :attr:`~xlrd.formatting.XF.format_key` to + #: :class:`~xlrd.formatting.Format` object. + #: + #: .. versionadded:: 0.6.1 format_map = {} - ## - # This provides access via name to the extended format information for - # both built-in styles and user-defined styles.
- # It maps name to (built_in, xf_index), where:
- # name is either the name of a user-defined style, - # or the name of one of the built-in styles. Known built-in names are - # Normal, RowLevel_1 to RowLevel_7, - # ColLevel_1 to ColLevel_7, Comma, Currency, Percent, "Comma [0]", - # "Currency [0]", Hyperlink, and "Followed Hyperlink".
- # built_in 1 = built-in style, 0 = user-defined
- # xf_index is an index into Book.xf_list.
- # References: OOo docs s6.99 (STYLE record); Excel UI Format/Style - #
-- New in version 0.6.1; since 0.7.4, extracted only if - # open_workbook(..., formatting_info=True) + #: This provides access via name to the extended format information for + #: both built-in styles and user-defined styles. + #: + #: It maps ``name`` to ``(built_in, xf_index)``, where + #: ``name`` is either the name of a user-defined style, + #: or the name of one of the built-in styles. Known built-in names are + #: Normal, RowLevel_1 to RowLevel_7, + #: ColLevel_1 to ColLevel_7, Comma, Currency, Percent, "Comma [0]", + #: "Currency [0]", Hyperlink, and "Followed Hyperlink". + #: + #: ``built_in`` has the following meanings + #: + #: 1: + #: built-in style + #: + #: 0: + #: user-defined + #: + #: ``xf_index`` is an index into :attr:`Book.xf_list`. + #: + #: References: OOo docs s6.99 (``STYLE`` record); Excel UI Format/Style + #: + #: .. versionadded:: 0.6.1 + #: + #: Extracted only if ``open_workbook(..., formatting_info=True)`` + #: + #: .. versionadded:: 0.7.4 style_name_map = {} - ## - # This provides definitions for colour indexes. Please refer to the - # above section "The Palette; Colour Indexes" for an explanation - # of how colours are represented in Excel.
- # Colour indexes into the palette map into (red, green, blue) tuples. - # "Magic" indexes e.g. 0x7FFF map to None. - # colour_map is what you need if you want to render cells on screen or in a PDF - # file. If you are writing an output XLS file, use palette_record. - #
-- New in version 0.6.1. Extracted only if open_workbook(..., formatting_info=True) + #: This provides definitions for colour indexes. Please refer to + #: :ref:`palette` for an explanation + #: of how colours are represented in Excel. + #: + #: Colour indexes into the palette map into ``(red, green, blue)`` tuples. + #: "Magic" indexes e.g. ``0x7FFF`` map to ``None``. + #: + #: :attr:`colour_map` is what you need if you want to render cells on screen + #: or in a PDF file. If you are writing an output XLS file, use + #: :attr:`palette_record`. + #: + #: .. note:: Extracted only if ``open_workbook(..., formatting_info=True)`` + #: + #: .. versionadded:: 0.6.1 colour_map = {} - ## - # If the user has changed any of the colours in the standard palette, the XLS - # file will contain a PALETTE record with 56 (16 for Excel 4.0 and earlier) - # RGB values in it, and this list will be e.g. [(r0, b0, g0), ..., (r55, b55, g55)]. - # Otherwise this list will be empty. This is what you need if you are - # writing an output XLS file. If you want to render cells on screen or in a PDF - # file, use colour_map. - #
-- New in version 0.6.1. Extracted only if open_workbook(..., formatting_info=True) + #: If the user has changed any of the colours in the standard palette, the + #: XLS file will contain a ``PALETTE`` record with 56 (16 for Excel 4.0 and + #: earlier) RGB values in it, and this list will be e.g. + #: ``[(r0, b0, g0), ..., (r55, b55, g55)]``. + #: Otherwise this list will be empty. This is what you need if you are + #: writing an output XLS file. If you want to render cells on screen or in a + #: PDF file, use :attr:`colour_map`. + #: + #: .. note:: Extracted only if ``open_workbook(..., formatting_info=True)`` + #: + #: .. versionadded:: 0.6.1 palette_record = [] - ## - # Time in seconds to extract the XLS image as a contiguous string (or mmap equivalent). + #: Time in seconds to extract the XLS image as a contiguous string + #: (or mmap equivalent). load_time_stage_1 = -1.0 - ## - # Time in seconds to parse the data from the contiguous string (or mmap equivalent). + #: Time in seconds to parse the data from the contiguous string + #: (or mmap equivalent). load_time_stage_2 = -1.0 - ## - # @return A list of all sheets in the book. - # All sheets not already loaded will be loaded. def sheets(self): + """ + :returns: A list of all sheets in the book. + + All sheets not already loaded will be loaded. + """ for sheetx in xrange(self.nsheets): if not self._sheet_list[sheetx]: self.get_sheet(sheetx) return self._sheet_list[:] - ## - # @param sheetx Sheet index in range(nsheets) - # @return An object of the Sheet class def sheet_by_index(self, sheetx): + """ + :param sheetx: Sheet index in ``range(nsheets)`` + :returns: A :class:`~xlrd.sheet.Sheet`. + """ return self._sheet_list[sheetx] or self.get_sheet(sheetx) - ## - # @param sheet_name Name of sheet required - # @return An object of the Sheet class def sheet_by_name(self, sheet_name): + """ + :param sheet_name: Name of the sheet required. + :returns: A :class:`~xlrd.sheet.Sheet`. 
+ """ try: sheetx = self._sheet_names.index(sheet_name) except ValueError: raise XLRDError('No sheet named <%r>' % sheet_name) return self.sheet_by_index(sheetx) - ## - # @return A list of the names of all the worksheets in the workbook file. - # This information is available even when no sheets have yet been loaded. def sheet_names(self): + """ + :returns: + A list of the names of all the worksheets in the workbook file. + This information is available even when no sheets have yet been + loaded. + """ return self._sheet_names[:] - ## - # @param sheet_name_or_index Name or index of sheet enquired upon - # @return true if sheet is loaded, false otherwise - #
-- New in version 0.7.1 def sheet_loaded(self, sheet_name_or_index): + """ + :param sheet_name_or_index: Name or index of sheet enquired upon + :returns: ``True`` if sheet is loaded, ``False`` otherwise. + + .. versionadded:: 0.7.1 + """ if isinstance(sheet_name_or_index, int): sheetx = sheet_name_or_index else: @@ -461,10 +496,12 @@ def sheet_loaded(self, sheet_name_or_index): raise XLRDError('No sheet named <%r>' % sheet_name_or_index) return bool(self._sheet_list[sheetx]) - ## - # @param sheet_name_or_index Name or index of sheet to be unloaded. - #
-- New in version 0.7.1 def unload_sheet(self, sheet_name_or_index): + """ + :param sheet_name_or_index: Name or index of sheet to be unloaded. + + .. versionadded:: 0.7.1 + """ if isinstance(sheet_name_or_index, int): sheetx = sheet_name_or_index else: @@ -473,17 +510,19 @@ def unload_sheet(self, sheet_name_or_index): except ValueError: raise XLRDError('No sheet named <%r>' % sheet_name_or_index) self._sheet_list[sheetx] = None - - ## - # This method has a dual purpose. You can call it to release - # memory-consuming objects and (possibly) a memory-mapped file - # (mmap.mmap object) when you have finished loading sheets in - # on_demand mode, but still require the Book object to examine the - # loaded sheets. It is also called automatically (a) when open_workbook - # raises an exception and (b) if you are using a "with" statement, when - # the "with" block is exited. Calling this method multiple times on the - # same object has no ill effect. + def release_resources(self): + """ + This method has a dual purpose. You can call it to release + memory-consuming objects and (possibly) a memory-mapped file + (:class:`mmap.mmap` object) when you have finished loading sheets in + ``on_demand`` mode, but still require the :class:`Book` object to + examine the loaded sheets. It is also called automatically (a) when + :func:`~xlrd.open_workbook` + raises an exception and (b) if you are using a ``with`` statement, when + the ``with`` block is exited. Calling this method multiple times on the + same object has no ill effect. + """ self._resources_released = 1 if hasattr(self.mem, "close"): # must be a mmap.mmap object @@ -494,24 +533,25 @@ def release_resources(self): self.filestr = None self._sharedstrings = None self._rich_text_runlist_map = None - + def __enter__(self): return self - + def __exit__(self, exc_type, exc_value, exc_tb): self.release_resources() - # return false + # return false - ## - # A mapping from (lower_case_name, scope) to a single Name object. - #
-- New in version 0.6.0 + #: A mapping from ``(lower_case_name, scope)`` to a single :class:`Name` + #: object. + #: + #: .. versionadded:: 0.6.0 name_and_scope_map = {} - ## - # A mapping from lower_case_name to a list of Name objects. The list is - # sorted in scope order. Typically there will be one item (of global scope) - # in the list. - #
-- New in version 0.6.0 + #: A mapping from `lower_case_name` to a list of :class:`Name` objects. + #: The list is sorted in scope order. Typically there will be one item + #: (of global scope) in the list. + #: + #: .. versionadded:: 0.6.0 name_map = {} def __init__(self): @@ -548,12 +588,11 @@ def __init__(self): self.filestr = b'' def biff2_8_load(self, filename=None, file_contents=None, - logfile=sys.stdout, verbosity=0, use_mmap=USE_MMAP, - encoding_override=None, - formatting_info=False, - on_demand=False, - ragged_rows=False, - ): + logfile=sys.stdout, verbosity=0, use_mmap=USE_MMAP, + encoding_override=None, + formatting_info=False, + on_demand=False, + ragged_rows=False): # DEBUG = 0 self.logfile = logfile self.verbosity = verbosity @@ -655,17 +694,18 @@ def get_sheet(self, sh_number, update_pos=True): raise XLRDError("Can't load sheets after releasing resources.") if update_pos: self._position = self._sh_abs_posn[sh_number] - _unused_biff_version = self.getbof(XL_WORKSHEET) + self.getbof(XL_WORKSHEET) # assert biff_version == self.biff_version ### FAILS # Have an example where book is v7 but sheet reports v8!!! # It appears to work OK if the sheet version is ignored. # Confirmed by Daniel Rentz: happens when Excel does "save as" # creating an old version file; ignore version details on sheet BOF. - sh = sheet.Sheet(self, - self._position, - self._sheet_names[sh_number], - sh_number, - ) + sh = sheet.Sheet( + self, + self._position, + self._sheet_names[sh_number], + sh_number, + ) sh.read(self) self._sheet_list[sh_number] = sh return sh @@ -691,7 +731,7 @@ def handle_boundsheet(self, data): bv = self.biff_version self.derive_encoding() if DEBUG: - fprintf(self.logfile, "BOUNDSHEET: bv=%d data %r\n", bv, data); + fprintf(self.logfile, "BOUNDSHEET: bv=%d data %r\n", bv, data) if bv == 45: # BIFF4W #### Not documented in OOo docs ... # In fact, the *only* data is the name of the sheet. 
@@ -724,7 +764,7 @@ def handle_boundsheet(self, data): 1: 'Macro sheet', 2: 'Chart', 6: 'Visual Basic module', - }.get(sheet_type, 'UNKNOWN') + }.get(sheet_type, 'UNKNOWN') if DEBUG or self.verbosity >= 1: fprintf(self.logfile, @@ -772,7 +812,7 @@ def derive_encoding(self): # If we don't have a codec that can decode ASCII into Unicode, # we're well & truly stuffed -- let the punter know ASAP. try: - _unused = unicode(b'trial', self.encoding) + unicode(b'trial', self.encoding) except BaseException as e: fprintf(self.logfile, "ERROR *** codepage %r -> encoding %r -> %s: %s\n", @@ -835,7 +875,7 @@ def handle_externsheet(self, data): self.logfile, "INFO: EXTERNSHEET needs %d bytes, have %d\n", bytes_reqd, len(data), - ) + ) code2, length2, data2 = self.get_record_parts() if code2 != XL_CONTINUE: raise XLRDError("Missing CONTINUE after EXTERNSHEET record") @@ -851,7 +891,7 @@ def handle_externsheet(self, data): self.logfile, "EXTERNSHEET(b8): k = %2d, record = %2d, first_sheet = %5d, last sheet = %5d\n", k, ref_recordx, ref_first_sheetx, ref_last_sheetx, - ) + ) else: nc, ty = unpack("> nshift) macro_flag = " M"[nobj.macro] @@ -952,7 +993,7 @@ def handle_name(self, data): self.logfile, header="--- handle_name: name[%d] ---" % name_index, footer="-------------------", - ) + ) def names_epilogue(self): blah = self.verbosity >= 2 @@ -1073,10 +1114,11 @@ def handle_supbook(self, data): # #### FIX ME #### # Should implement handling of CONTINUE record(s) ... 
if self.verbosity: - print(( - "*** WARNING: unpack failure in sheet %d of %d in SUPBOOK record for file %r" - % (x, num_sheets, url) - ), file=self.logfile) + print( + "*** WARNING: unpack failure in sheet %d of %d in SUPBOOK record for file %r" + % (x, num_sheets, url), + file=self.logfile, + ) break sheet_names.append(shname) if blah: fprintf(self.logfile, " sheetx=%d namelen=%d name=%r (next pos=%d)\n", x, len(shname), shname, pos) @@ -1128,7 +1170,7 @@ def handle_sst(self, data): strlist.append(data) self._sharedstrings, rt_runlist = unpack_SST_table(strlist, uniquestrings) if self.formatting_info: - self._rich_text_runlist_map = rt_runlist + self._rich_text_runlist_map = rt_runlist if DEBUG: t1 = time.time() print("SST processing took %.2f seconds" % (t1 - t0, ), file=self.logfile) @@ -1220,6 +1262,7 @@ def getbof(self, rqd_stream): # DEBUG = 1 # if DEBUG: print >> self.logfile, "getbof(): position", self._position if DEBUG: print("reqd: 0x%04x" % rqd_stream, file=self.logfile) + def bof_error(msg): raise XLRDError('Unsupported format, or corrupt file: ' + msg) savpos = self._position @@ -1236,7 +1279,7 @@ def bof_error(msg): 'Invalid length (%d) for BOF record type 0x%04x' % (length, opcode)) padding = b'\0' * max(0, boflen[opcode] - length) - data = self.read(self._position, length); + data = self.read(self._position, length) if DEBUG: fprintf(self.logfile, "\ngetbof(): data=%r\n", data) if len(data) < length: bof_error('Incomplete BOF record[2]; met end of file') @@ -1244,11 +1287,11 @@ def bof_error(msg): version1 = opcode >> 8 version2, streamtype = unpack('= 2: - print("BOF: op=0x%04x vers=0x%04x stream=0x%04x buildid=%d buildyr=%d -> BIFF%d" \ + print("BOF: op=0x%04x vers=0x%04x stream=0x%04x buildid=%d buildyr=%d -> BIFF%d" % (opcode, version2, streamtype, build, year, version), file=self.logfile) got_globals = streamtype == XL_WORKBOOK_GLOBALS or ( version == 45 and streamtype == XL_WORKBOOK_GLOBALS_4W) @@ -1287,9 +1330,9 @@ def bof_error(msg): if 
version >= 50 and streamtype == 0x0100: bof_error("Workspace file -- no spreadsheet data") bof_error( - 'BOF not workbook/worksheet: op=0x%04x vers=0x%04x strm=0x%04x build=%d year=%d -> BIFF%d' \ + 'BOF not workbook/worksheet: op=0x%04x vers=0x%04x strm=0x%04x build=%d year=%d -> BIFF%d' % (opcode, version2, streamtype, build, year, version) - ) + ) # === helper functions @@ -1393,7 +1436,7 @@ def unpack_SST_table(datatab, nstrings): datalen = len(data) options = local_BYTES_ORD(data[0]) pos = 1 - + if rtcount: runs = [] for runindex in xrange(rtcount): @@ -1405,7 +1448,7 @@ def unpack_SST_table(datatab, nstrings): runs.append(local_unpack("= datalen: # adjust to correct position in next record diff --git a/SUEWSPrepare/Modules/xlrd/compdoc.py b/SUEWSPrepare/Modules/xlrd/compdoc.py index e434e8e..b4632dc 100644 --- a/SUEWSPrepare/Modules/xlrd/compdoc.py +++ b/SUEWSPrepare/Modules/xlrd/compdoc.py @@ -1,28 +1,23 @@ -# -*- coding: cp1252 -*- - -## -# Implements the minimal functionality required -# to extract a "Workbook" or "Book" stream (as one big string) -# from an OLE2 Compound Document file. -#

Copyright � 2005-2012 Stephen John Machin, Lingfo Pty Ltd

-#

This module is part of the xlrd package, which is released under a BSD-style licence.

-## - -# No part of the content of this file was derived from the works of David Giffin. - -# 2008-11-04 SJM Avoid assertion error when -1 used instead of -2 for first_SID of empty SCSS [Frank Hoffsuemmer] -# 2007-09-08 SJM Warning message if sector sizes are extremely large. -# 2007-05-07 SJM Meaningful exception instead of IndexError if a SAT (sector allocation table) is corrupted. -# 2007-04-22 SJM Missing "<" in a struct.unpack call => can't open files on bigendian platforms. - +# -*- coding: utf-8 -*- +# Copyright (c) 2005-2012 Stephen John Machin, Lingfo Pty Ltd +# This module is part of the xlrd package, which is released under a +# BSD-style licence. +# No part of the content of this file was derived from the works of +# David Giffin. +""" +Implements the minimal functionality required +to extract a "Workbook" or "Book" stream (as one big string) +from an OLE2 Compound Document file. +""" from __future__ import print_function + +import array import sys from struct import unpack + from .timemachine import * -import array -## -# Magic cookie that should appear in the first 8 bytes of the file. +#: Magic cookie that should appear in the first 8 bytes of the file. SIGNATURE = b"\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1" EOCSID = -2 @@ -61,7 +56,7 @@ def dump(self, DEBUG=1): "DID=%d name=%r etype=%d DIDs(left=%d right=%d root=%d parent=%d kids=%r) first_SID=%d tot_size=%d\n", self.DID, self.name, self.etype, self.left_DID, self.right_DID, self.root_DID, self.parent, self.children, self.first_SID, self.tot_size - ) + ) if DEBUG == 2: # cre_lo, cre_hi, mod_lo, mod_hi = tsinfo print("timestamp info", self.tsinfo, file=self.logfile) @@ -75,12 +70,16 @@ def _build_family_tree(dirlist, parent_DID, child_DID): if dirlist[child_DID].etype == 1: # storage _build_family_tree(dirlist, child_DID, dirlist[child_DID].root_DID) -## -# Compound document handler. -# @param mem The raw contents of the file, as a string, or as an mmap.mmap() object. 
The -# only operation it needs to support is slicing. class CompDoc(object): + """ + Compound document handler. + + :param mem: + The raw contents of the file, as a string, or as an :class:`mmap.mmap` + object. The only operation it needs to support is slicing. + """ + def __init__(self, mem, logfile=sys.stdout, DEBUG=0): self.logfile = logfile @@ -95,11 +94,11 @@ def __init__(self, mem, logfile=sys.stdout, DEBUG=0): self.mem = mem ssz, sssz = unpack(' 20: # allows for 2**20 bytes i.e. 1MB - print("WARNING: sector size (2**%d) is preposterous; assuming 512 and continuing ..." \ + print("WARNING: sector size (2**%d) is preposterous; assuming 512 and continuing ..." % ssz, file=logfile) ssz = 9 if sssz > ssz: - print("WARNING: short stream sector size (2**%d) is preposterous; assuming 64 and continuing ..." \ + print("WARNING: short stream sector size (2**%d) is preposterous; assuming 64 and continuing ..." % sssz, file=logfile) sssz = 6 self.sec_size = sec_size = 1 << ssz @@ -110,14 +109,13 @@ def __init__(self, mem, logfile=sys.stdout, DEBUG=0): SAT_tot_secs, self.dir_first_sec_sid, _unused, self.min_size_std_stream, SSAT_first_sec_sid, SSAT_tot_secs, MSATX_first_sec_sid, MSATX_tot_secs, - # ) = unpack('>>", mem_data_secs, nent, SAT_sectors_reqd, expected_MSATX_sectors, actual_MSATX_sectors, file=logfile) if DEBUG: @@ -190,7 +188,7 @@ def __init__(self, mem, logfile=sys.stdout, DEBUG=0): if msid >= mem_data_secs: if not trunc_warned: print("WARNING *** File is truncated, or OLE2 MSAT is corrupt!!", file=logfile) - print("INFO: Trying to access sector %d but only %d available" \ + print("INFO: Trying to access sector %d but only %d available" % (msid, mem_data_secs), file=logfile) trunc_warned = 1 MSAT[msidx] = EVILSID @@ -212,8 +210,8 @@ def __init__(self, mem, logfile=sys.stdout, DEBUG=0): dump_list(self.SAT, 10, logfile) # print >> logfile, "SAT ", # for i, s in enumerate(self.SAT): - # print >> logfile, "entry: %4d offset: %6d, next entry: %4d" % (i, 512 + 
sec_size * i, s) - # print >> logfile, "%d:%d " % (i, s), + # print >> logfile, "entry: %4d offset: %6d, next entry: %4d" % (i, 512 + sec_size * i, s) + # print >> logfile, "%d:%d " % (i, s), print(file=logfile) if DEBUG and dump_again: print("MSAT: len =", len(MSAT), file=logfile) @@ -302,7 +300,7 @@ def _get_stream(self, mem, base, sat, sec_size, start_sid, size=None, name='', s raise CompDocError( "OLE2 stream %r: sector allocation table invalid entry (%d)" % (name, s) - ) + ) assert s == EOCSID else: todo = size @@ -323,10 +321,10 @@ def _get_stream(self, mem, base, sat, sec_size, start_sid, size=None, name='', s raise CompDocError( "OLE2 stream %r: sector allocation table invalid entry (%d)" % (name, s) - ) + ) assert s == EOCSID if todo != 0: - fprintf(self.logfile, + fprintf(self.logfile, "WARNING *** OLE2 stream %r: expected size %d, actual size %d\n", name, size, size - todo) @@ -350,12 +348,16 @@ def _dir_search(self, path, storage_DID=0): raise CompDocError("Requested stream is not a 'user stream'") return None - ## - # Interrogate the compound document's directory; return the stream as a string if found, otherwise - # return None. - # @param qname Name of the desired stream e.g. u'Workbook'. Should be in Unicode or convertible thereto. def get_named_stream(self, qname): + """ + Interrogate the compound document's directory; return the stream as a + string if found, otherwise return ``None``. + + :param qname: + Name of the desired stream e.g. ``'Workbook'``. + Should be in Unicode or convertible thereto. + """ d = self._dir_search(qname.split("/")) if d is None: return None @@ -368,16 +370,23 @@ def get_named_stream(self, qname): self.SSCS, 0, self.SSAT, self.short_sec_size, d.first_SID, d.tot_size, name=qname + " (from SSCS)", seen_id=None) - ## - # Interrogate the compound document's directory. - # If the named stream is not found, (None, 0, 0) will be returned. 
- # If the named stream is found and is contiguous within the original byte sequence ("mem") - # used when the document was opened, - # then (mem, offset_to_start_of_stream, length_of_stream) is returned. - # Otherwise a new string is built from the fragments and (new_string, 0, length_of_stream) is returned. - # @param qname Name of the desired stream e.g. u'Workbook'. Should be in Unicode or convertible thereto. - def locate_named_stream(self, qname): + """ + Interrogate the compound document's directory. + + If the named stream is not found, ``(None, 0, 0)`` will be returned. + + If the named stream is found and is contiguous within the original + byte sequence (``mem``) used when the document was opened, + then ``(mem, offset_to_start_of_stream, length_of_stream)`` is returned. + + Otherwise a new string is built from the fragments and + ``(new_string, 0, length_of_stream)`` is returned. + + :param qname: + Name of the desired stream e.g. ``'Workbook'``. + Should be in Unicode or convertible thereto. 
+ """ d = self._dir_search(qname.split("/")) if d is None: return (None, 0, 0) @@ -386,7 +395,7 @@ def locate_named_stream(self, qname): % (qname, d.tot_size, self.mem_data_len)) if d.tot_size >= self.min_size_std_stream: result = self._locate_stream( - self.mem, 512, self.SAT, self.sec_size, d.first_SID, + self.mem, 512, self.SAT, self.sec_size, d.first_SID, d.tot_size, qname, d.DID+6) if self.DEBUG: print("\nseen", file=self.logfile) @@ -398,8 +407,8 @@ def locate_named_stream(self, qname): self.SSCS, 0, self.SSAT, self.short_sec_size, d.first_SID, d.tot_size, qname + " (from SSCS)", None), 0, - d.tot_size - ) + d.tot_size, + ) def _locate_stream(self, mem, base, sat, sec_size, start_sid, expected_stream_size, qname, seen_id): # print >> self.logfile, "_locate_stream", base, sec_size, start_sid, expected_stream_size @@ -419,10 +428,11 @@ def _locate_stream(self, mem, base, sat, sec_size, start_sid, expected_stream_si self.seen[s] = seen_id tot_found += 1 if tot_found > found_limit: + # Note: expected size rounded up to higher sector raise CompDocError( "%s: size exceeds expected %d bytes; corrupt?" 
% (qname, found_limit * sec_size) - ) # Note: expected size rounded up to higher sector + ) if s == p+1: # contiguous sectors end_pos += sec_size @@ -443,7 +453,7 @@ def _locate_stream(self, mem, base, sat, sec_size, start_sid, expected_stream_si return (mem, start_pos, expected_stream_size) slices.append((start_pos, end_pos)) # print >> self.logfile, "+++>>> %d fragments" % len(slices) - return (b''.join([mem[start_pos:end_pos] for start_pos, end_pos in slices]), 0, expected_stream_size) + return (b''.join(mem[start_pos:end_pos] for start_pos, end_pos in slices), 0, expected_stream_size) # ========================================================================================== def x_dump_line(alist, stride, f, dpos, equal=0): diff --git a/SUEWSPrepare/Modules/xlrd/doc/compdoc.html b/SUEWSPrepare/Modules/xlrd/doc/compdoc.html deleted file mode 100644 index ebabfe6..0000000 --- a/SUEWSPrepare/Modules/xlrd/doc/compdoc.html +++ /dev/null @@ -1,69 +0,0 @@ - - - - -The compdoc Module - - -

The compdoc Module

-

Implements the minimal functionality required -to extract a "Workbook" or "Book" stream (as one big string) -from an OLE2 Compound Document file. -

Copyright © 2005-2012 Stephen John Machin, Lingfo Pty Ltd

-

This module is part of the xlrd package, which is released under a BSD-style licence.

-

Module Contents

-
-
CompDoc(mem, logfile=sys.stdout, DEBUG=0) (class) [#]
-
-

Compound document handler.

-
-
mem
-
-The raw contents of the file, as a string, or as an mmap.mmap() object. The -only operation it needs to support is slicing.
-

-

For more information about this class, see The CompDoc Class.

-
-
SIGNATURE (variable) [#]
-
-

Magic cookie that should appear in the first 8 bytes of the file.

-
-
-

The CompDoc Class

-
-
CompDoc(mem, logfile=sys.stdout, DEBUG=0) (class) [#]
-
-

Compound document handler.

-
-
mem
-
-The raw contents of the file, as a string, or as an mmap.mmap() object. The -only operation it needs to support is slicing.
-

-
-
get_named_stream(qname) [#]
-
-

Interrogate the compound document's directory; return the stream as a string if found, otherwise -return None.

-
-
qname
-
-Name of the desired stream e.g. u'Workbook'. Should be in Unicode or convertible thereto.
-

-
-
locate_named_stream(qname) [#]
-
-

Interrogate the compound document's directory. -If the named stream is not found, (None, 0, 0) will be returned. -If the named stream is found and is contiguous within the original byte sequence ("mem") -used when the document was opened, -then (mem, offset_to_start_of_stream, length_of_stream) is returned. -Otherwise a new string is built from the fragments and (new_string, 0, length_of_stream) is returned.

-
-
qname
-
-Name of the desired stream e.g. u'Workbook'. Should be in Unicode or convertible thereto.
-

-
-
- diff --git a/SUEWSPrepare/Modules/xlrd/doc/xlrd.html b/SUEWSPrepare/Modules/xlrd/doc/xlrd.html deleted file mode 100644 index b096185..0000000 --- a/SUEWSPrepare/Modules/xlrd/doc/xlrd.html +++ /dev/null @@ -1,2064 +0,0 @@ - - - - -The xlrd Module - - -

The xlrd Module

-

A Python module for extracting data from MS Excel (TM) spreadsheet files. -

-Version 0.7.4 -- April 2012 -

- -

General information

- -

Acknowledgements

- -

-Development of this module would not have been possible without the document -"OpenOffice.org's Documentation of the Microsoft Excel File Format" -("OOo docs" for short). -The latest version is available from OpenOffice.org in - PDF format -and - ODT format. -Small portions of the OOo docs are reproduced in this -document. A study of the OOo docs is recommended for those who wish a -deeper understanding of the Excel file layout than the xlrd docs can provide. -

- -

Backporting to Python 2.1 was partially funded by - - Journyx - provider of timesheet and project accounting solutions. - -

- -

Provision of formatting information in version 0.6.1 was funded by - - Simplistix Ltd. - -

- -

Unicode

- -

This module presents all text strings as Python unicode objects. -From Excel 97 onwards, text in Excel spreadsheets has been stored as Unicode. -Older files (Excel 95 and earlier) don't keep strings in Unicode; -a CODEPAGE record provides a codepage number (for example, 1252) which is -used by xlrd to derive the encoding (for same example: "cp1252") which is -used to translate to Unicode.

- -

If the CODEPAGE record is missing (possible if the file was created -by third-party software), xlrd will assume that the encoding is ascii, and keep going. -If the actual encoding is not ascii, a UnicodeDecodeError exception will be raised and -you will need to determine the encoding yourself, and tell xlrd: -

-    book = xlrd.open_workbook(..., encoding_override="cp1252")
-

-

If the CODEPAGE record exists but is wrong (for example, the codepage -number is 1251, but the strings are actually encoded in koi8_r), -it can be overridden using the same mechanism. -The supplied runxlrd.py has a corresponding command-line argument, which -may be used for experimentation: -

-    runxlrd.py -e koi8_r 3rows myfile.xls
-

-

The first place to look for an encoding ("codec name") is - -the Python documentation. -

-
- -

Dates in Excel spreadsheets

- -

In reality, there are no such things. What you have are floating point -numbers and pious hope. -There are several problems with Excel dates:

- -

(1) Dates are not stored as a separate data type; they are stored as -floating point numbers and you have to rely on -(a) the "number format" applied to them in Excel and/or -(b) knowing which cells are supposed to have dates in them. -This module helps with (a) by inspecting the -format that has been applied to each number cell; -if it appears to be a date format, the cell -is classified as a date rather than a number. Feedback on this feature, -especially from non-English-speaking locales, would be appreciated.

- -

(2) Excel for Windows stores dates by default as the number of -days (or fraction thereof) since 1899-12-31T00:00:00. Excel for -Macintosh uses a default start date of 1904-01-01T00:00:00. The date -system can be changed in Excel on a per-workbook basis (for example: -Tools -> Options -> Calculation, tick the "1904 date system" box). -This is of course a bad idea if there are already dates in the -workbook. There is no good reason to change it even if there are no -dates in the workbook. Which date system is in use is recorded in the -workbook. A workbook transported from Windows to Macintosh (or vice -versa) will work correctly with the host Excel. When using this -module's xldate_as_tuple function to convert numbers from a workbook, -you must use the datemode attribute of the Book object. If you guess, -or make a judgement depending on where you believe the workbook was -created, you run the risk of being 1462 days out of kilter.

- -

Reference: -http://support.microsoft.com/default.aspx?scid=KB;EN-US;q180162

- - -

(3) The Excel implementation of the Windows-default 1900-based date system works on the -incorrect premise that 1900 was a leap year. It interprets the number 60 as meaning 1900-02-29, -which is not a valid date. Consequently any number less than 61 is ambiguous. Example: is 59 the -result of 1900-02-28 entered directly, or is it 1900-03-01 minus 2 days? The OpenOffice.org Calc -program "corrects" the Microsoft problem; entering 1900-02-27 causes the number 59 to be stored. -Save as an XLS file, then open the file with Excel -- you'll see 1900-02-28 displayed.

- -

Reference: http://support.microsoft.com/default.aspx?scid=kb;en-us;214326

- -

(4) The Macintosh-default 1904-based date system counts 1904-01-02 as day 1 and 1904-01-01 as day zero. -Thus any number such that (0.0 <= number < 1.0) is ambiguous. Is 0.625 a time of day (15:00:00), -independent of the calendar, -or should it be interpreted as an instant on a particular day (1904-01-01T15:00:00)? -The xldate_* functions in this module -take the view that such a number is a calendar-independent time of day (like Python's datetime.time type) for both -date systems. This is consistent with more recent Microsoft documentation -(for example, the help file for Excel 2002 which says that the first day -in the 1904 date system is 1904-01-02). - -

(5) Usage of the Excel DATE() function may leave strange dates in a spreadsheet. Quoting the help file, -in respect of the 1900 date system: "If year is between 0 (zero) and 1899 (inclusive), -Excel adds that value to 1900 to calculate the year. For example, DATE(108,1,2) returns January 2, 2008 (1900+108)." -This gimmick, semi-defensible only for arguments up to 99 and only in the pre-Y2K-awareness era, -means that DATE(1899, 12, 31) is interpreted as 3799-12-31.

- -

For further information, please refer to the documentation for the xldate_* functions.

- -

Named references, constants, formulas, and macros

- -

-A name is used to refer to a cell, a group of cells, a constant -value, a formula, or a macro. Usually the scope of a name is global -across the whole workbook. However it can be local to a worksheet. -For example, if the sales figures are in different cells in -different sheets, the user may define the name "Sales" in each -sheet. There are built-in names, like "Print_Area" and -"Print_Titles"; these two are naturally local to a sheet. -

-To inspect the names with a user interface like MS Excel, OOo Calc, -or Gnumeric, click on Insert/Names/Define. This will show the global -names, plus those local to the currently selected sheet. -

-A Book object provides two dictionaries (name_map and -name_and_scope_map) and a list (name_obj_list) which allow various -ways of accessing the Name objects. There is one Name object for -each NAME record found in the workbook. Name objects have many -attributes, several of which are relevant only when obj.macro is 1. -

-In the examples directory you will find namesdemo.xls which -showcases the many different ways that names can be used, and -xlrdnamesAPIdemo.py which offers 3 different queries for inspecting -the names in your files, and shows how to extract whatever a name is -referring to. There is currently one "convenience method", -Name.cell(), which extracts the value in the case where the name -refers to a single cell. More convenience methods are planned. The -source code for Name.cell (in __init__.py) is an extra source of -information on how the Name attributes hang together. -

- -

Name information is not extracted from files older than -Excel 5.0 (Book.biff_version < 50)

- -

Formatting

- -

Introduction

- -

This collection of features, new in xlrd version 0.6.1, is intended -to provide the information needed to (1) display/render spreadsheet contents -(say) on a screen or in a PDF file, and (2) copy spreadsheet data to another -file without losing the ability to display/render it.

- -

The Palette; Colour Indexes

- -

A colour is represented in Excel as a (red, green, blue) ("RGB") tuple -with each component in range(256). However it is not possible to access an -unlimited number of colours; each spreadsheet is limited to a palette of 64 different -colours (24 in Excel 3.0 and 4.0, 8 in Excel 2.0). Colours are referenced by an index -("colour index") into this palette. - -Colour indexes 0 to 7 represent 8 fixed built-in colours: black, white, red, green, blue, -yellow, magenta, and cyan.

- -The remaining colours in the palette (8 to 63 in Excel 5.0 and later) -can be changed by the user. In the Excel 2003 UI, Tools/Options/Color presents a palette -of 7 rows of 8 colours. The last two rows are reserved for use in charts.
-The correspondence between this grid and the assigned -colour indexes is NOT left-to-right top-to-bottom.
-Indexes 8 to 15 correspond to changeable -parallels of the 8 fixed colours -- for example, index 7 is forever cyan; -index 15 starts off being cyan but can be changed by the user.
- -The default colour for each index depends on the file version; tables of the defaults -are available in the source code. If the user changes one or more colours, -a PALETTE record appears in the XLS file -- it gives the RGB values for *all* changeable -indexes.
-Note that colours can be used in "number formats": "[CYAN]...." and "[COLOR8]...." refer -to colour index 7; "[COLOR16]...." will produce cyan -unless the user changes colour index 15 to something else.
- -

In addition, there are several "magic" colour indexes used by Excel:
-0x18 (BIFF3-BIFF4), 0x40 (BIFF5-BIFF8): System window text colour for border lines -(used in XF, CF, and WINDOW2 records)
-0x19 (BIFF3-BIFF4), 0x41 (BIFF5-BIFF8): System window background colour for pattern background -(used in XF and CF records )
-0x43: System face colour (dialogue background colour)
-0x4D: System window text colour for chart border lines
-0x4E: System window background colour for chart areas
-0x4F: Automatic colour for chart border lines (seems to be always Black)
-0x50: System ToolTip background colour (used in note objects)
-0x51: System ToolTip text colour (used in note objects)
-0x7FFF: System window text colour for fonts (used in FONT and CF records)
-Note 0x7FFF appears to be the *default* colour index. It appears quite often in FONT -records.
- -

Default Formatting

- -Default formatting is applied to all empty cells (those not described by a cell record). -Firstly row default information (ROW record, Rowinfo class) is used if available. -Failing that, column default information (COLINFO record, Colinfo class) is used if available. -As a last resort the worksheet/workbook default cell format will be used; this -should always be present in an Excel file, -described by the XF record with the fixed index 15 (0-based). By default, it uses the -worksheet/workbook default cell style, described by the very first XF record (index 0). - -

Formatting features not included in xlrd version 0.6.1

-
    -
  • Rich text i.e. strings containing partial bold italic - and underlined text, change of font inside a string, etc. - See OOo docs s3.4 and s3.2. - Rich text is included in version 0.7.2
  • -
  • Asian phonetic text (known as "ruby"), used for Japanese furigana. See OOo docs - s3.4.2 (p15)
  • -
  • Conditional formatting. See OOo docs - s5.12, s6.21 (CONDFMT record), s6.16 (CF record)
  • -
  • Miscellaneous sheet-level and book-level items e.g. printing layout, screen panes.
  • -
  • Modern Excel file versions don't keep most of the built-in - "number formats" in the file; Excel loads formats according to the - user's locale. Currently xlrd's emulation of this is limited to - a hard-wired table that applies to the US English locale. This may mean - that currency symbols, date order, thousands separator, decimals separator, etc - are inappropriate. Note that this does not affect users who are copying XLS - files, only those who are visually rendering cells.
  • -
- -

Loading worksheets on demand

- -

This feature, new in version 0.7.1, is governed by the on_demand argument -to the open_workbook() function and allows saving memory and time by loading -only those sheets that the caller is interested in, and releasing sheets -when no longer required.

- -

on_demand=False (default): No change. open_workbook() loads global data -and all sheets, releases resources no longer required (principally the -str or mmap object containing the Workbook stream), and returns.

- -

on_demand=True and BIFF version < 5.0: A warning message is emitted, -on_demand is recorded as False, and the old process is followed.

- -

on_demand=True and BIFF version >= 5.0: open_workbook() loads global -data and returns without releasing resources. At this stage, the only -information available about sheets is Book.nsheets and Book.sheet_names().

- -

Book.sheet_by_name() and Book.sheet_by_index() will load the requested -sheet if it is not already loaded.

- -

Book.sheets() will load all/any unloaded sheets.

- -

The caller may save memory by calling -Book.unload_sheet(sheet_name_or_index) when finished with the sheet. -This applies irrespective of the state of on_demand.

- -

The caller may re-load an unloaded sheet by calling Book.sheet_by_xxxx() - -- except if those required resources have been released (which will -have happened automatically when on_demand is false). This is the only -case where an exception will be raised.

- -

The caller may query the state of a sheet: -Book.sheet_loaded(sheet_name_or_index) -> a bool

- -

Book.release_resources() may used to save memory and close -any memory-mapped file before proceding to examine already-loaded -sheets. Once resources are released, no further sheets can be loaded.

- -

When using on-demand, it is advisable to ensure that -Book.release_resources() is always called even if an exception -is raised in your own code; otherwise if the input file has been -memory-mapped, the mmap.mmap object will not be closed and you will -not be able to access the physical file until your Python process -terminates. This can be done by calling Book.release_resources() -explicitly in the finally suite of a try/finally block. -New in xlrd 0.7.2: the Book object is a "context manager", so if -using Python 2.5 or later, you can wrap your code in a "with" -statement.

-

Module Contents

-
-
BaseObject (class) [#]
-
-

Parent of almost all other classes in the package.

-

For more information about this class, see The BaseObject Class.

-
-
Book() (class) [#]
-
-

Contents of a "workbook".

-

For more information about this class, see The Book Class.

-
-
Cell(ctype, value, xf_index=None) (class) [#]
-
-

Contains the data for one cell.

-

For more information about this class, see The Cell Class.

-
-
cellname(rowx, colx) [#]
-
-

Utility function: (5, 7) => 'H6'

-
-
cellnameabs(rowx, colx, r1c1=0) [#]
-
-

Utility function: (5, 7) => '$H$6'

-
-
Colinfo (class) [#]
-
-

Width and default formatting information that applies to one or -more columns in a sheet.

-

For more information about this class, see The Colinfo Class.

-
-
colname(colx) [#]
-
-

Utility function: 7 => 'H', 27 => 'AB'

-
-
count_records(filename, outfile=sys.stdout) [#]
-
-

For debugging and analysis: summarise the file's BIFF records. -I.e. produce a sorted file of (record_name, count).

-
-
filename
-
-The path to the file to be summarised.
-
outfile
-
-An open file, to which the summary is written.
-

-
-
dump(filename, outfile=sys.stdout, unnumbered=False) [#]
-
-

For debugging: dump the file's BIFF records in char & hex. -

-
filename
-
-The path to the file to be dumped.
-
outfile
-
-An open file, to which the dump is written.
-
unnumbered
-
-If true, omit offsets (for meaningful diffs).
-

-
-
empty_cell (variable) [#]
-
-

There is one and only one instance of an empty cell -- it's a singleton. This is it. -You may use a test like "acell is empty_cell".

-
-
EqNeAttrs (class) [#]
-
-

This mixin class exists solely so that Format, Font, and XF....

-

For more information about this class, see The EqNeAttrs Class.

-
-
error_text_from_code (variable) [#]
-
-

This dictionary can be used to produce a text version of the internal codes -that Excel uses for error cells. Here are its contents: -

-0x00: '#NULL!',  # Intersection of two cell ranges is empty
-0x07: '#DIV/0!', # Division by zero
-0x0F: '#VALUE!', # Wrong type of operand
-0x17: '#REF!',   # Illegal or deleted cell reference
-0x1D: '#NAME?',  # Wrong function or range name
-0x24: '#NUM!',   # Value range overflow
-0x2A: '#N/A',    # Argument or function not available
-

-
-
Font (class) [#]
-
-

An Excel "font" contains the details of not only what is normally -considered a font, but also several other display attributes.

-

For more information about this class, see The Font Class.

-
-
Format(format_key, ty, format_str) (class) [#]
-
-

"Number format" information from a FORMAT record.

-

For more information about this class, see The Format Class.

-
-
Hyperlink (class) [#]
-
-

Contains the attributes of a hyperlink.

-

For more information about this class, see The Hyperlink Class.

-
-
Name (class) [#]
-
-

Information relating to a named reference, formula, macro, etc.

-

For more information about this class, see The Name Class.

-
-
Note (class) [#]
-
-

Represents a user "comment" or "note".

-

For more information about this class, see The Note Class.

-
-
open_workbook(filename=None, -logfile=sys.stdout, verbosity=0, use_mmap=USE_MMAP, -file_contents=None, -encoding_override=None, -formatting_info=False, on_demand=False, ragged_rows=False, -) [#]
-
-

Open a spreadsheet file for data extraction.

-
-
filename
-
-The path to the spreadsheet file to be opened.
-
logfile
-
-An open file to which messages and diagnostics are written.
-
verbosity
-
-Increases the volume of trace material written to the logfile.
-
use_mmap
-
-Whether to use the mmap module is determined heuristically. -Use this arg to override the result. Current heuristic: mmap is used if it exists.
-
file_contents
-
-... as a string or an mmap.mmap object or some other behave-alike object. -If file_contents is supplied, filename will not be used, except (possibly) in messages.
-
encoding_override
-
-Used to overcome missing or bad codepage information -in older-version files. Refer to discussion in the Unicode section above. -
-- New in version 0.6.0 - -
-
formatting_info
-
-Governs provision of a reference to an XF (eXtended Format) object -for each cell in the worksheet. -
Default is False. This is backwards compatible and saves memory. -"Blank" cells (those with their own formatting information but no data) are treated as empty -(by ignoring the file's BLANK and MULBLANK records). -It cuts off any bottom "margin" of rows of empty (and blank) cells and -any right "margin" of columns of empty (and blank) cells. -Only cell_value and cell_type are available. -
True provides all cells, including empty and blank cells. -XF information is available for each cell. -
-- New in version 0.6.1 - -
-
on_demand
-
-Governs whether sheets are all loaded initially or when demanded -by the caller. Please refer back to the section "Loading worksheets on demand" for details. -
-- New in version 0.7.1 - -
-
ragged_rows
-
-False (the default) means all rows are padded out with empty cells so that all -rows have the same size (Sheet.ncols). True means that there are no empty cells at the ends of rows. -This can result in substantial memory savings if rows are of widely varying sizes. See also the -Sheet.row_len() method. -
-- New in version 0.7.2 - -
-
Returns:
-
-An instance of the Book class.
-

-
-
Operand(akind=None, avalue=None, arank=0, atext='?') (class) [#]
-
-

Used in evaluating formulas.

-

For more information about this class, see The Operand Class.

-
-
rangename3d(book, ref3d) [#]
-
-

Utility function: -
Ref3D((1, 4, 5, 20, 7, 10)) => 'Sheet2:Sheet3!$H$6:$J$20' -

-
rangename3drel(book, ref3d, browx=None, bcolx=None, r1c1=0) [#]
-
-

Utility function: -
Ref3D(coords=(0, 1, -32, -22, -13, 13), relflags=(0, 0, 1, 1, 1, 1)) -R1C1 mode => 'Sheet1!R[-32]C[-13]:R[-23]C[12]' -A1 mode => depends on base cell (browx, bcolx) -

-
Ref3D(atuple) (class) [#]
-
-

Represents an absolute or relative 3-dimensional reference to a box -of one or more cells.

-

For more information about this class, see The Ref3D Class.

-
-
Rowinfo() (class) [#]
-
-

Height and default formatting information that applies to a row in a sheet.

-

For more information about this class, see The Rowinfo Class.

-
-
Sheet(book, position, name, number) (class) [#]
-
-

Contains the data for one worksheet.

-

For more information about this class, see The Sheet Class.

-
-
XF (class) [#]
-
-

eXtended Formatting information for cells, rows, columns and styles.

-

For more information about this class, see The XF Class.

-
-
XFAlignment (class) [#]
-
-

A collection of the alignment and similar attributes of an XF record.

-

For more information about this class, see The XFAlignment Class.

-
-
XFBackground (class) [#]
-
-

A collection of the background-related attributes of an XF record.

-

For more information about this class, see The XFBackground Class.

-
-
XFBorder (class) [#]
-
-

A collection of the border-related attributes of an XF record.

-

For more information about this class, see The XFBorder Class.

-
-
XFProtection (class) [#]
-
-

A collection of the protection-related attributes of an XF record.

-

For more information about this class, see The XFProtection Class.

-
-
xldate_as_tuple(xldate, datemode) [#]
-
-

Convert an Excel number (presumed to represent a date, a datetime or a time) into -a tuple suitable for feeding to datetime or mx.DateTime constructors.

-
-
xldate
-
-The Excel number
-
datemode
-
-0: 1900-based, 1: 1904-based. -
WARNING: when using this function to -interpret the contents of a workbook, you should pass in the Book.datemode -attribute of that workbook. Whether -the workbook has ever been anywhere near a Macintosh is irrelevant. -
-
Returns:
-
-Gregorian (year, month, day, hour, minute, nearest_second). -
Special case: if 0.0 <= xldate < 1.0, it is assumed to represent a time; -(0, 0, 0, hour, minute, second) will be returned. -
Note: 1904-01-01 is not regarded as a valid date in the datemode 1 system; its "serial number" -is zero. -
-
Raises XLDateNegative:
-xldate < 0.00 -
-
Raises XLDateAmbiguous:
-The 1900 leap-year problem (datemode == 0 and 1.0 <= xldate < 61.0) -
-
Raises XLDateTooLarge:
-Gregorian year 10000 or later
-
Raises XLDateBadDatemode:
-datemode arg is neither 0 nor 1
-
Raises XLDateError:
-Covers the 4 specific errors
-

-
-
xldate_from_date_tuple((year, month, day), datemode) [#]
-
-

Convert a date tuple (year, month, day) to an Excel date.

-
-
year
-
-Gregorian year.
-
month
-
-1 <= month <= 12 -
-
day
-
-1 <= day <= last day of that (year, month) -
-
datemode
-
-0: 1900-based, 1: 1904-based.
-
Raises XLDateAmbiguous:
-The 1900 leap-year problem (datemode == 0 and 1.0 <= xldate < 61.0) -
-
Raises XLDateBadDatemode:
-datemode arg is neither 0 nor 1
-
Raises XLDateBadTuple:
-(year, month, day) is too early/late or has invalid component(s)
-
Raises XLDateError:
-Covers the specific errors
-

-
-
xldate_from_datetime_tuple(datetime_tuple, datemode) [#]
-
-

Convert a datetime tuple (year, month, day, hour, minute, second) to an Excel date value. -For more details, refer to other xldate_from_*_tuple functions.

-
-
datetime_tuple
-
-(year, month, day, hour, minute, second)
-
datemode
-
-0: 1900-based, 1: 1904-based.
-

-
-
xldate_from_time_tuple((hour, minute, second)) [#]
-
-

Convert a time tuple (hour, minute, second) to an Excel "date" value (fraction of a day).

-
-
hour
-
-0 <= hour < 24 -
-
minute
-
-0 <= minute < 60 -
-
second
-
-0 <= second < 60 -
-
Raises XLDateBadTuple:
-Out-of-range hour, minute, or second
-

-
-
-

The BaseObject Class

-
-
BaseObject (class) [#]
-
-

Parent of almost all other classes in the package. Defines a common "dump" method -for debugging.

-
-
dump(f=None, header=None, footer=None, indent=0) [#]
-
-
-
f
-
-open file object, to which the dump is written
-
header
-
-text to write before the dump
-
footer
-
-text to write after the dump
-
indent
-
-number of leading spaces (for recursive calls)
-

-
-
-

The Book Class

-
-
Book() (class) [#]
-
-

Contents of a "workbook". -

WARNING: You don't call this class yourself. You use the Book object that -was returned when you called xlrd.open_workbook("myfile.xls").

-
-
biff_version [#]
-
-

Version of BIFF (Binary Interchange File Format) used to create the file. -Latest is 8.0 (represented here as 80), introduced with Excel 97. -Earliest supported by this module: 2.0 (represented as 20).

-
-
codepage [#]
-
-

An integer denoting the character set used for strings in this file. -For BIFF 8 and later, this will be 1200, meaning Unicode; more precisely, UTF_16_LE. -For earlier versions, this is used to derive the appropriate Python encoding -to be used to convert to Unicode. -Examples: 1252 -> 'cp1252', 10000 -> 'mac_roman'

-
-
colour_map [#]
-
-

This provides definitions for colour indexes. Please refer to the -above section "The Palette; Colour Indexes" for an explanation -of how colours are represented in Excel.
-Colour indexes into the palette map into (red, green, blue) tuples. -"Magic" indexes e.g. 0x7FFF map to None. -colour_map is what you need if you want to render cells on screen or in a PDF -file. If you are writing an output XLS file, use palette_record. -
-- New in version 0.6.1. Extracted only if open_workbook(..., formatting_info=True) -

-
countries [#]
-
-

A tuple containing the (telephone system) country code for:
- [0]: the user-interface setting when the file was created.
- [1]: the regional settings.
-Example: (1, 61) meaning (USA, Australia). -This information may give a clue to the correct encoding for an unknown codepage. -For a long list of observed values, refer to the OpenOffice.org documentation for -the COUNTRY record. -

-
datemode [#]
-
-

Which date system was in force when this file was last saved.
- 0 => 1900 system (the Excel for Windows default).
- 1 => 1904 system (the Excel for Macintosh default).
-

-
encoding [#]
-
-

The encoding that was derived from the codepage.

-
-
font_list [#]
-
-

A list of Font class instances, each corresponding to a FONT record. -
-- New in version 0.6.1 -

-
format_list [#]
-
-

A list of Format objects, each corresponding to a FORMAT record, in -the order that they appear in the input file. -It does not contain builtin formats. -If you are creating an output file using (for example) pyExcelerator, -use this list. -The collection to be used for all visual rendering purposes is format_map. -
-- New in version 0.6.1 -

-
format_map [#]
-
-

The mapping from XF.format_key to Format object. -
-- New in version 0.6.1 -

-
load_time_stage_1 [#]
-
-

Time in seconds to extract the XLS image as a contiguous string (or mmap equivalent).

-
-
load_time_stage_2 [#]
-
-

Time in seconds to parse the data from the contiguous string (or mmap equivalent).

-
-
name_and_scope_map [#]
-
-

A mapping from (lower_case_name, scope) to a single Name object. -
-- New in version 0.6.0 -

-
name_map [#]
-
-

A mapping from lower_case_name to a list of Name objects. The list is -sorted in scope order. Typically there will be one item (of global scope) -in the list. -
-- New in version 0.6.0 -

-
name_obj_list [#]
-
-

List containing a Name object for each NAME record in the workbook. -
-- New in version 0.6.0 -

-
nsheets [#]
-
-

The number of worksheets present in the workbook file. -This information is available even when no sheets have yet been loaded.

-
-
palette_record [#]
-
-

If the user has changed any of the colours in the standard palette, the XLS -file will contain a PALETTE record with 56 (16 for Excel 4.0 and earlier) -RGB values in it, and this list will be e.g. [(r0, b0, g0), ..., (r55, b55, g55)]. -Otherwise this list will be empty. This is what you need if you are -writing an output XLS file. If you want to render cells on screen or in a PDF -file, use colour_map. -
-- New in version 0.6.1. Extracted only if open_workbook(..., formatting_info=True) -

-
release_resources() [#]
-
-

This method has a dual purpose. You can call it to release -memory-consuming objects and (possibly) a memory-mapped file -(mmap.mmap object) when you have finished loading sheets in -on_demand mode, but still require the Book object to examine the -loaded sheets. It is also called automatically (a) when open_workbook -raises an exception and (b) if you are using a "with" statement, when -the "with" block is exited. Calling this method multiple times on the -same object has no ill effect.

-
-
sheet_by_index(sheetx) [#]
-
-
-
sheetx
-
-Sheet index in range(nsheets)
-
Returns:
-
-An object of the Sheet class
-

-
-
sheet_by_name(sheet_name) [#]
-
-
-
sheet_name
-
-Name of sheet required
-
Returns:
-
-An object of the Sheet class
-

-
-
sheet_loaded(sheet_name_or_index) [#]
-
-
-
sheet_name_or_index
-
-Name or index of sheet enquired upon
-
Returns:
-
-true if sheet is loaded, false otherwise -
-- New in version 0.7.1 -
-

-
-
sheet_names() [#]
-
-
-
Returns:
-
-A list of the names of all the worksheets in the workbook file. -This information is available even when no sheets have yet been loaded.
-

-
-
sheets() [#]
-
-
-
Returns:
-
-A list of all sheets in the book. -All sheets not already loaded will be loaded.
-

-
-
style_name_map [#]
-
-

This provides access via name to the extended format information for -both built-in styles and user-defined styles.
-It maps name to (built_in, xf_index), where:
-name is either the name of a user-defined style, -or the name of one of the built-in styles. Known built-in names are -Normal, RowLevel_1 to RowLevel_7, -ColLevel_1 to ColLevel_7, Comma, Currency, Percent, "Comma [0]", -"Currency [0]", Hyperlink, and "Followed Hyperlink".
-built_in 1 = built-in style, 0 = user-defined
-xf_index is an index into Book.xf_list.
-References: OOo docs s6.99 (STYLE record); Excel UI Format/Style -
-- New in version 0.6.1; since 0.7.4, extracted only if -open_workbook(..., formatting_info=True) -

-
unload_sheet(sheet_name_or_index) [#]
-
-
-
sheet_name_or_index
-
-Name or index of sheet to be unloaded. -
-- New in version 0.7.1 -
-

-
-
user_name [#]
-
-

What (if anything) is recorded as the name of the last user to save the file.

-
-
xf_list [#]
-
-

A list of XF class instances, each corresponding to an XF record. -
-- New in version 0.6.1 -

-
-

The Cell Class

-
-
Cell(ctype, value, xf_index=None) (class) [#]
-
-

Contains the data for one cell.

- -

WARNING: You don't call this class yourself. You access Cell objects -via methods of the Sheet object(s) that you found in the Book object that -was returned when you called xlrd.open_workbook("myfile.xls").

-

Cell objects have three attributes: ctype is an int, value -(which depends on ctype) and xf_index. -If "formatting_info" is not enabled when the workbook is opened, xf_index will be None. -The following table describes the types of cells and how their values -are represented in Python.

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Type symbolType numberPython value
XL_CELL_EMPTY0empty string u''
XL_CELL_TEXT1a Unicode string
XL_CELL_NUMBER2float
XL_CELL_DATE3float
XL_CELL_BOOLEAN4int; 1 means TRUE, 0 means FALSE
XL_CELL_ERROR5int representing internal Excel codes; for a text representation, -refer to the supplied dictionary error_text_from_code
XL_CELL_BLANK6empty string u''. Note: this type will appear only when -open_workbook(..., formatting_info=True) is used.
-

-

-
-

The Colinfo Class

-
-
Colinfo (class) [#]
-
-

Width and default formatting information that applies to one or -more columns in a sheet. Derived from COLINFO records. - -

Here is the default hierarchy for width, according to the OOo docs: - -
"""In BIFF3, if a COLINFO record is missing for a column, -the width specified in the record DEFCOLWIDTH is used instead. - -
In BIFF4-BIFF7, the width set in this [COLINFO] record is only used, -if the corresponding bit for this column is cleared in the GCW -record, otherwise the column width set in the DEFCOLWIDTH record -is used (the STANDARDWIDTH record is always ignored in this case [see footnote!]). - -
In BIFF8, if a COLINFO record is missing for a column, -the width specified in the record STANDARDWIDTH is used. -If this [STANDARDWIDTH] record is also missing, -the column width of the record DEFCOLWIDTH is used instead.""" -
- -Footnote: The docs on the GCW record say this: -"""
-If a bit is set, the corresponding column uses the width set in the STANDARDWIDTH -record. If a bit is cleared, the corresponding column uses the width set in the -COLINFO record for this column. -
If a bit is set, and the worksheet does not contain the STANDARDWIDTH record, or if -the bit is cleared, and the worksheet does not contain the COLINFO record, the DEFCOLWIDTH -record of the worksheet will be used instead. -
"""
-At the moment (2007-01-17) xlrd is going with the GCW version of the story. -Reference to the source may be useful: see the computed_column_width(colx) method -of the Sheet class. -
-- New in version 0.6.1 -

-
-
bit1_flag [#]
-
-

Value of a 1-bit flag whose purpose is unknown -but is often seen set to 1

-
-
collapsed [#]
-
-

1 = column is collapsed

-
-
hidden [#]
-
-

1 = column is hidden

-
-
outline_level [#]
-
-

Outline level of the column, in range(7). -(0 = no outline)

-
-
width [#]
-
-

Width of the column in 1/256 of the width of the zero character, -using default font (first FONT record in the file).

-
-
xf_index [#]
-
-

XF index to be used for formatting empty cells.

-
-
-

The EqNeAttrs Class

-
-
EqNeAttrs (class) [#]
-
-

This mixin class exists solely so that Format, Font, and XF.... objects -can be compared by value of their attributes.

-
-
-

The Font Class

-
-
Font (class) [#]
-
-

An Excel "font" contains the details of not only what is normally -considered a font, but also several other display attributes. -Items correspond to those in the Excel UI's Format/Cells/Font tab. -
-- New in version 0.6.1 -

-
bold [#]
-
-

1 = Characters are bold. Redundant; see "weight" attribute.

-
-
character_set [#]
-
-

Values: 0 = ANSI Latin, 1 = System default, 2 = Symbol, -77 = Apple Roman, -128 = ANSI Japanese Shift-JIS, -129 = ANSI Korean (Hangul), -130 = ANSI Korean (Johab), -134 = ANSI Chinese Simplified GBK, -136 = ANSI Chinese Traditional BIG5, -161 = ANSI Greek, -162 = ANSI Turkish, -163 = ANSI Vietnamese, -177 = ANSI Hebrew, -178 = ANSI Arabic, -186 = ANSI Baltic, -204 = ANSI Cyrillic, -222 = ANSI Thai, -238 = ANSI Latin II (Central European), -255 = OEM Latin I

-
-
colour_index [#]
-
-

An explanation of "colour index" is given in the Formatting -section at the start of this document.

-
-
escapement [#]
-
-

1 = Superscript, 2 = Subscript.

-
-
family [#]
-
-

0 = None (unknown or don't care)
-1 = Roman (variable width, serifed)
-2 = Swiss (variable width, sans-serifed)
-3 = Modern (fixed width, serifed or sans-serifed)
-4 = Script (cursive)
-5 = Decorative (specialised, for example Old English, Fraktur) -

-
font_index [#]
-
-

The 0-based index used to refer to this Font() instance. -Note that index 4 is never used; xlrd supplies a dummy place-holder.

-
-
height [#]
-
-

Height of the font (in twips). A twip = 1/20 of a point.

-
-
italic [#]
-
-

1 = Characters are italic.

-
-
name [#]
-
-

The name of the font. Example: u"Arial"

-
-
outline [#]
-
-

1 = Font is outline style (Macintosh only)

-
-
shadow [#]
-
-

1 = Font is shadow style (Macintosh only)

-
-
struck_out [#]
-
-

1 = Characters are struck out.

-
-
underline_type [#]
-
-

0 = None
-1 = Single; 0x21 (33) = Single accounting
-2 = Double; 0x22 (34) = Double accounting -

-
underlined [#]
-
-

1 = Characters are underlined. Redundant; see "underline_type" attribute.

-
-
weight [#]
-
-

Font weight (100-1000). Standard values are 400 for normal text -and 700 for bold text.

-
-
-

The Format Class

-
-
Format(format_key, ty, format_str) (class) [#]
-
-

"Number format" information from a FORMAT record. -
-- New in version 0.6.1 -

-
format_key [#]
-
-

The key into Book.format_map

-
-
format_str [#]
-
-

The format string

-
-
type [#]
-
-

A classification that has been inferred from the format string. -Currently, this is used only to distinguish between numbers and dates. -
Values: -
FUN = 0 # unknown -
FDT = 1 # date -
FNU = 2 # number -
FGE = 3 # general -
FTX = 4 # text -

-
-

The Hyperlink Class

-
-
Hyperlink (class) [#]
-
-

Contains the attributes of a hyperlink. -Hyperlink objects are accessible through Sheet.hyperlink_list -and Sheet.hyperlink_map. -
-- New in version 0.7.2 -

-
-
desc [#]
-
-

Description ... this is displayed in the cell, -and should be identical to the cell value. Unicode string, or None. It seems -impossible NOT to have a description created by the Excel UI.

-
-
fcolx [#]
-
-

Index of first column

-
-
frowx [#]
-
-

Index of first row

-
-
lcolx [#]
-
-

Index of last column

-
-
lrowx [#]
-
-

Index of last row

-
-
quicktip [#]
-
-

The text of the "quick tip" displayed when the cursor -hovers over the hyperlink.

-
-
target [#]
-
-

Target frame. Unicode string. Note: I have not seen a case of this. -It seems impossible to create one in the Excel UI.

-
-
textmark [#]
-
-

"Textmark": the piece after the "#" in -"http://docs.python.org/library#struct_module", or the Sheet1!A1:Z99 -part when type is "workbook".

-
-
type [#]
-
-

Type of hyperlink. Unicode string, one of 'url', 'unc', -'local file', 'workbook', 'unknown'

-
-
url_or_path [#]
-
-

-The URL or file-path, depending on the type. Unicode string, except -in the rare case of a local but non-existent file with non-ASCII -characters in the name, in which case only the "8.3" filename is available, -as a bytes (3.x) or str (2.x) string, with unknown encoding. -

-
-

The Name Class

-
-
Name (class) [#]
-
-

Information relating to a named reference, formula, macro, etc. -
-- New in version 0.6.0 -
-- Name information is not extracted from files older than -Excel 5.0 (Book.biff_version < 50) -

-
area2d(clipped=True) [#]
-
-

This is a convenience method for the use case where the name -refers to one rectangular area in one worksheet.

-
-
clipped
-
-If true (the default), the returned rectangle is clipped -to fit in (0, sheet.nrows, 0, sheet.ncols) -- it is guaranteed that -0 <= rowxlo <= rowxhi <= sheet.nrows and that the number of usable rows -in the area (which may be zero) is rowxhi - rowxlo; likewise for columns. -
-
Returns:
-
-a tuple (sheet_object, rowxlo, rowxhi, colxlo, colxhi).
-
Raises XLRDError:
-The name is not a constant absolute reference -to a single area in a single sheet.
-

-
-
binary [#]
-
-

0 = Formula definition; 1 = Binary data
No examples have been sighted. -

-
builtin [#]
-
-

0 = User-defined name; 1 = Built-in name -(common examples: Print_Area, Print_Titles; see OOo docs for full list)

-
-
cell() [#]
-
-

This is a convenience method for the frequent use case where the name -refers to a single cell.

-
-
Returns:
-
-An instance of the Cell class.
-
Raises XLRDError:
-The name is not a constant absolute reference -to a single cell.
-

-
-
complex [#]
-
-

0 = Simple formula; 1 = Complex formula (array formula or user defined)
-No examples have been sighted. -

-
func [#]
-
-

0 = Command macro; 1 = Function macro. Relevant only if macro == 1

-
-
funcgroup [#]
-
-

Function group. Relevant only if macro == 1; see OOo docs for values.

-
-
hidden [#]
-
-

0 = Visible; 1 = Hidden

-
-
macro [#]
-
-

0 = Standard name; 1 = Macro name

-
-
name [#]
-
-

A Unicode string. If builtin, decoded as per OOo docs.

-
-
name_index [#]
-
-

The index of this object in book.name_obj_list

-
-
raw_formula [#]
-
-

An 8-bit string.

-
-
result [#]
-
-

The result of evaluating the formula, if any. -If no formula, or evaluation of the formula encountered problems, -the result is None. Otherwise the result is a single instance of the -Operand class.

-
-
scope [#]
-
-

-1: The name is global (visible in all calculation sheets).
--2: The name belongs to a macro sheet or VBA sheet.
--3: The name is invalid.
-0 <= scope < book.nsheets: The name is local to the sheet whose index is scope. -

-
vbasic [#]
-
-

0 = Sheet macro; 1 = VisualBasic macro. Relevant only if macro == 1

-
-
-

The Note Class

-
-
Note (class) [#]
-
-

Represents a user "comment" or "note". -Note objects are accessible through Sheet.cell_note_map. -
-- New in version 0.7.2 -

-
-
author [#]
-
-

Author of note

-
-
col_hidden [#]
-
-

True if the containing column is hidden

-
-
colx [#]
-
-

Column index

-
-
rich_text_runlist [#]
-
-

List of (offset_in_string, font_index) tuples. -Unlike Sheet.rich_text_runlist_map, the first offset should always be 0. -

-
row_hidden [#]
-
-

True if the containing row is hidden

-
-
rowx [#]
-
-

Row index

-
-
show [#]
-
-

True if note is always shown

-
-
text [#]
-
-

Text of the note

-
-
-

The Operand Class

-
-
Operand(akind=None, avalue=None, arank=0, atext='?') (class) [#]
-
-

Used in evaluating formulas. -The following table describes the kinds and how their values -are represented.

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Kind symbolKind numberValue representation
oBOOL3integer: 0 => False; 1 => True
oERR4None, or an int error code (same as XL_CELL_ERROR in the Cell class). -
oMSNG5Used by Excel as a placeholder for a missing (not supplied) function -argument. Should *not* appear as a final formula result. Value is None.
oNUM2A float. Note that there is no way of distinguishing dates.
oREF-1The value is either None or a non-empty list of -absolute Ref3D instances.
-
oREL-2The value is None or a non-empty list of -fully or partially relative Ref3D instances. -
oSTRG1A Unicode string.
oUNK0The kind is unknown or ambiguous. The value is None
-

-

-
kind [#]
-
-

oUNK means that the kind of operand is not known unambiguously.

-
-
text [#]
-
-

The reconstituted text of the original formula. Function names will be -in English irrespective of the original language, which doesn't seem -to be recorded anywhere. The separator is ",", not ";" or whatever else -might be more appropriate for the end-user's locale; patches welcome.

-
-
value [#]
-
-

None means that the actual value of the operand is a variable -(depends on cell data), not a constant.

-
-
-

The Ref3D Class

-
-
Ref3D(atuple) (class) [#]
-
-

Represents an absolute or relative 3-dimensional reference to a box -of one or more cells.
--- New in version 0.6.0 -

- -

The coords attribute is a tuple of the form:
-(shtxlo, shtxhi, rowxlo, rowxhi, colxlo, colxhi)
-where 0 <= thingxlo <= thingx < thingxhi.
-Note that it is quite possible to have thingx > nthings; for example -Print_Titles could have colxhi == 256 and/or rowxhi == 65536 -irrespective of how many columns/rows are actually used in the worksheet. -The caller will need to decide how to handle this situation. -Keyword: IndexError :-) -

- -

The components of the coords attribute are also available as individual -attributes: shtxlo, shtxhi, rowxlo, rowxhi, colxlo, and colxhi.

- -

The relflags attribute is a 6-tuple of flags which indicate whether -the corresponding (sheet|row|col)(lo|hi) is relative (1) or absolute (0).
-Note that there is necessarily no information available as to what cell(s) -the reference could possibly be relative to. The caller must decide what if -any use to make of oREL operands. Note also that a partially relative -reference may well be a typo. -For example, define name A1Z10 as $a$1:$z10 (missing $ after z) -while the cursor is on cell Sheet3!A27.
-The resulting Ref3D instance will have coords = (2, 3, 0, -16, 0, 26) -and relflags = (0, 0, 0, 1, 0, 0).
-So far, only one possibility of a sheet-relative component in -a reference has been noticed: a 2D reference located in the "current sheet". -
This will appear as coords = (0, 1, ...) and relflags = (1, 1, ...). -

-
-

The Rowinfo Class

-
-
Rowinfo() (class) [#]
-
-

Height and default formatting information that applies to a row in a sheet. -Derived from ROW records. -
-- New in version 0.6.1

- -

height: Height of the row, in twips. One twip == 1/20 of a point.

- -

has_default_height: 0 = Row has custom height; 1 = Row has default height.

- -

outline_level: Outline level of the row (0 to 7)

- -

outline_group_starts_ends: 1 = Outline group starts or ends here (depending on where the -outline buttons are located, see WSBOOL record [TODO ??]), -and is collapsed

- -

hidden: 1 = Row is hidden (manually, or by a filter or outline group)

- -

height_mismatch: 1 = Row height and default font height do not match

- -

has_default_xf_index: 1 = the xf_index attribute is usable; 0 = ignore it

- -

xf_index: Index to default XF record for empty cells in this row. -Don't use this if has_default_xf_index == 0.

- -

additional_space_above: This flag is set, if the upper border of at least one cell in this row -or if the lower border of at least one cell in the row above is -formatted with a thick line style. Thin and medium line styles are not -taken into account.

- -

additional_space_below: This flag is set, if the lower border of at least one cell in this row -or if the upper border of at least one cell in the row below is -formatted with a medium or thick line style. Thin line styles are not -taken into account.

-
-
-

The Sheet Class

-
-
Sheet(book, position, name, number) (class) [#]
-
-

Contains the data for one worksheet.

- -

In the cell access functions, "rowx" is a row index, counting from zero, and "colx" is a -column index, counting from zero. -Negative values for row/column indexes and slice positions are supported in the expected fashion.

- -

For information about cell types and cell values, refer to the documentation of the Cell class.

- -

WARNING: You don't call this class yourself. You access Sheet objects via the Book object that -was returned when you called xlrd.open_workbook("myfile.xls").

-
-
book [#]
-
-

A reference to the Book object to which this sheet belongs. -Example usage: some_sheet.book.datemode

-
-
cell(rowx, colx) [#]
-
-

Cell object in the given row and column. -

-
cell_note_map [#]
-
-

A sparse mapping from (rowx, colx) to a Note object. -Cells not containing a note ("comment") are not mapped. -
-- New in version 0.7.2

-
-
cell_type(rowx, colx) [#]
-
-

Type of the cell in the given row and column. -Refer to the documentation of the Cell class. -

-
cell_value(rowx, colx) [#]
-
-

Value of the cell in the given row and column.

-
-
cell_xf_index(rowx, colx) [#]
-
-

XF index of the cell in the given row and column. -This is an index into Book.xf_list. -
-- New in version 0.6.1 -

-
col(colx) [#]
-
-

Returns a sequence of the Cell objects in the given column. -

-
col_label_ranges [#]
-
-

List of address ranges of cells containing column labels. -These are set up in Excel by Insert > Name > Labels > Columns. -
-- New in version 0.6.0 -
How to deconstruct the list: -

-for crange in thesheet.col_label_ranges:
-    rlo, rhi, clo, chi = crange
-    for rx in xrange(rlo, rhi):
-        for cx in xrange(clo, chi):
-            print "Column label at (rowx=%d, colx=%d) is %r" \
-                % (rx, cx, thesheet.cell_value(rx, cx))
-
-

-
col_slice(colx, start_rowx=0, end_rowx=None) [#]
-
-

Returns a slice of the Cell objects in the given column. -

-
col_types(colx, start_rowx=0, end_rowx=None) [#]
-
-

Returns a slice of the types of the cells in the given column.

-
-
col_values(colx, start_rowx=0, end_rowx=None) [#]
-
-

Returns a slice of the values of the cells in the given column.

-
-
colinfo_map [#]
-
-

The map from a column index to a Colinfo object. Often there is an entry -in COLINFO records for all column indexes in range(257). -Note that xlrd ignores the entry for the non-existent -257th column. On the other hand, there may be no entry for unused columns. -
-- New in version 0.6.1. Populated only if open_workbook(formatting_info=True). -

-
computed_column_width(colx) [#]
-
-

Determine column display width. -
-- New in version 0.6.1 -
-

-
colx
-
-Index of the queried column, range 0 to 255. -Note that it is possible to find out the width that will be used to display -columns with no cell information e.g. column IV (colx=255).
-
Returns:
-
-The column width that will be used for displaying -the given column by Excel, in units of 1/256th of the width of a -standard character (the digit zero in the first font).
-

-
-
default_additional_space_above [#]
-
-

Default value to be used for a row if there is -no ROW record for that row. -From the optional DEFAULTROWHEIGHT record. -

-
default_additional_space_below [#]
-
-

Default value to be used for a row if there is -no ROW record for that row. -From the optional DEFAULTROWHEIGHT record. -

-
default_row_height [#]
-
-

Default value to be used for a row if there is -no ROW record for that row. -From the optional DEFAULTROWHEIGHT record. -

-
default_row_height_mismatch [#]
-
-

Default value to be used for a row if there is -no ROW record for that row. -From the optional DEFAULTROWHEIGHT record. -

-
default_row_hidden [#]
-
-

Default value to be used for a row if there is -no ROW record for that row. -From the optional DEFAULTROWHEIGHT record. -

-
defcolwidth [#]
-
-

Default column width from DEFCOLWIDTH record, else None. -From the OOo docs:
-"""Column width in characters, using the width of the zero character -from default font (first FONT record in the file). Excel adds some -extra space to the default width, depending on the default font and -default font size. The algorithm how to exactly calculate the resulting -column width is not known.
-Example: The default width of 8 set in this record results in a column -width of 8.43 using Arial font with a size of 10 points."""
-For the default hierarchy, refer to the Colinfo class. -
-- New in version 0.6.1 -

-
gcw [#]
-
-

A 256-element tuple corresponding to the contents of the GCW record for this sheet. -If no such record, treat as all bits zero. -Applies to BIFF4-7 only. See docs of the Colinfo class for discussion. -

-
has_pane_record [#]
-
-

Boolean specifying if a PANE record was present, ignore unless you're xlutils.copy

-
-
horizontal_page_breaks [#]
-
-

A list of the horizontal page breaks in this sheet. -Breaks are tuples in the form (index of row after break, start col index, end col index). -Populated only if open_workbook(formatting_info=True). -
-- New in version 0.7.2 -

-
horz_split_first_visible [#]
-
-

Index of first visible row in bottom frozen/split pane

-
-
horz_split_pos [#]
-
-

Number of rows in top pane (frozen panes; for split panes, see comments below in code)

-
-
hyperlink_list [#]
-
-

A list of Hyperlink objects corresponding to HLINK records found -in the worksheet.
-- New in version 0.7.2

-
-
hyperlink_map [#]
-
-

A sparse mapping from (rowx, colx) to an item in hyperlink_list. -Cells not covered by a hyperlink are not mapped. -It is possible using the Excel UI to set up a hyperlink that -covers a larger-than-1x1 rectangle of cells. -Hyperlink rectangles may overlap (Excel doesn't check). -When a multiply-covered cell is clicked on, the hyperlink that is activated -(and the one that is mapped here) is the last in hyperlink_list. -
-- New in version 0.7.2

-
-
merged_cells [#]
-
-

List of address ranges of cells which have been merged. -These are set up in Excel by Format > Cells > Alignment, then ticking -the "Merge cells" box. -
-- New in version 0.6.1. Extracted only if open_workbook(formatting_info=True). -
How to deconstruct the list: -

-for crange in thesheet.merged_cells:
-    rlo, rhi, clo, chi = crange
-    for rowx in xrange(rlo, rhi):
-        for colx in xrange(clo, chi):
-            # cell (rlo, clo) (the top left one) will carry the data
-            # and formatting info; the remainder will be recorded as
-            # blank cells, but a renderer will apply the formatting info
-            # for the top left cell (e.g. border, pattern) to all cells in
-            # the range.
-
-

-
name [#]
-
-

Name of sheet.

-
-
ncols [#]
-
-

Nominal number of columns in sheet. It is 1 + the maximum column index -found, ignoring trailing empty cells. See also open_workbook(ragged_rows=?) -and Sheet.row_len(row_index). -

-
nrows [#]
-
-

Number of rows in sheet. A row index is in range(thesheet.nrows).

-
-
rich_text_runlist_map [#]
-
-

Mapping of (rowx, colx) to list of (offset, font_index) tuples. The offset -defines where in the string the font begins to be used. -Offsets are expected to be in ascending order. -If the first offset is not zero, the meaning is that the cell's XF's font should -be used from offset 0. -
This is a sparse mapping. There is no entry for cells that are not formatted with -rich text. -
How to use: -

-runlist = thesheet.rich_text_runlist_map.get((rowx, colx))
-if runlist:
-    for offset, font_index in runlist:
-        # do work here.
-        pass
-
-Populated only if open_workbook(formatting_info=True). -
-- New in version 0.7.2. -
  -

-
row(rowx) [#]
-
-

Returns a sequence of the Cell objects in the given row. -

-
row_label_ranges [#]
-
-

List of address ranges of cells containing row labels. -For more details, see col_label_ranges above. -
-- New in version 0.6.0 -

-
row_len(rowx) [#]
-
-

Returns the effective number of cells in the given row. For use with -open_workbook(ragged_rows=True) which is likely to produce rows -with fewer than ncols cells. -
-- New in version 0.7.2 -

-
row_slice(rowx, start_colx=0, end_colx=None) [#]
-
-

Returns a slice of the Cell objects in the given row. -

-
row_types(rowx, start_colx=0, end_colx=None) [#]
-
-

Returns a slice of the types -of the cells in the given row.

-
-
row_values(rowx, start_colx=0, end_colx=None) [#]
-
-

Returns a slice of the values -of the cells in the given row.

-
-
rowinfo_map [#]
-
-

The map from a row index to a Rowinfo object. Note that it is possible -to have missing entries -- at least one source of XLS files doesn't -bother writing ROW records. -
-- New in version 0.6.1. Populated only if open_workbook(formatting_info=True). -

-
split_active_pane [#]
-
-

Frozen panes: ignore it. Split panes: explanation and diagrams in OOo docs.

-
-
standardwidth [#]
-
-

Default column width from STANDARDWIDTH record, else None. -From the OOo docs:
-"""Default width of the columns in 1/256 of the width of the zero -character, using default font (first FONT record in the file)."""
-For the default hierarchy, refer to the Colinfo class. -
-- New in version 0.6.1 -

-
vert_split_first_visible [#]
-
-

Index of first visible column in right frozen/split pane

-
-
vert_split_pos [#]
-
-

Number of columns in left pane (frozen panes; for split panes, see comments below in code)

-
-
vertical_page_breaks [#]
-
-

A list of the vertical page breaks in this sheet. -Breaks are tuples in the form (index of col after break, start row index, end row index). -Populated only if open_workbook(formatting_info=True). -
-- New in version 0.7.2 -

-
visibility [#]
-
-

Visibility of the sheet. 0 = visible, 1 = hidden (can be unhidden -by user -- Format/Sheet/Unhide), 2 = "very hidden" (can be unhidden -only by VBA macro).

-
-
-

The XF Class

-
-
XF (class) [#]
-
-

eXtended Formatting information for cells, rows, columns and styles. -
-- New in version 0.6.1 - -

Each of the 6 flags below describes the validity of -a specific group of attributes. -
-In cell XFs, flag==0 means the attributes of the parent style XF are used, -(but only if the attributes are valid there); flag==1 means the attributes -of this XF are used.
-In style XFs, flag==0 means the attribute setting is valid; flag==1 means -the attribute should be ignored.
-Note that the API -provides both "raw" XFs and "computed" XFs -- in the latter case, cell XFs -have had the above inheritance mechanism applied. -

-
-
_alignment_flag [#]
-
-
-
_background_flag [#]
-
-
-
_border_flag [#]
-
-
-
_font_flag [#]
-
-
-
_format_flag [#]
-
-
-
_protection_flag [#]
-
-

  -

-
alignment [#]
-
-

An instance of an XFAlignment object.

-
-
background [#]
-
-

An instance of an XFBackground object.

-
-
border [#]
-
-

An instance of an XFBorder object.

-
-
font_index [#]
-
-

Index into Book.font_list

-
-
format_key [#]
-
-

Key into Book.format_map -

-Warning: OOo docs on the XF record call this "Index to FORMAT record". -It is not an index in the Python sense. It is a key to a map. -It is true only for Excel 4.0 and earlier files -that the key into format_map from an XF instance -is the same as the index into format_list, and only -if the index is less than 164. -

-
-
is_style [#]
-
-

0 = cell XF, 1 = style XF

-
-
parent_style_index [#]
-
-

cell XF: Index into Book.xf_list -of this XF's style XF
-style XF: 0xFFF -

-
protection [#]
-
-

An instance of an XFProtection object.

-
-
xf_index [#]
-
-

Index into Book.xf_list

-
-
-

The XFAlignment Class

-
-
XFAlignment (class) [#]
-
-

A collection of the alignment and similar attributes of an XF record. -Items correspond to those in the Excel UI's Format/Cells/Alignment tab. -
-- New in version 0.6.1 -

-
hor_align [#]
-
-

Values: section 6.115 (p 214) of OOo docs

-
-
indent_level [#]
-
-

A number in range(15).

-
-
rotation [#]
-
-

Values: section 6.115 (p 215) of OOo docs.
-Note: file versions BIFF7 and earlier use the documented -"orientation" attribute; this will be mapped (without loss) -into "rotation". -

-
shrink_to_fit [#]
-
-

1 = shrink font size to fit text into cell.

-
-
text_direction [#]
-
-

0 = according to context; 1 = left-to-right; 2 = right-to-left

-
-
text_wrapped [#]
-
-

1 = text is wrapped at right margin

-
-
vert_align [#]
-
-

Values: section 6.115 (p 215) of OOo docs

-
-
-

The XFBackground Class

-
-
XFBackground (class) [#]
-
-

A collection of the background-related attributes of an XF record. -Items correspond to those in the Excel UI's Format/Cells/Patterns tab. -An explanation of "colour index" is given in the Formatting -section at the start of this document. -
-- New in version 0.6.1 -

-
background_colour_index [#]
-
-

See section 3.11 of the OOo docs.

-
-
fill_pattern [#]
-
-

See section 3.11 of the OOo docs.

-
-
pattern_colour_index [#]
-
-

See section 3.11 of the OOo docs.

-
-
-

The XFBorder Class

-
-
XFBorder (class) [#]
-
-

A collection of the border-related attributes of an XF record. -Items correspond to those in the Excel UI's Format/Cells/Border tab.

-

An explanations of "colour index" is given in the Formatting -section at the start of this document. -There are five line style attributes; possible values and the -associated meanings are: -0 = No line, -1 = Thin, -2 = Medium, -3 = Dashed, -4 = Dotted, -5 = Thick, -6 = Double, -7 = Hair, -8 = Medium dashed, -9 = Thin dash-dotted, -10 = Medium dash-dotted, -11 = Thin dash-dot-dotted, -12 = Medium dash-dot-dotted, -13 = Slanted medium dash-dotted. -The line styles 8 to 13 appear in BIFF8 files (Excel 97 and later) only. -For pictures of the line styles, refer to OOo docs s3.10 (p22) -"Line Styles for Cell Borders (BIFF3-BIFF8)".

-
-- New in version 0.6.1 -
-
bottom_colour_index [#]
-
-

The colour index for the cell's bottom line

-
-
bottom_line_style [#]
-
-

The line style for the cell's bottom line

-
-
diag_colour_index [#]
-
-

The colour index for the cell's diagonal lines, if any

-
-
diag_down [#]
-
-

1 = draw a diagonal from top left to bottom right

-
-
diag_line_style [#]
-
-

The line style for the cell's diagonal lines, if any

-
-
diag_up [#]
-
-

1 = draw a diagonal from bottom left to top right

-
-
left_colour_index [#]
-
-

The colour index for the cell's left line

-
-
left_line_style [#]
-
-

The line style for the cell's left line

-
-
right_colour_index [#]
-
-

The colour index for the cell's right line

-
-
right_line_style [#]
-
-

The line style for the cell's right line

-
-
top_colour_index [#]
-
-

The colour index for the cell's top line

-
-
top_line_style [#]
-
-

The line style for the cell's top line

-
-
-

The XFProtection Class

-
-
XFProtection (class) [#]
-
-

A collection of the protection-related attributes of an XF record. -Items correspond to those in the Excel UI's Format/Cells/Protection tab. -Note the OOo docs include the "cell or style" bit -in this bundle of attributes. -This is incorrect; the bit is used in determining which bundles to use. -
-- New in version 0.6.1 -

-
cell_locked [#]
-
-

1 = Cell is prevented from being changed, moved, resized, or deleted -(only if the sheet is protected).

-
-
formula_hidden [#]
-
-

1 = Hide formula so that it doesn't appear in the formula bar when -the cell is selected (only if the sheet is protected).

-
-
- diff --git a/SUEWSPrepare/Modules/xlrd/examples/namesdemo.xls b/SUEWSPrepare/Modules/xlrd/examples/namesdemo.xls deleted file mode 100644 index 8a16865..0000000 Binary files a/SUEWSPrepare/Modules/xlrd/examples/namesdemo.xls and /dev/null differ diff --git a/SUEWSPrepare/Modules/xlrd/examples/xlrdnameAPIdemo.py b/SUEWSPrepare/Modules/xlrd/examples/xlrdnameAPIdemo.py deleted file mode 100644 index b29a827..0000000 --- a/SUEWSPrepare/Modules/xlrd/examples/xlrdnameAPIdemo.py +++ /dev/null @@ -1,179 +0,0 @@ -# -*- coding: cp1252 -*- - -## -# Module/script example of the xlrd API for extracting information -# about named references, named constants, etc. -# -#

Copyright 2006 Stephen John Machin, Lingfo Pty Ltd

-#

This module is part of the xlrd package, which is released under a BSD-style licence.

-## -from __future__ import print_function - -import xlrd -from xlrd.timemachine import REPR -import sys -import glob - -def scope_as_string(book, scope): - if 0 <= scope < book.nsheets: - return "sheet #%d (%r)" % (scope, REPR(book.sheet_names()[scope])) - if scope == -1: - return "Global" - if scope == -2: - return "Macro/VBA" - return "Unknown scope value (%r)" % REPR(scope) - -def do_scope_query(book, scope_strg, show_contents=0, f=sys.stdout): - try: - qscope = int(scope_strg) - except ValueError: - if scope_strg == "*": - qscope = None # means "all' - else: - # so assume it's a sheet name ... - qscope = book.sheet_names().index(scope_strg) - print("%r => %d" % (scope_strg, qscope), file=f) - for nobj in book.name_obj_list: - if qscope is None or nobj.scope == qscope: - show_name_object(book, nobj, show_contents, f) - -def show_name_details(book, name, show_contents=0, f=sys.stdout): - """ - book -- Book object obtained from xlrd.open_workbook(). - name -- The name that's being investigated. - show_contents -- 0: Don't; 1: Non-empty cells only; 2: All cells - f -- Open output file handle. - """ - name_lcase = name.lower() # Excel names are case-insensitive. - nobj_list = book.name_map.get(name_lcase) - if not nobj_list: - print("%r: unknown name" % name, file=f) - return - for nobj in nobj_list: - show_name_object(book, nobj, show_contents, f) - -def show_name_details_in_scope( - book, name, scope_strg, show_contents=0, f=sys.stdout, - ): - try: - scope = int(scope_strg) - except ValueError: - # so assume it's a sheet name ... - scope = book.sheet_names().index(scope_strg) - print("%r => %d" % (scope_strg, scope), file=f) - name_lcase = name.lower() # Excel names are case-insensitive. 
- while 1: - nobj = book.name_and_scope_map.get((name_lcase, scope)) - if nobj: - break - print("Name %s not found in scope %d" % (REPR(name), scope), file=f) - if scope == -1: - return - scope = -1 # Try again with global scope - print("Name %s found in scope %d" % (REPR(name), scope), file=f) - show_name_object(book, nobj, show_contents, f) - -def showable_cell_value(celltype, cellvalue, datemode): - if celltype == xlrd.XL_CELL_DATE: - try: - showval = xlrd.xldate_as_tuple(cellvalue, datemode) - except xlrd.XLDateError as e: - showval = "%s:%s" % (type(e).__name__, e) - elif celltype == xlrd.XL_CELL_ERROR: - showval = xlrd.error_text_from_code.get( - cellvalue, '' % cellvalue) - else: - showval = cellvalue - return showval - -def show_name_object(book, nobj, show_contents=0, f=sys.stdout): - print("\nName: %s, scope: %s (%s)" \ - % (REPR(nobj.name), REPR(nobj.scope), scope_as_string(book, nobj.scope)), file=f) - res = nobj.result - print("Formula eval result: %s" % REPR(res), file=f) - if res is None: - return - # result should be an instance of the Operand class - kind = res.kind - value = res.value - if kind >= 0: - # A scalar, or unknown ... you've seen all there is to see. 
- pass - elif kind == xlrd.oREL: - # A list of Ref3D objects representing *relative* ranges - for i in range(len(value)): - ref3d = value[i] - print("Range %d: %s ==> %s"% (i, REPR(ref3d.coords), REPR(xlrd.rangename3drel(book, ref3d))), file=f) - elif kind == xlrd.oREF: - # A list of Ref3D objects - for i in range(len(value)): - ref3d = value[i] - print("Range %d: %s ==> %s"% (i, REPR(ref3d.coords), REPR(xlrd.rangename3d(book, ref3d))), file=f) - if not show_contents: - continue - datemode = book.datemode - for shx in range(ref3d.shtxlo, ref3d.shtxhi): - sh = book.sheet_by_index(shx) - print(" Sheet #%d (%s)" % (shx, sh.name), file=f) - rowlim = min(ref3d.rowxhi, sh.nrows) - collim = min(ref3d.colxhi, sh.ncols) - for rowx in range(ref3d.rowxlo, rowlim): - for colx in range(ref3d.colxlo, collim): - cty = sh.cell_type(rowx, colx) - if cty == xlrd.XL_CELL_EMPTY and show_contents == 1: - continue - cval = sh.cell_value(rowx, colx) - sval = showable_cell_value(cty, cval, datemode) - print(" (%3d,%3d) %-5s: %s" - % (rowx, colx, xlrd.cellname(rowx, colx), REPR(sval)), file=f) - -if __name__ == "__main__": - def usage(): - text = """ -usage: xlrdnameAIPdemo.py glob_pattern name scope show_contents - -where: - "glob_pattern" designates a set of files - "name" is a name or '*' (all names) - "scope" is -1 (global) or a sheet number - or a sheet name or * (all scopes) - "show_contents" is one of 0 (no show), - 1 (only non-empty cells), or 2 (all cells) - -Examples (script name and glob_pattern arg omitted for brevity) - [Searching through book.name_obj_list] - * * 0 lists all names - * * 1 lists all names, showing referenced non-empty cells - * 1 0 lists all names local to the 2nd sheet - * Northern 0 lists all names local to the 'Northern' sheet - * -1 0 lists all names with global scope - [Initial direct access through book.name_map] - Sales * 0 lists all occurrences of "Sales" in any scope - [Direct access through book.name_and_scope_map] - Revenue -1 0 checks if "Revenue" 
exists in global scope - -""" - sys.stdout.write(text) - - if len(sys.argv) != 5: - usage() - sys.exit(0) - arg_pattern = sys.argv[1] # glob pattern e.g. "foo*.xls" - arg_name = sys.argv[2] # see below - arg_scope = sys.argv[3] # see below - arg_show_contents = int(sys.argv[4]) # 0: no show, 1: only non-empty cells, - # 2: all cells - for fname in glob.glob(arg_pattern): - book = xlrd.open_workbook(fname) - if arg_name == "*": - # Examine book.name_obj_list to find all names - # in a given scope ("*" => all scopes) - do_scope_query(book, arg_scope, arg_show_contents) - elif arg_scope == "*": - # Using book.name_map to find all usage of a name. - show_name_details(book, arg_name, arg_show_contents) - else: - # Using book.name_and_scope_map to find which if any instances - # of a name are visible in the given scope, which can be supplied - # as -1 (global) or a sheet number or a sheet name. - show_name_details_in_scope(book, arg_name, arg_scope, arg_show_contents) diff --git a/SUEWSPrepare/Modules/xlrd/formatting.py b/SUEWSPrepare/Modules/xlrd/formatting.py index f044915..9e4db6a 100644 --- a/SUEWSPrepare/Modules/xlrd/formatting.py +++ b/SUEWSPrepare/Modules/xlrd/formatting.py @@ -1,26 +1,26 @@ -# -*- coding: cp1252 -*- - -## -# Module for formatting information. -# -#

Copyright 2005-2012 Stephen John Machin, Lingfo Pty Ltd

-#

This module is part of the xlrd package, which is released under -# a BSD-style licence.

-## - -# No part of the content of this file was derived from the works of David Giffin. +# -*- coding: utf-8 -*- +# Copyright (c) 2005-2012 Stephen John Machin, Lingfo Pty Ltd +# This module is part of the xlrd package, which is released under a +# BSD-style licence. +# No part of the content of this file was derived from the works of +# David Giffin. +""" +Module for formatting information. +""" from __future__ import print_function -DEBUG = 0 import re from struct import unpack + +from .biffh import ( + FDT, FGE, FNU, FTX, FUN, XL_CELL_DATE, XL_CELL_NUMBER, XL_CELL_TEXT, + XL_FORMAT, XL_FORMAT2, BaseObject, XLRDError, fprintf, unpack_string, + unpack_unicode, upkbits, upkbitsL, +) from .timemachine import * -from .biffh import BaseObject, unpack_unicode, unpack_string, \ - upkbits, upkbitsL, fprintf, \ - FUN, FDT, FNU, FGE, FTX, XL_CELL_NUMBER, XL_CELL_DATE, \ - XL_FORMAT, XL_FORMAT2, \ - XLRDError + +DEBUG = 0 _cellty_from_fmtty = { FNU: XL_CELL_NUMBER, @@ -28,8 +28,8 @@ FGE: XL_CELL_NUMBER, FDT: XL_CELL_DATE, FTX: XL_CELL_NUMBER, # Yes, a number can be formatted as text. 
- } - +} + excel_default_palette_b5 = ( ( 0, 0, 0), (255, 255, 255), (255, 0, 0), ( 0, 255, 0), ( 0, 0, 255), (255, 255, 0), (255, 0, 255), ( 0, 255, 255), @@ -45,7 +45,7 @@ (255, 153, 0), (255, 102, 0), (102, 102, 153), (150, 150, 150), ( 0, 51, 102), ( 51, 153, 102), ( 0, 51, 0), ( 51, 51, 0), (153, 51, 0), (153, 51, 102), ( 51, 51, 153), ( 51, 51, 51), - ) +) excel_default_palette_b2 = excel_default_palette_b5[:16] @@ -66,7 +66,7 @@ (255,153, 0), (255,102, 0), (102,102,153), (150,150,150), # 44 ( 0, 51,102), ( 51,153,102), ( 0, 51, 0), ( 51, 51, 0), # 48 (153, 51, 0), (153, 51,102), ( 51, 51,153), ( 51, 51, 51), # 52 - ) +) default_palette = { 80: excel_default_palette_b8, @@ -77,20 +77,18 @@ 30: excel_default_palette_b2, 21: excel_default_palette_b2, 20: excel_default_palette_b2, - } +} -""" -00H = Normal -01H = RowLevel_lv (see next field) -02H = ColLevel_lv (see next field) -03H = Comma -04H = Currency -05H = Percent -06H = Comma [0] (BIFF4-BIFF8) -07H = Currency [0] (BIFF4-BIFF8) -08H = Hyperlink (BIFF8) -09H = Followed Hyperlink (BIFF8) -""" +# 00H = Normal +# 01H = RowLevel_lv (see next field) +# 02H = ColLevel_lv (see next field) +# 03H = Comma +# 04H = Currency +# 05H = Percent +# 06H = Comma [0] (BIFF4-BIFF8) +# 07H = Currency [0] (BIFF4-BIFF8) +# 08H = Hyperlink (BIFF8) +# 09H = Followed Hyperlink (BIFF8) built_in_style_names = [ "Normal", "RowLevel_", @@ -102,7 +100,7 @@ "Currency [0]", "Hyperlink", "Followed Hyperlink", - ] +] def initialise_colour_map(book): book.colour_map = {} @@ -121,18 +119,20 @@ def initialise_colour_map(book): # System window text colour for border lines book.colour_map[ndpal+8] = None # System window background colour for pattern background - book.colour_map[ndpal+8+1] = None # - for ci in ( - 0x51, # System ToolTip text colour (used in note objects) - 0x7FFF, # 32767, system window text colour for fonts - ): - book.colour_map[ci] = None + book.colour_map[ndpal+8+1] = None + # System ToolTip text colour (used in note objects) 
+ book.colour_map[0x51] = None + # 32767, system window text colour for fonts + book.colour_map[0x7FFF] = None + def nearest_colour_index(colour_map, rgb, debug=0): - # General purpose function. Uses Euclidean distance. - # So far used only for pre-BIFF8 WINDOW2 record. - # Doesn't have to be fast. - # Doesn't have to be fancy. + """ + General purpose function. Uses Euclidean distance. + So far used only for pre-BIFF8 ``WINDOW2`` record. + Doesn't have to be fast. + Doesn't have to be fancy. + """ best_metric = 3 * 256 * 256 best_colourx = 0 for colourx, cand_rgb in colour_map.items(): @@ -147,14 +147,15 @@ def nearest_colour_index(colour_map, rgb, debug=0): if metric == 0: break if 0 and debug: - print("nearest_colour_index for %r is %r -> %r; best_metric is %d" \ + print("nearest_colour_index for %r is %r -> %r; best_metric is %d" % (rgb, best_colourx, colour_map[best_colourx], best_metric)) return best_colourx -## -# This mixin class exists solely so that Format, Font, and XF.... objects -# can be compared by value of their attributes. class EqNeAttrs(object): + """ + This mixin class exists solely so that :class:`Format`, :class:`Font`, and + :class:`XF` objects can be compared by value of their attributes. + """ def __eq__(self, other): return self.__dict__ == other.__dict__ @@ -162,85 +163,93 @@ def __eq__(self, other): def __ne__(self, other): return self.__dict__ != other.__dict__ -## -# An Excel "font" contains the details of not only what is normally -# considered a font, but also several other display attributes. -# Items correspond to those in the Excel UI's Format/Cells/Font tab. -#
-- New in version 0.6.1 class Font(BaseObject, EqNeAttrs): - ## - # 1 = Characters are bold. Redundant; see "weight" attribute. + """ + An Excel "font" contains the details of not only what is normally + considered a font, but also several other display attributes. + Items correspond to those in the Excel UI's Format -> Cells -> Font tab. + + .. versionadded:: 0.6.1 + """ + + #: 1 = Characters are bold. Redundant; see "weight" attribute. bold = 0 - ## - # Values: 0 = ANSI Latin, 1 = System default, 2 = Symbol, - # 77 = Apple Roman, - # 128 = ANSI Japanese Shift-JIS, - # 129 = ANSI Korean (Hangul), - # 130 = ANSI Korean (Johab), - # 134 = ANSI Chinese Simplified GBK, - # 136 = ANSI Chinese Traditional BIG5, - # 161 = ANSI Greek, - # 162 = ANSI Turkish, - # 163 = ANSI Vietnamese, - # 177 = ANSI Hebrew, - # 178 = ANSI Arabic, - # 186 = ANSI Baltic, - # 204 = ANSI Cyrillic, - # 222 = ANSI Thai, - # 238 = ANSI Latin II (Central European), - # 255 = OEM Latin I + + #: Values:: + #: + #: 0 = ANSI Latin + #: 1 = System default + #: 2 = Symbol, + #: 77 = Apple Roman, + #: 128 = ANSI Japanese Shift-JIS, + #: 129 = ANSI Korean (Hangul), + #: 130 = ANSI Korean (Johab), + #: 134 = ANSI Chinese Simplified GBK, + #: 136 = ANSI Chinese Traditional BIG5, + #: 161 = ANSI Greek, + #: 162 = ANSI Turkish, + #: 163 = ANSI Vietnamese, + #: 177 = ANSI Hebrew, + #: 178 = ANSI Arabic, + #: 186 = ANSI Baltic, + #: 204 = ANSI Cyrillic, + #: 222 = ANSI Thai, + #: 238 = ANSI Latin II (Central European), + #: 255 = OEM Latin I character_set = 0 - ## - # An explanation of "colour index" is given in the Formatting - # section at the start of this document. + + #: An explanation of "colour index" is given in :ref:`palette`. colour_index = 0 - ## - # 1 = Superscript, 2 = Subscript. + + #: 1 = Superscript, 2 = Subscript. escapement = 0 - ## - # 0 = None (unknown or don't care)
- # 1 = Roman (variable width, serifed)
- # 2 = Swiss (variable width, sans-serifed)
- # 3 = Modern (fixed width, serifed or sans-serifed)
- # 4 = Script (cursive)
- # 5 = Decorative (specialised, for example Old English, Fraktur) + + #: Values:: + #: + #: 0 = None (unknown or don't care) + #: 1 = Roman (variable width, serifed) + #: 2 = Swiss (variable width, sans-serifed) + #: 3 = Modern (fixed width, serifed or sans-serifed) + #: 4 = Script (cursive) + #: 5 = Decorative (specialised, for example Old English, Fraktur) family = 0 - ## - # The 0-based index used to refer to this Font() instance. - # Note that index 4 is never used; xlrd supplies a dummy place-holder. + + #: The 0-based index used to refer to this Font() instance. + #: Note that index 4 is never used; xlrd supplies a dummy place-holder. font_index = 0 - ## - # Height of the font (in twips). A twip = 1/20 of a point. + + #: Height of the font (in twips). A twip = 1/20 of a point. height = 0 - ## - # 1 = Characters are italic. + + #: 1 = Characters are italic. italic = 0 - ## - # The name of the font. Example: u"Arial" + + #: The name of the font. Example: ``"Arial"``. name = UNICODE_LITERAL("") - ## - # 1 = Characters are struck out. + + #: 1 = Characters are struck out. struck_out = 0 - ## - # 0 = None
- # 1 = Single; 0x21 (33) = Single accounting
- # 2 = Double; 0x22 (34) = Double accounting + + #: Values:: + #: + #: 0 = None + #: 1 = Single; 0x21 (33) = Single accounting + #: 2 = Double; 0x22 (34) = Double accounting underline_type = 0 - ## - # 1 = Characters are underlined. Redundant; see "underline_type" attribute. + + #: 1 = Characters are underlined. Redundant; see + #: :attr:`underline_type` attribute. underlined = 0 - ## - # Font weight (100-1000). Standard values are 400 for normal text - # and 700 for bold text. + + #: Font weight (100-1000). Standard values are 400 for normal text + #: and 700 for bold text. weight = 400 - ## - # 1 = Font is outline style (Macintosh only) + + #: 1 = Font is outline style (Macintosh only) outline = 0 - ## - # 1 = Font is shadow style (Macintosh only) - shadow = 0 - # No methods ... + #: 1 = Font is shadow style (Macintosh only) + shadow = 0 def handle_efont(book, data): # BIFF2 only if not book.formatting_info: @@ -316,29 +325,32 @@ def handle_font(book, data): book.logfile, header="--- handle_font: font[%d] ---" % f.font_index, footer="-------------------", - ) + ) # === "Number formats" === -## -# "Number format" information from a FORMAT record. -#
-- New in version 0.6.1 class Format(BaseObject, EqNeAttrs): - ## - # The key into Book.format_map + """ + "Number format" information from a ``FORMAT`` record. + + .. versionadded:: 0.6.1 + """ + + #: The key into :attr:`~xlrd.book.Book.format_map` format_key = 0 - ## - # A classification that has been inferred from the format string. - # Currently, this is used only to distinguish between numbers and dates. - #
Values: - #
FUN = 0 # unknown - #
FDT = 1 # date - #
FNU = 2 # number - #
FGE = 3 # general - #
FTX = 4 # text + + #: A classification that has been inferred from the format string. + #: Currently, this is used only to distinguish between numbers and dates. + #: Values:: + #: + #: FUN = 0 # unknown + #: FDT = 1 # date + #: FNU = 2 # number + #: FGE = 3 # general + #: FTX = 4 # text type = FUN - ## - # The format string + + #: The format string format_str = UNICODE_LITERAL('') def __init__(self, format_key, ty, format_str): @@ -386,7 +398,7 @@ def __init__(self, format_key, ty, format_str): 0x2f: "mm:ss.0", 0x30: "##0.0E+0", 0x31: "@", - } +} fmt_code_ranges = [ # both-inclusive ranges of "standard" format codes # Source: the openoffice.org doc't @@ -404,7 +416,7 @@ def __init__(self, format_key, ty, format_str): (59, 62, FNU), # Thai number (currency?) formats (67, 70, FNU), # Thai number (currency?) formats (71, 81, FDT), # Thai date formats - ] +] std_format_code_types = {} for lo, hi, ty in fmt_code_ranges: @@ -426,7 +438,7 @@ def __init__(self, format_key, ty, format_str): UNICODE_LITERAL('0'): 5, UNICODE_LITERAL('#'): 5, UNICODE_LITERAL('?'): 5, - } +} non_date_formats = { UNICODE_LITERAL('0.00E+00'):1, @@ -435,14 +447,14 @@ def __init__(self, format_key, ty, format_str): UNICODE_LITERAL('GENERAL') :1, # OOo Calc 1.1.4 does this. UNICODE_LITERAL('general') :1, # pyExcelerator 0.6.3 does this. UNICODE_LITERAL('@') :1, - } +} fmt_bracketed_sub = re.compile(r'\[[^]]*\]').sub # Boolean format strings (actual cases) -# u'"Yes";"Yes";"No"' -# u'"True";"True";"False"' -# u'"On";"On";"Off"' +# '"Yes";"Yes";"No"' +# '"True";"True";"False"' +# '"On";"On";"Off"' def is_date_format_string(book, fmt): # Heuristics: @@ -451,12 +463,12 @@ def is_date_format_string(book, fmt): # E.g. hh\hmm\mss\s should produce a display like 23h59m59s # Date formats have one or more of ymdhs (caseless) in them. # Numeric formats have # and 0. - # N.B. u'General"."' hence get rid of "text" first. + # N.B. 'General"."' hence get rid of "text" first. 
# TODO: Find where formats are interpreted in Gnumeric - # TODO: u'[h]\\ \\h\\o\\u\\r\\s' ([h] means don't care about hours > 23) + # TODO: '[h]\\ \\h\\o\\u\\r\\s' ([h] means don't care about hours > 23) state = 0 s = '' - + for c in fmt: if state == 0: if c == UNICODE_LITERAL('"'): @@ -565,8 +577,7 @@ def handle_palette(book, data): blah = DEBUG or book.verbosity >= 2 n_colours, = unpack('= 50] - if ((DEBUG or book.verbosity >= 1) - and n_colours != expected_n_colours): + if (DEBUG or book.verbosity >= 1) and n_colours != expected_n_colours: fprintf(book.logfile, "NOTE *** Expected %d colours in PALETTE record, found %d\n", expected_n_colours, n_colours) @@ -623,8 +634,7 @@ def handle_style(book, data): bv = book.biff_version flag_and_xfx, built_in_id, level = unpack('= 3 bv = self.biff_version @@ -707,14 +717,15 @@ def handle_xf(self, data): fill_in_standard_formats(self) if bv >= 80: unpack_fmt = '> 2 - for attr_stem in \ - "format font alignment border background protection".split(): + attr_stems = [ + 'format', + 'font', + 'alignment', + 'border', + 'background', + 'protection', + ] + for attr_stem in attr_stems: attr = "_" + attr_stem + "_flag" setattr(xf, attr, reg & 1) reg >>= 1 @@ -748,45 +766,53 @@ def handle_xf(self, data): (23, 0x3f800000, 'right_colour_index'), (30, 0x40000000, 'diag_down'), (31, 0x80000000, 'diag_up'), - )) + )) upkbits(xf.border, pkd_brdbkg2, ( (0, 0x0000007F, 'top_colour_index'), (7, 0x00003F80, 'bottom_colour_index'), (14, 0x001FC000, 'diag_colour_index'), (21, 0x01E00000, 'diag_line_style'), - )) + )) upkbitsL(xf.background, pkd_brdbkg2, ( (26, 0xFC000000, 'fill_pattern'), - )) + )) upkbits(xf.background, pkd_brdbkg3, ( (0, 0x007F, 'pattern_colour_index'), (7, 0x3F80, 'background_colour_index'), - )) + )) elif bv >= 50: unpack_fmt = '> 2 - for attr_stem in \ - "format font alignment border background protection".split(): + attr_stems = [ + 'format', + 'font', + 'alignment', + 'border', + 'background', + 'protection', + ] + for 
attr_stem in attr_stems: attr = "_" + attr_stem + "_flag" setattr(xf, attr, reg & 1) reg >>= 1 @@ -794,11 +820,11 @@ def handle_xf(self, data): ( 0, 0x0000007F, 'pattern_colour_index'), ( 7, 0x00003F80, 'background_colour_index'), (16, 0x003F0000, 'fill_pattern'), - )) + )) upkbitsL(xf.border, pkd_brdbkg1, ( (22, 0x01C00000, 'bottom_line_style'), (25, 0xFE000000, 'bottom_colour_index'), - )) + )) upkbits(xf.border, pkd_brdbkg2, ( ( 0, 0x00000007, 'top_line_style'), ( 3, 0x00000038, 'left_line_style'), @@ -806,32 +832,40 @@ def handle_xf(self, data): ( 9, 0x0000FE00, 'top_colour_index'), (16, 0x007F0000, 'left_colour_index'), (23, 0x3F800000, 'right_colour_index'), - )) + )) elif bv >= 40: unpack_fmt = '> 6 xf.alignment.rotation = [0, 255, 90, 180][orientation] reg = pkd_used >> 2 - for attr_stem in \ - "format font alignment border background protection".split(): + attr_stems = [ + 'format', + 'font', + 'alignment', + 'border', + 'background', + 'protection', + ] + for attr_stem in attr_stems: attr = "_" + attr_stem + "_flag" setattr(xf, attr, reg & 1) reg >>= 1 @@ -839,7 +873,7 @@ def handle_xf(self, data): ( 0, 0x003F, 'fill_pattern'), ( 6, 0x07C0, 'pattern_colour_index'), (11, 0xF800, 'background_colour_index'), - )) + )) upkbitsL(xf.border, pkd_brd_34, ( ( 0, 0x00000007, 'top_line_style'), ( 3, 0x000000F8, 'top_colour_index'), @@ -849,31 +883,39 @@ def handle_xf(self, data): (19, 0x00F80000, 'bottom_colour_index'), (24, 0x07000000, 'right_line_style'), (27, 0xF8000000, 'right_colour_index'), - )) + )) elif bv == 30: unpack_fmt = '> 2 - for attr_stem in \ - "format font alignment border background protection".split(): + attr_stems = [ + 'format', + 'font', + 'alignment', + 'border', + 'background', + 'protection', + ] + for attr_stem in attr_stems: attr = "_" + attr_stem + "_flag" setattr(xf, attr, reg & 1) reg >>= 1 @@ -881,7 +923,7 @@ def handle_xf(self, data): ( 0, 0x003F, 'fill_pattern'), ( 6, 0x07C0, 'pattern_colour_index'), (11, 0xF800, 
'background_colour_index'), - )) + )) upkbitsL(xf.border, pkd_brd_34, ( ( 0, 0x00000007, 'top_line_style'), ( 3, 0x000000F8, 'top_colour_index'), @@ -891,23 +933,23 @@ def handle_xf(self, data): (19, 0x00F80000, 'bottom_colour_index'), (24, 0x07000000, 'right_line_style'), (27, 0xF8000000, 'right_colour_index'), - )) + )) xf.alignment.vert_align = 2 # bottom xf.alignment.rotation = 0 elif bv == 21: - #### Warning: incomplete treatment; formatting_info not fully supported. - #### Probably need to offset incoming BIFF2 XF[n] to BIFF8-like XF[n+16], - #### and create XF[0:16] like the standard ones in BIFF8 - #### *AND* add 16 to all XF references in cell records :-( + ## Warning: incomplete treatment; formatting_info not fully supported. + ## Probably need to offset incoming BIFF2 XF[n] to BIFF8-like XF[n+16], + ## and create XF[0:16] like the standard ones in BIFF8 *AND* add 16 to + ## all XF references in cell records :-( (xf.font_index, format_etc, halign_etc) = unpack(' xf.xf_index: fprintf(self.logfile, "NOTE !!! XF[%d]: parent_style_index is %d; out of order?\n", @@ -1049,214 +1096,226 @@ def initialise_book(book): handle_style, handle_xf, xf_epilogue, - ) + ) for method in methods: setattr(book.__class__, method.__name__, method) -## -#

A collection of the border-related attributes of an XF record. -# Items correspond to those in the Excel UI's Format/Cells/Border tab.

-#

An explanations of "colour index" is given in the Formatting -# section at the start of this document. -# There are five line style attributes; possible values and the -# associated meanings are: -# 0 = No line, -# 1 = Thin, -# 2 = Medium, -# 3 = Dashed, -# 4 = Dotted, -# 5 = Thick, -# 6 = Double, -# 7 = Hair, -# 8 = Medium dashed, -# 9 = Thin dash-dotted, -# 10 = Medium dash-dotted, -# 11 = Thin dash-dot-dotted, -# 12 = Medium dash-dot-dotted, -# 13 = Slanted medium dash-dotted. -# The line styles 8 to 13 appear in BIFF8 files (Excel 97 and later) only. -# For pictures of the line styles, refer to OOo docs s3.10 (p22) -# "Line Styles for Cell Borders (BIFF3-BIFF8)".

-#
-- New in version 0.6.1 class XFBorder(BaseObject, EqNeAttrs): + """ + A collection of the border-related attributes of an ``XF`` record. + Items correspond to those in the Excel UI's Format -> Cells -> Border tab. + + An explanations of "colour index" is given in :ref:`palette`. + + There are five line style attributes; possible values and the + associated meanings are:: + + 0 = No line, + 1 = Thin, + 2 = Medium, + 3 = Dashed, + 4 = Dotted, + 5 = Thick, + 6 = Double, + 7 = Hair, + 8 = Medium dashed, + 9 = Thin dash-dotted, + 10 = Medium dash-dotted, + 11 = Thin dash-dot-dotted, + 12 = Medium dash-dot-dotted, + 13 = Slanted medium dash-dotted. - ## - # The colour index for the cell's top line + The line styles 8 to 13 appear in BIFF8 files (Excel 97 and later) only. + For pictures of the line styles, refer to OOo docs s3.10 (p22) + "Line Styles for Cell Borders (BIFF3-BIFF8)".

+ + .. versionadded:: 0.6.1 + """ + + #: The colour index for the cell's top line top_colour_index = 0 - ## - # The colour index for the cell's bottom line + #: The colour index for the cell's bottom line bottom_colour_index = 0 - ## - # The colour index for the cell's left line + + #: The colour index for the cell's left line left_colour_index = 0 - ## - # The colour index for the cell's right line + + #: The colour index for the cell's right line right_colour_index = 0 - ## - # The colour index for the cell's diagonal lines, if any + + #: The colour index for the cell's diagonal lines, if any diag_colour_index = 0 - ## - # The line style for the cell's top line + + #: The line style for the cell's top line top_line_style = 0 - ## - # The line style for the cell's bottom line + + #: The line style for the cell's bottom line bottom_line_style = 0 - ## - # The line style for the cell's left line + + #: The line style for the cell's left line left_line_style = 0 - ## - # The line style for the cell's right line + + #: The line style for the cell's right line right_line_style = 0 - ## - # The line style for the cell's diagonal lines, if any + + #: The line style for the cell's diagonal lines, if any diag_line_style = 0 - ## - # 1 = draw a diagonal from top left to bottom right + + #: 1 = draw a diagonal from top left to bottom right diag_down = 0 - ## - # 1 = draw a diagonal from bottom left to top right + + #: 1 = draw a diagonal from bottom left to top right diag_up = 0 -## -# A collection of the background-related attributes of an XF record. -# Items correspond to those in the Excel UI's Format/Cells/Patterns tab. -# An explanation of "colour index" is given in the Formatting -# section at the start of this document. -#
-- New in version 0.6.1 class XFBackground(BaseObject, EqNeAttrs): + """ + A collection of the background-related attributes of an ``XF`` record. + Items correspond to those in the Excel UI's Format -> Cells -> Patterns tab. + + An explanations of "colour index" is given in :ref:`palette`. - ## - # See section 3.11 of the OOo docs. + .. versionadded:: 0.6.1 + """ + + #: See section 3.11 of the OOo docs. fill_pattern = 0 - ## - # See section 3.11 of the OOo docs. + + #: See section 3.11 of the OOo docs. background_colour_index = 0 - ## - # See section 3.11 of the OOo docs. + + #: See section 3.11 of the OOo docs. pattern_colour_index = 0 -## -# A collection of the alignment and similar attributes of an XF record. -# Items correspond to those in the Excel UI's Format/Cells/Alignment tab. -#
-- New in version 0.6.1 class XFAlignment(BaseObject, EqNeAttrs): + """ + A collection of the alignment and similar attributes of an ``XF`` record. + Items correspond to those in the Excel UI's Format -> Cells -> Alignment tab. - ## - # Values: section 6.115 (p 214) of OOo docs + .. versionadded:: 0.6.1 + """ + + #: Values: section 6.115 (p 214) of OOo docs hor_align = 0 - ## - # Values: section 6.115 (p 215) of OOo docs + + #: Values: section 6.115 (p 215) of OOo docs vert_align = 0 - ## - # Values: section 6.115 (p 215) of OOo docs.
- # Note: file versions BIFF7 and earlier use the documented - # "orientation" attribute; this will be mapped (without loss) - # into "rotation". + + #: Values: section 6.115 (p 215) of OOo docs. + #: + #: .. note:: + #: file versions BIFF7 and earlier use the documented + #: :attr:`orientation` attribute; this will be mapped (without loss) + #: into :attr:`rotation`. rotation = 0 - ## - # 1 = text is wrapped at right margin + + #: 1 = text is wrapped at right margin text_wrapped = 0 - ## - # A number in range(15). + + #: A number in ``range(15)``. indent_level = 0 - ## - # 1 = shrink font size to fit text into cell. + + #: 1 = shrink font size to fit text into cell. shrink_to_fit = 0 - ## - # 0 = according to context; 1 = left-to-right; 2 = right-to-left - text_direction = 0 -## -# A collection of the protection-related attributes of an XF record. -# Items correspond to those in the Excel UI's Format/Cells/Protection tab. -# Note the OOo docs include the "cell or style" bit -# in this bundle of attributes. -# This is incorrect; the bit is used in determining which bundles to use. -#
-- New in version 0.6.1 + #: 0 = according to context; 1 = left-to-right; 2 = right-to-left + text_direction = 0 class XFProtection(BaseObject, EqNeAttrs): + """ + A collection of the protection-related attributes of an ``XF`` record. + Items correspond to those in the Excel UI's Format -> Cells -> Protection tab. + Note the OOo docs include the "cell or style" bit in this bundle of + attributes. This is incorrect; the bit is used in determining which bundles + to use. + + .. versionadded:: 0.6.1 + """ - ## - # 1 = Cell is prevented from being changed, moved, resized, or deleted - # (only if the sheet is protected). + #: 1 = Cell is prevented from being changed, moved, resized, or deleted + #: (only if the sheet is protected). cell_locked = 0 - ## - # 1 = Hide formula so that it doesn't appear in the formula bar when - # the cell is selected (only if the sheet is protected). - formula_hidden = 0 -## -# eXtended Formatting information for cells, rows, columns and styles. -#
-- New in version 0.6.1 -# -#

Each of the 6 flags below describes the validity of -# a specific group of attributes. -#
-# In cell XFs, flag==0 means the attributes of the parent style XF are used, -# (but only if the attributes are valid there); flag==1 means the attributes -# of this XF are used.
-# In style XFs, flag==0 means the attribute setting is valid; flag==1 means -# the attribute should be ignored.
-# Note that the API -# provides both "raw" XFs and "computed" XFs -- in the latter case, cell XFs -# have had the above inheritance mechanism applied. -#

+ #: 1 = Hide formula so that it doesn't appear in the formula bar when + #: the cell is selected (only if the sheet is protected). + formula_hidden = 0 class XF(BaseObject): + """ + eXtended Formatting information for cells, rows, columns and styles. + + Each of the 6 flags below describes the validity of + a specific group of attributes. + + In cell XFs: + + - ``flag==0`` means the attributes of the parent style ``XF`` are + used, (but only if the attributes are valid there); + + - ``flag==1`` means the attributes of this ``XF`` are used. - ## - # 0 = cell XF, 1 = style XF + In style XFs: + + - ``flag==0`` means the attribute setting is valid; + - ``flag==1`` means the attribute should be ignored. + + .. note:: + the API provides both "raw" XFs and "computed" XFs. In the latter case, + cell XFs have had the above inheritance mechanism applied. + + .. versionadded:: 0.6.1 + """ + + #: 0 = cell XF, 1 = style XF is_style = 0 - ## - # cell XF: Index into Book.xf_list - # of this XF's style XF
- # style XF: 0xFFF + + #: cell XF: Index into Book.xf_list of this XF's style XF + #: + #: style XF: 0xFFF parent_style_index = 0 - ## + # _format_flag = 0 - ## + # _font_flag = 0 - ## + # _alignment_flag = 0 - ## + # _border_flag = 0 - ## + # _background_flag = 0 - ## - #   + _protection_flag = 0 - ## - # Index into Book.xf_list + + #: Index into :attr:`~xlrd.book.Book.xf_list` xf_index = 0 - ## - # Index into Book.font_list + + #: Index into :attr:`~xlrd.book.Book.font_list` font_index = 0 - ## - # Key into Book.format_map - #

- # Warning: OOo docs on the XF record call this "Index to FORMAT record". - # It is not an index in the Python sense. It is a key to a map. - # It is true only for Excel 4.0 and earlier files - # that the key into format_map from an XF instance - # is the same as the index into format_list, and only - # if the index is less than 164. - #

+ + #: Key into :attr:`~xlrd.book.Book.format_map` + #: + #: .. warning:: + #: OOo docs on the XF record call this "Index to FORMAT record". + #: It is not an index in the Python sense. It is a key to a map. + #: It is true *only* for Excel 4.0 and earlier files + #: that the key into format_map from an XF instance + #: is the same as the index into format_list, and *only* + #: if the index is less than 164. format_key = 0 - ## - # An instance of an XFProtection object. + + #: An instance of an :class:`XFProtection` object. protection = None - ## - # An instance of an XFBackground object. + + #: An instance of an :class:`XFBackground` object. background = None - ## - # An instance of an XFAlignment object. + + #: An instance of an :class:`XFAlignment` object. alignment = None - ## - # An instance of an XFBorder object. + + #: An instance of an :class:`XFBorder` object. border = None diff --git a/SUEWSPrepare/Modules/xlrd/formula.py b/SUEWSPrepare/Modules/xlrd/formula.py index 7c56aa4..e26639b 100644 --- a/SUEWSPrepare/Modules/xlrd/formula.py +++ b/SUEWSPrepare/Modules/xlrd/formula.py @@ -1,21 +1,24 @@ -# -*- coding: cp1252 -*- +# -*- coding: utf-8 -*- +# Copyright (c) 2005-2012 Stephen John Machin, Lingfo Pty Ltd +# This module is part of the xlrd package, which is released under a +# BSD-style licence. +# No part of the content of this file was derived from the works of +# David Giffin. +""" +Module for parsing/evaluating Microsoft Excel formulas. +""" + +from __future__ import print_function -## -# Module for parsing/evaluating Microsoft Excel formulas. -# -#

Copyright 2005-2012 Stephen John Machin, Lingfo Pty Ltd

-#

This module is part of the xlrd package, which is released under -# a BSD-style licence.

-## - -# No part of the content of this file was derived from the works of David Giffin. - -from __future__ import print_function import copy +import operator as opr from struct import unpack + +from .biffh import ( + BaseObject, XLRDError, error_text_from_code, hex_char_dump, + unpack_string_update_pos, unpack_unicode_update_pos, +) from .timemachine import * -from .biffh import unpack_unicode_update_pos, unpack_string_update_pos, \ - XLRDError, hex_char_dump, error_text_from_code, BaseObject __all__ = [ 'oBOOL', 'oERR', 'oNUM', 'oREF', 'oREL', 'oSTRG', 'oUNK', @@ -30,7 +33,8 @@ 'FMLA_TYPE_COND_FMT', 'FMLA_TYPE_DATA_VAL', 'FMLA_TYPE_NAME', - ] + 'Operand', 'Ref3D', +] FMLA_TYPE_CELL = 1 FMLA_TYPE_SHARED = 2 @@ -48,7 +52,7 @@ 8 : 'COND-FMT', 16: 'DATA-VAL', 32: 'NAME', - } +} _TOKEN_NOT_ALLOWED = { 0x01: ALL_FMLA_TYPES - FMLA_TYPE_CELL, # tExp @@ -64,7 +68,7 @@ 0x2C: FMLA_TYPE_CELL + FMLA_TYPE_ARRAY, # tRefN 0x2D: FMLA_TYPE_CELL + FMLA_TYPE_ARRAY, # tAreaN # plus weird stuff like tMem* - }.get +}.get oBOOL = 3 oERR = 4 @@ -84,7 +88,7 @@ 3 : "oBOOL", 4 : "oERR", 5 : "oMSNG", - } +} listsep = ',' #### probably should depend on locale @@ -108,7 +112,7 @@ 50 : sztab3, 70 : sztab3, 80 : sztab4, - } +} # For debugging purposes ... 
the name for each opcode # (without the prefix "t" used on OOo docs) @@ -376,7 +380,7 @@ 377: ('ROUNDBAHTUP', 1, 1, 0x02, 1, 'V', 'V'), 378: ('THAIYEAR', 1, 1, 0x02, 1, 'V', 'V'), 379: ('RTD', 2, 5, 0x04, 1, 'V', 'V'), - } +} tAttrNames = { 0x00: "Skip??", # seen in SAMPLES.XLS which shipped with Excel 5.0 @@ -388,7 +392,7 @@ 0x20: "Assign", 0x40: "Space", 0x41: "SpaceVolatile", - } +} error_opcodes = set([0x07, 0x08, 0x0A, 0x0B, 0x1C, 0x1D, 0x2F]) @@ -396,10 +400,10 @@ tIsectFuncs = (max, min, max, min, max, min) def do_box_funcs(box_funcs, boxa, boxb): - return tuple([ + return tuple( func(numa, numb) for func, numa, numb in zip(box_funcs, boxa.coords, boxb.coords) - ]) + ) def adjust_cell_addr_biff8(rowval, colval, reldelta, browx=None, bcolx=None): row_rel = (colval >> 15) & 1 @@ -469,7 +473,7 @@ def get_externsheet_local_range(bk, refx, blah=0): try: info = bk._externsheet_info[refx] except IndexError: - print("!!! get_externsheet_local_range: refx=%d, not in range(%d)" \ + print("!!! get_externsheet_local_range: refx=%d, not in range(%d)" % (refx, len(bk._externsheet_info)), file=bk.logfile) return (-101, -101) ref_recordx, ref_first_sheetx, ref_last_sheetx = info @@ -513,7 +517,7 @@ def get_externsheet_local_range_b57( nsheets = len(bk._all_sheets_map) if not(0 <= ref_first_sheetx <= ref_last_sheetx < nsheets): if blah: - print("/// get_externsheet_local_range_b57(%d, %d, %d) -> ???" \ + print("/// get_externsheet_local_range_b57(%d, %d, %d) -> ???" % (raw_extshtx, ref_first_sheetx, ref_last_sheetx), file=bk.logfile) print("--- first/last sheet not in range(%d)" % nsheets, file=bk.logfile) return (-103, -103) # stuffed up somewhere :-( @@ -527,80 +531,80 @@ class FormulaError(Exception): pass -## -# Used in evaluating formulas. -# The following table describes the kinds and how their values -# are represented.

-# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -#
Kind symbolKind numberValue representation
oBOOL3integer: 0 => False; 1 => True
oERR4None, or an int error code (same as XL_CELL_ERROR in the Cell class). -#
oMSNG5Used by Excel as a placeholder for a missing (not supplied) function -# argument. Should *not* appear as a final formula result. Value is None.
oNUM2A float. Note that there is no way of distinguishing dates.
oREF-1The value is either None or a non-empty list of -# absolute Ref3D instances.
-#
oREL-2The value is None or a non-empty list of -# fully or partially relative Ref3D instances. -#
oSTRG1A Unicode string.
oUNK0The kind is unknown or ambiguous. The value is None
-#

- class Operand(object): + """ + Used in evaluating formulas. + The following table describes the kinds and how their values + are represented. + + .. raw:: html + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Kind symbolKind numberValue representation
oBOOL3integer: 0 => False; 1 => True
oERR4None, or an int error code (same as XL_CELL_ERROR in the Cell class). +
oMSNG5Used by Excel as a placeholder for a missing (not supplied) function + argument. Should *not* appear as a final formula result. Value is None.
oNUM2A float. Note that there is no way of distinguishing dates.
oREF-1The value is either None or a non-empty list of + absolute Ref3D instances.
+
oREL-2The value is None or a non-empty list of + fully or partially relative Ref3D instances. +
oSTRG1A Unicode string.
oUNK0The kind is unknown or ambiguous. The value is None
+ """ - ## - # None means that the actual value of the operand is a variable - # (depends on cell data), not a constant. + #: None means that the actual value of the operand is a variable + #: (depends on cell data), not a constant. value = None - ## - # oUNK means that the kind of operand is not known unambiguously. + + #: oUNK means that the kind of operand is not known unambiguously. kind = oUNK - ## - # The reconstituted text of the original formula. Function names will be - # in English irrespective of the original language, which doesn't seem - # to be recorded anywhere. The separator is ",", not ";" or whatever else - # might be more appropriate for the end-user's locale; patches welcome. + + #: The reconstituted text of the original formula. Function names will be + #: in English irrespective of the original language, which doesn't seem + #: to be recorded anywhere. The separator is ",", not ";" or whatever else + #: might be more appropriate for the end-user's locale; patches welcome. text = '?' def __init__(self, akind=None, avalue=None, arank=0, atext='?'): @@ -618,40 +622,55 @@ def __repr__(self): return "Operand(kind=%s, value=%r, text=%r)" \ % (kind_text, self.value, self.text) -## -#

Represents an absolute or relative 3-dimensional reference to a box -# of one or more cells.
-# -- New in version 0.6.0 -#

-# -#

The coords attribute is a tuple of the form:
-# (shtxlo, shtxhi, rowxlo, rowxhi, colxlo, colxhi)
-# where 0 <= thingxlo <= thingx < thingxhi.
-# Note that it is quite possible to have thingx > nthings; for example -# Print_Titles could have colxhi == 256 and/or rowxhi == 65536 -# irrespective of how many columns/rows are actually used in the worksheet. -# The caller will need to decide how to handle this situation. -# Keyword: IndexError :-) -#

-# -#

The components of the coords attribute are also available as individual -# attributes: shtxlo, shtxhi, rowxlo, rowxhi, colxlo, and colxhi.

-# -#

The relflags attribute is a 6-tuple of flags which indicate whether -# the corresponding (sheet|row|col)(lo|hi) is relative (1) or absolute (0).
-# Note that there is necessarily no information available as to what cell(s) -# the reference could possibly be relative to. The caller must decide what if -# any use to make of oREL operands. Note also that a partially relative -# reference may well be a typo. -# For example, define name A1Z10 as $a$1:$z10 (missing $ after z) -# while the cursor is on cell Sheet3!A27.
-# The resulting Ref3D instance will have coords = (2, 3, 0, -16, 0, 26) -# and relflags = (0, 0, 0, 1, 0, 0).
-# So far, only one possibility of a sheet-relative component in -# a reference has been noticed: a 2D reference located in the "current sheet". -#
This will appear as coords = (0, 1, ...) and relflags = (1, 1, ...). class Ref3D(tuple): + """ + Represents an absolute or relative 3-dimensional reference to a box + of one or more cells. + + The ``coords`` attribute is a tuple of the form:: + + (shtxlo, shtxhi, rowxlo, rowxhi, colxlo, colxhi) + + where ``0 <= thingxlo <= thingx < thingxhi``. + + .. note:: + It is quite possible to have ``thingx > nthings``; for example + ``Print_Titles`` could have ``colxhi == 256`` and/or ``rowxhi == 65536`` + irrespective of how many columns/rows are actually used in the worksheet. + The caller will need to decide how to handle this situation. + Keyword: :class:`IndexError` :-) + + The components of the coords attribute are also available as individual + attributes: ``shtxlo``, ``shtxhi``, ``rowxlo``, ``rowxhi``, ``colxlo``, and + ``colxhi``. + + The ``relflags`` attribute is a 6-tuple of flags which indicate whether + the corresponding (sheet|row|col)(lo|hi) is relative (1) or absolute (0). + + .. note:: + There is necessarily no information available as to what cell(s) + the reference could possibly be relative to. The caller must decide what + if any use to make of ``oREL`` operands. + + .. note: + A partially relative reference may well be a typo. + For example, define name ``A1Z10`` as ``$a$1:$z10`` (missing ``$`` after + ``z``) while the cursor is on cell ``Sheet3!A27``. + + The resulting :class:`Ref3D` instance will have + ``coords = (2, 3, 0, -16, 0, 26)`` + and ``relflags = (0, 0, 0, 1, 0, 0).
+ + So far, only one possibility of a sheet-relative component in + a reference has been noticed: a 2D reference located in the + "current sheet". + + This will appear as ``coords = (0, 1, ...)`` and + ``relflags = (1, 1, ...)``. + + .. versionadded:: 0.6.0 + """ def __init__(self, atuple): self.coords = atuple[0:6] @@ -677,7 +696,6 @@ def __repr__(self): tConcat = 0x08 tLT, tLE, tEQ, tGE, tGT, tNE = range(0x09, 0x0F) -import operator as opr def nop(x): return x @@ -692,8 +710,8 @@ def _opr_gt(x, y): return x > y def _opr_ne(x, y): return x != y def num2strg(num): - """Attempt to emulate Excel's default conversion - from number to string. + """ + Attempt to emulate Excel's default conversion from number to string. """ s = str(num) if s.endswith(".0"): @@ -717,13 +735,13 @@ def num2strg(num): tGE: (_cmp_argdict, oBOOL, _opr_ge, 10, '>='), tGT: (_cmp_argdict, oBOOL, _opr_gt, 10, '>'), tNE: (_cmp_argdict, oBOOL, _opr_ne, 10, '<>'), - } +} unop_rules = { 0x13: (lambda x: -x, 70, '-', ''), # unary minus 0x12: (lambda x: x, 70, '+', ''), # unary plus 0x14: (lambda x: x / 100.0, 60, '', '%'),# percent - } +} LEAF_RANK = 90 FUNC_RANK = 90 @@ -739,7 +757,7 @@ def evaluate_name_formula(bk, nobj, namex, blah=0, level=0): bv = bk.biff_version reldelta = 1 # All defined name formulas use "Method B" [OOo docs] if blah: - print("::: evaluate_name_formula %r %r %d %d %r level=%d" \ + print("::: evaluate_name_formula %r %r %d %d %r level=%d" % (namex, nobj.name, fmlalen, bv, data, level), file=bk.logfile) hex_char_dump(data, 0, fmlalen, fout=bk.logfile) if level > STACK_PANIC_LEVEL: @@ -767,7 +785,7 @@ def do_binop(opcd, stk): '('[:bop.rank < rank], bop.text, ')'[:bop.rank < rank], - ]) + ]) resop = Operand(result_kind, None, rank, otext) try: bconv = argdict[bop.kind] @@ -797,7 +815,7 @@ def do_unaryop(opcode, result_kind, stk): aop.text, ')'[:aop.rank < rank], sym2, - ]) + ]) if val is not None: val = func(val) stk.append(Operand(result_kind, val, rank, otext)) @@ -821,7 +839,7 
@@ def not_in_name_formula(op_arg, oname_arg): oname = onames[opx] # + [" RVA"][optype] sz = sztab[opx] if blah: - print("Pos:%d Op:0x%02x Name:t%s Sz:%d opcode:%02xh optype:%02xh" \ + print("Pos:%d Op:0x%02x Name:t%s Sz:%d opcode:%02xh optype:%02xh" % (pos, op, oname, sz, opcode, optype), file=bk.logfile) print("Stack =", stack, file=bk.logfile) if sz == -2: @@ -851,7 +869,7 @@ def not_in_name_formula(op_arg, oname_arg): '('[:bop.rank < rank], bop.text, ')'[:bop.rank < rank], - ]) + ]) res = Operand(oREF) res.text = otext if bop.kind == oERR or aop.kind == oERR: @@ -901,7 +919,7 @@ def not_in_name_formula(op_arg, oname_arg): '('[:bop.rank < rank], bop.text, ')'[:bop.rank < rank], - ]) + ]) res = Operand(oREF, None, rank, otext) if bop.kind == oERR or aop.kind == oERR: res.kind = oERR @@ -932,7 +950,7 @@ def not_in_name_formula(op_arg, oname_arg): '('[:bop.rank < rank], bop.text, ')'[:bop.rank < rank], - ]) + ]) res = Operand(oREF, None, rank, otext) if bop.kind == oERR or aop.kind == oERR: res = oERR @@ -996,7 +1014,7 @@ def not_in_name_formula(op_arg, oname_arg): else: sz = 4 if blah: - print(" subop=%02xh subname=t%s sz=%d nc=%02xh" \ + print(" subop=%02xh subname=t%s sz=%d nc=%02xh" % (subop, subname, sz, nc), file=bk.logfile) elif 0x1A <= opcode <= 0x1B: # tSheet, tEndSheet assert bv < 50 @@ -1029,17 +1047,17 @@ def not_in_name_formula(op_arg, oname_arg): funcx = unpack("<" + " BH"[nb], data[pos+1:pos+1+nb])[0] func_attrs = func_defs.get(funcx, None) if not func_attrs: - print("*** formula/tFunc unknown FuncID:%d" \ + print("*** formula/tFunc unknown FuncID:%d" % funcx, file=bk.logfile) spush(unk_opnd) else: func_name, nargs = func_attrs[:2] if blah: - print(" FuncID=%d name=%s nargs=%d" \ + print(" FuncID=%d name=%s nargs=%d" % (funcx, func_name, nargs), file=bk.logfile) assert len(stack) >= nargs if nargs: - argtext = listsep.join([arg.text for arg in stack[-nargs:]]) + argtext = listsep.join(arg.text for arg in stack[-nargs:]) otext = "%s(%s)" % (func_name, 
argtext) del stack[-nargs:] else: @@ -1052,22 +1070,22 @@ def not_in_name_formula(op_arg, oname_arg): prompt, nargs = divmod(nargs, 128) macro, funcx = divmod(funcx, 32768) if blah: - print(" FuncID=%d nargs=%d macro=%d prompt=%d" \ + print(" FuncID=%d nargs=%d macro=%d prompt=%d" % (funcx, nargs, macro, prompt), file=bk.logfile) func_attrs = func_defs.get(funcx, None) if not func_attrs: - print("*** formula/tFuncVar unknown FuncID:%d" \ + print("*** formula/tFuncVar unknown FuncID:%d" % funcx, file=bk.logfile) spush(unk_opnd) else: func_name, minargs, maxargs = func_attrs[:3] if blah: - print(" name: %r, min~max args: %d~%d" \ + print(" name: %r, min~max args: %d~%d" % (func_name, minargs, maxargs), file=bk.logfile) assert minargs <= nargs <= maxargs assert len(stack) >= nargs assert len(stack) >= nargs - argtext = listsep.join([arg.text for arg in stack[-nargs:]]) + argtext = listsep.join(arg.text for arg in stack[-nargs:]) otext = "%s(%s)" % (func_name, argtext) res = Operand(oUNK, None, FUNC_RANK, otext) if funcx == 1: # IF @@ -1110,14 +1128,13 @@ def not_in_name_formula(op_arg, oname_arg): if not tgtobj.evaluated: ### recursive ### evaluate_name_formula(bk, tgtobj, tgtnamex, blah, level+1) - if tgtobj.macro or tgtobj.binary \ - or tgtobj.any_err: + if tgtobj.macro or tgtobj.binary or tgtobj.any_err: if blah: tgtobj.dump( bk.logfile, header="!!! 
tgtobj has problems!!!", footer="----------- --------", - ) + ) res = Operand(oUNK, None) any_err = any_err or tgtobj.macro or tgtobj.binary or tgtobj.any_err any_rel = any_rel or tgtobj.any_rel @@ -1188,8 +1205,7 @@ def not_in_name_formula(op_arg, oname_arg): shx1, shx2 = get_externsheet_local_range(bk, refx, blah) else: res = get_cell_addr(data, pos+15, bv, reldelta) - raw_extshtx, raw_shx1, raw_shx2 = \ - unpack(" STACK_ALARM_LEVEL: blah = 1 reldelta = fmlatype in (FMLA_TYPE_SHARED, FMLA_TYPE_NAME, FMLA_TYPE_COND_FMT, FMLA_TYPE_DATA_VAL) data = fmla bv = bk.biff_version if blah: - print("::: decompile_formula len=%d fmlatype=%r browx=%r bcolx=%r reldelta=%d %r level=%d" \ + print("::: decompile_formula len=%d fmlatype=%r browx=%r bcolx=%r reldelta=%d %r level=%d" % (fmlalen, fmlatype, browx, bcolx, reldelta, data, level), file=bk.logfile) hex_char_dump(data, 0, fmlalen, fout=bk.logfile) if level > STACK_PANIC_LEVEL: @@ -1362,7 +1376,6 @@ def decompile_formula(bk, fmla, fmlalen, stack = [] any_rel = 0 any_err = 0 - any_external = 0 unk_opnd = Operand(oUNK, None) error_opnd = Operand(oERR, None) spush = stack.append @@ -1380,7 +1393,7 @@ def do_binop(opcd, stk): '('[:bop.rank < rank], bop.text, ')'[:bop.rank < rank], - ]) + ]) resop = Operand(result_kind, None, rank, otext) stk.append(resop) @@ -1394,7 +1407,7 @@ def do_unaryop(opcode, result_kind, stk): aop.text, ')'[:aop.rank < rank], sym2, - ]) + ]) stk.append(Operand(result_kind, None, rank, otext)) def unexpected_opcode(op_arg, oname_arg): @@ -1417,7 +1430,7 @@ def unexpected_opcode(op_arg, oname_arg): oname = onames[opx] # + [" RVA"][optype] sz = sztab[opx] if blah: - print("Pos:%d Op:0x%02x opname:t%s Sz:%d opcode:%02xh optype:%02xh" \ + print("Pos:%d Op:0x%02x opname:t%s Sz:%d opcode:%02xh optype:%02xh" % (pos, op, oname, sz, opcode, optype), file=bk.logfile) print("Stack =", stack, file=bk.logfile) if sz == -2: @@ -1458,7 +1471,7 @@ def unexpected_opcode(op_arg, oname_arg): '('[:bop.rank < rank], 
bop.text, ')'[:bop.rank < rank], - ]) + ]) res = Operand(oREF) res.text = otext if bop.kind == oERR or aop.kind == oERR: @@ -1494,7 +1507,7 @@ def unexpected_opcode(op_arg, oname_arg): '('[:bop.rank < rank], bop.text, ')'[:bop.rank < rank], - ]) + ]) res = Operand(oREF, None, rank, otext) if bop.kind == oERR or aop.kind == oERR: res.kind = oERR @@ -1521,7 +1534,7 @@ def unexpected_opcode(op_arg, oname_arg): '('[:bop.rank < rank], bop.text, ')'[:bop.rank < rank], - ]) + ]) res = Operand(oREF, None, rank, otext) if bop.kind == oERR or aop.kind == oERR: res = oERR @@ -1569,7 +1582,7 @@ def unexpected_opcode(op_arg, oname_arg): else: sz = 4 if blah: - print(" subop=%02xh subname=t%s sz=%d nc=%02xh" \ + print(" subop=%02xh subname=t%s sz=%d nc=%02xh" % (subop, subname, sz, nc), file=bk.logfile) elif 0x1A <= opcode <= 0x1B: # tSheet, tEndSheet assert bv < 50 @@ -1607,11 +1620,11 @@ def unexpected_opcode(op_arg, oname_arg): else: func_name, nargs = func_attrs[:2] if blah: - print(" FuncID=%d name=%s nargs=%d" \ + print(" FuncID=%d name=%s nargs=%d" % (funcx, func_name, nargs), file=bk.logfile) assert len(stack) >= nargs if nargs: - argtext = listsep.join([arg.text for arg in stack[-nargs:]]) + argtext = listsep.join(arg.text for arg in stack[-nargs:]) otext = "%s(%s)" % (func_name, argtext) del stack[-nargs:] else: @@ -1624,7 +1637,7 @@ def unexpected_opcode(op_arg, oname_arg): prompt, nargs = divmod(nargs, 128) macro, funcx = divmod(funcx, 32768) if blah: - print(" FuncID=%d nargs=%d macro=%d prompt=%d" \ + print(" FuncID=%d nargs=%d macro=%d prompt=%d" % (funcx, nargs, macro, prompt), file=bk.logfile) #### TODO #### if funcx == 255: # call add-in function if funcx == 255: @@ -1632,18 +1645,18 @@ def unexpected_opcode(op_arg, oname_arg): else: func_attrs = func_defs.get(funcx, None) if not func_attrs: - print("*** formula/tFuncVar unknown FuncID:%d" \ + print("*** formula/tFuncVar unknown FuncID:%d" % funcx, file=bk.logfile) spush(unk_opnd) else: func_name, minargs, 
maxargs = func_attrs[:3] if blah: - print(" name: %r, min~max args: %d~%d" \ + print(" name: %r, min~max args: %d~%d" % (func_name, minargs, maxargs), file=bk.logfile) assert minargs <= nargs <= maxargs assert len(stack) >= nargs assert len(stack) >= nargs - argtext = listsep.join([arg.text for arg in stack[-nargs:]]) + argtext = listsep.join(arg.text for arg in stack[-nargs:]) otext = "%s(%s)" % (func_name, argtext) res = Operand(oUNK, None, FUNC_RANK, otext) del stack[-nargs:] @@ -1736,8 +1749,7 @@ def unexpected_opcode(op_arg, oname_arg): shx1, shx2 = get_externsheet_local_range(bk, refx, blah) else: res = get_cell_addr(data, pos+15, bv, reldelta, browx, bcolx) - raw_extshtx, raw_shx1, raw_shx2 = \ - unpack("> bk.logfile, "!!!! Self-referential !!!!" @@ -1859,7 +1870,7 @@ def unexpected_opcode(op_arg, oname_arg): pos += sz any_rel = not not any_rel if blah: - print("End of formula. level=%d any_rel=%d any_err=%d stack=%r" % \ + print("End of formula. level=%d any_rel=%d any_err=%d stack=%r" % (level, not not any_rel, any_err, stack), file=bk.logfile) if len(stack) >= 2: print("*** Stack has unprocessed args", file=bk.logfile) @@ -1895,7 +1906,7 @@ def dump_formula(bk, data, fmlalen, bv, reldelta, blah=0, isname=0): sz = sztab[opx] if blah: - print("Pos:%d Op:0x%02x Name:t%s Sz:%d opcode:%02xh optype:%02xh" \ + print("Pos:%d Op:0x%02x Name:t%s Sz:%d opcode:%02xh optype:%02xh" % (pos, op, oname, sz, opcode, optype), file=bk.logfile) if not optype: if 0x01 <= opcode <= 0x02: # tExp, tTbl @@ -2023,7 +2034,7 @@ def dump_formula(bk, data, fmlalen, bv, reldelta, blah=0, isname=0): return pos += sz if blah: - print("End of formula. any_rel=%d any_err=%d stack=%r" % \ + print("End of formula. 
any_rel=%d any_err=%d stack=%r" % (not not any_rel, any_err, stack), file=bk.logfile) if len(stack) >= 2: print("*** Stack has unprocessed args", file=bk.logfile) @@ -2064,16 +2075,12 @@ def colnamerel(colx, colxrel, bcolx=None, r1c1=0): return "C" return colname((bcolx + colx) % 256) -## -# Utility function: (5, 7) => 'H6' def cellname(rowx, colx): - """ (5, 7) => 'H6' """ + """Utility function: ``(5, 7)`` => ``'H6'``""" return "%s%d" % (colname(colx), rowx+1) -## -# Utility function: (5, 7) => '$H$6' def cellnameabs(rowx, colx, r1c1=0): - """ (5, 7) => '$H$6' or 'R8C6'""" + """Utility function: ``(5, 7)`` => ``'$H$6'`` or ``'R8C6'``""" if r1c1: return "R%dC%d" % (rowx+1, colx+1) return "$%s$%d" % (colname(colx), rowx+1) @@ -2090,10 +2097,8 @@ def cellnamerel(rowx, colx, rowxrel, colxrel, browx=None, bcolx=None, r1c1=0): return r + c return c + r -## -# Utility function: 7 => 'H', 27 => 'AB' def colname(colx): - """ 7 => 'H', 27 => 'AB' """ + """Utility function: ``7`` => ``'H'``, ``27`` => ``'AB'``""" alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" if colx <= 25: return alphabet[colx] @@ -2102,7 +2107,7 @@ def colname(colx): return alphabet[xdiv26 - 1] + alphabet[xmod26] def rangename2d(rlo, rhi, clo, chi, r1c1=0): - """ (5, 20, 7, 10) => '$H$6:$J$20' """ + """ ``(5, 20, 7, 10)`` => ``'$H$6:$J$20'`` """ if r1c1: return if rhi == rlo+1 and chi == clo+1: @@ -2118,25 +2123,31 @@ def rangename2drel(rlo_rhi_clo_chi, rlorel_rhirel_clorel_chirel, browx=None, bco r1c1 = True return "%s:%s" % ( cellnamerel(rlo, clo, rlorel, clorel, browx, bcolx, r1c1), - cellnamerel(rhi-1, chi-1, rhirel, chirel, browx, bcolx, r1c1) - ) -## -# Utility function: -#
Ref3D((1, 4, 5, 20, 7, 10)) => 'Sheet2:Sheet3!$H$6:$J$20' + cellnamerel(rhi-1, chi-1, rhirel, chirel, browx, bcolx, r1c1), + ) + + def rangename3d(book, ref3d): - """ Ref3D(1, 4, 5, 20, 7, 10) => 'Sheet2:Sheet3!$H$6:$J$20' - (assuming Excel's default sheetnames) """ + """ + Utility function: + ``Ref3D(1, 4, 5, 20, 7, 10)`` => + ``'Sheet2:Sheet3!$H$6:$J$20'`` + (assuming Excel's default sheetnames) + """ coords = ref3d.coords return "%s!%s" % ( sheetrange(book, *coords[:2]), rangename2d(*coords[2:6])) -## -# Utility function: -#
Ref3D(coords=(0, 1, -32, -22, -13, 13), relflags=(0, 0, 1, 1, 1, 1)) -# R1C1 mode => 'Sheet1!R[-32]C[-13]:R[-23]C[12]' -# A1 mode => depends on base cell (browx, bcolx) def rangename3drel(book, ref3d, browx=None, bcolx=None, r1c1=0): + """ + Utility function: + ``Ref3D(coords=(0, 1, -32, -22, -13, 13), relflags=(0, 0, 1, 1, 1, 1))`` + + In R1C1 mode => ``'Sheet1!R[-32]C[-13]:R[-23]C[12]'`` + + In A1 mode => depends on base cell ``(browx, bcolx)`` + """ coords = ref3d.coords relflags = ref3d.relflags shdesc = sheetrangerel(book, coords[:2], relflags[:2]) @@ -2154,7 +2165,7 @@ def quotedsheetname(shnames, shx): -2: "internal; deleted sheet", -3: "internal; macro sheet", -4: "<>", - }.get(shx, "?error %d?" % shx) + }.get(shx, "?error %d?" % shx) if "'" in shname: return "'" + shname.replace("'", "''") + "'" if " " in shname: diff --git a/SUEWSPrepare/Modules/xlrd/info.py b/SUEWSPrepare/Modules/xlrd/info.py index a57a784..73a1685 100644 --- a/SUEWSPrepare/Modules/xlrd/info.py +++ b/SUEWSPrepare/Modules/xlrd/info.py @@ -1 +1 @@ -__VERSION__ = "0.9.4" +__version__ = __VERSION__ = "1.1.0" diff --git a/SUEWSPrepare/Modules/xlrd/licences.py b/SUEWSPrepare/Modules/xlrd/licences.py deleted file mode 100644 index 1e262a9..0000000 --- a/SUEWSPrepare/Modules/xlrd/licences.py +++ /dev/null @@ -1,77 +0,0 @@ -# -*- coding: cp1252 -*- - -""" -Portions copyright 2005-2009, Stephen John Machin, Lingfo Pty Ltd -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. - -3. 
None of the names of Stephen John Machin, Lingfo Pty Ltd and any -contributors may be used to endorse or promote products derived from this -software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS -BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF -THE POSSIBILITY OF SUCH DAMAGE. -""" - -""" -/*- - * Copyright (c) 2001 David Giffin. - * All rights reserved. - * - * Based on the the Java version: Andrew Khan Copyright (c) 2000. - * - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * 3. All advertising materials mentioning features or use of this - * software must display the following acknowledgment: - * "This product includes software developed by - * David Giffin ." - * - * 4. 
Redistributions of any form whatsoever must retain the following - * acknowledgment: - * "This product includes software developed by - * David Giffin ." - * - * THIS SOFTWARE IS PROVIDED BY DAVID GIFFIN ``AS IS'' AND ANY - * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DAVID GIFFIN OR - * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED - * OF THE POSSIBILITY OF SUCH DAMAGE. - */ -""" diff --git a/SUEWSPrepare/Modules/xlrd/sheet.py b/SUEWSPrepare/Modules/xlrd/sheet.py index 721d17c..79c200d 100644 --- a/SUEWSPrepare/Modules/xlrd/sheet.py +++ b/SUEWSPrepare/Modules/xlrd/sheet.py @@ -1,40 +1,20 @@ -# -*- coding: cp1252 -*- - -## -#

Portions copyright 2005-2013 Stephen John Machin, Lingfo Pty Ltd

-#

This module is part of the xlrd package, which is released under a BSD-style licence.

-## - -# 2010-04-25 SJM fix zoom factors cooking logic -# 2010-04-15 CW r4253 fix zoom factors cooking logic -# 2010-04-09 CW r4248 add a flag so xlutils knows whether or not to write a PANE record -# 2010-03-29 SJM Fixed bug in adding new empty rows in put_cell_ragged -# 2010-03-28 SJM Tailored put_cell method for each of ragged_rows=False (fixed speed regression) and =True (faster) -# 2010-03-25 CW r4236 Slight refactoring to remove method calls -# 2010-03-25 CW r4235 Collapse expand_cells into put_cell and enhance the raggedness. This should save even more memory! -# 2010-03-25 CW r4234 remove duplicate chunks for extend_cells; refactor to remove put_number_cell and put_blank_cell which essentially duplicated the code of put_cell -# 2010-03-10 SJM r4222 Added reading of the PANE record. -# 2010-03-10 SJM r4221 Preliminary work on "cooked" mag factors; use at own peril -# 2010-03-01 SJM Reading SCL record -# 2010-03-01 SJM Added ragged_rows functionality -# 2009-08-23 SJM Reduced CPU time taken by parsing MULBLANK records. -# 2009-08-18 SJM Used __slots__ and sharing to reduce memory consumed by Rowinfo instances -# 2009-05-31 SJM Fixed problem with no CODEPAGE record on extremely minimal BIFF2.x 3rd-party file -# 2009-04-27 SJM Integrated on_demand patch by Armando Serrano Lombillo -# 2008-02-09 SJM Excel 2.0: build XFs on the fly from cell attributes -# 2007-12-04 SJM Added support for Excel 2.x (BIFF2) files. -# 2007-10-11 SJM Added missing entry for blank cell type to ctype_text -# 2007-07-11 SJM Allow for BIFF2/3-style FORMAT record in BIFF4/8 file -# 2007-04-22 SJM Remove experimental "trimming" facility. +# -*- coding: utf-8 -*- +# Copyright (c) 2005-2013 Stephen John Machin, Lingfo Pty Ltd +# This module is part of the xlrd package, which is released under a +# BSD-style licence. 
from __future__ import print_function from array import array -from struct import unpack, calcsize +from struct import calcsize, unpack + from .biffh import * +from .formatting import Format, nearest_colour_index +from .formula import ( + FMLA_TYPE_CELL, FMLA_TYPE_SHARED, decompile_formula, dump_formula, + rangename2d, +) from .timemachine import * -from .formula import dump_formula, decompile_formula, rangename2d, FMLA_TYPE_CELL, FMLA_TYPE_SHARED -from .formatting import nearest_colour_index, Format DEBUG = 0 OBJ_MSO_DEBUG = 0 @@ -63,243 +43,279 @@ # The real thing is the visibility attribute from the BOUNDSHEET record. ("sheet_visible", 0), ("show_in_page_break_preview", 0), - ) +) -## -#

Contains the data for one worksheet.

-# -#

In the cell access functions, "rowx" is a row index, counting from zero, and "colx" is a -# column index, counting from zero. -# Negative values for row/column indexes and slice positions are supported in the expected fashion.

-# -#

For information about cell types and cell values, refer to the documentation of the {@link #Cell} class.

-# -#

WARNING: You don't call this class yourself. You access Sheet objects via the Book object that -# was returned when you called xlrd.open_workbook("myfile.xls").

class Sheet(BaseObject): - ## - # Name of sheet. + """ + Contains the data for one worksheet. + + In the cell access functions, ``rowx`` is a row index, counting from + zero, and ``colx`` is a column index, counting from zero. + Negative values for row/column indexes and slice positions are supported in + the expected fashion. + + For information about cell types and cell values, refer to the documentation + of the :class:`Cell` class. + + .. warning:: + + You don't instantiate this class yourself. You access :class:`Sheet` + objects via the :class:`~xlrd.book.Book` object that + was returned when you called :func:`xlrd.open_workbook`. + """ + + #: Name of sheet. name = '' - ## - # A reference to the Book object to which this sheet belongs. - # Example usage: some_sheet.book.datemode + #: A reference to the :class:`~xlrd.book.Book` object to which this sheet + #: belongs. + #: + #: Example usage: ``some_sheet.book.datemode`` book = None - - ## - # Number of rows in sheet. A row index is in range(thesheet.nrows). + + #: Number of rows in sheet. A row index is in ``range(thesheet.nrows)``. nrows = 0 - ## - # Nominal number of columns in sheet. It is 1 + the maximum column index - # found, ignoring trailing empty cells. See also open_workbook(ragged_rows=?) - # and Sheet.{@link #Sheet.row_len}(row_index). + #: Nominal number of columns in sheet. It is one more than the maximum + #: column index found, ignoring trailing empty cells. + #: See also the ``ragged_rows`` parameter to :func:`~xlrd.open_workbook` + #: and :meth:`~xlrd.sheet.Sheet.row_len`. ncols = 0 - ## - # The map from a column index to a {@link #Colinfo} object. Often there is an entry - # in COLINFO records for all column indexes in range(257). - # Note that xlrd ignores the entry for the non-existent - # 257th column. On the other hand, there may be no entry for unused columns. - #
-- New in version 0.6.1. Populated only if open_workbook(formatting_info=True). + + #: The map from a column index to a :class:`Colinfo` object. Often there is + #: an entry in ``COLINFO`` records for all column indexes in ``range(257)``. + #: + #: .. note:: + #: xlrd ignores the entry for the non-existent + #: 257th column. + #: + #: On the other hand, there may be no entry for unused columns. + #: + #: .. versionadded:: 0.6.1 + #: + #: Populated only if ``open_workbook(..., formatting_info=True)`` colinfo_map = {} - ## - # The map from a row index to a {@link #Rowinfo} object. Note that it is possible - # to have missing entries -- at least one source of XLS files doesn't - # bother writing ROW records. - #
-- New in version 0.6.1. Populated only if open_workbook(formatting_info=True). + #: The map from a row index to a :class:`Rowinfo` object. + #: + #: ..note:: + #: It is possible to have missing entries -- at least one source of + #: XLS files doesn't bother writing ``ROW`` records. + #: + #: .. versionadded:: 0.6.1 + #: + #: Populated only if ``open_workbook(..., formatting_info=True)`` rowinfo_map = {} - ## - # List of address ranges of cells containing column labels. - # These are set up in Excel by Insert > Name > Labels > Columns. - #
-- New in version 0.6.0 - #
How to deconstruct the list: - #
-    # for crange in thesheet.col_label_ranges:
-    #     rlo, rhi, clo, chi = crange
-    #     for rx in xrange(rlo, rhi):
-    #         for cx in xrange(clo, chi):
-    #             print "Column label at (rowx=%d, colx=%d) is %r" \
-    #                 (rx, cx, thesheet.cell_value(rx, cx))
-    # 
+ #: List of address ranges of cells containing column labels. + #: These are set up in Excel by Insert > Name > Labels > Columns. + #: + #: .. versionadded:: 0.6.0 + #: + #: How to deconstruct the list: + #: + #: .. code-block:: python + #: + #: for crange in thesheet.col_label_ranges: + #: rlo, rhi, clo, chi = crange + #: for rx in xrange(rlo, rhi): + #: for cx in xrange(clo, chi): + #: print "Column label at (rowx=%d, colx=%d) is %r" \ + #: (rx, cx, thesheet.cell_value(rx, cx)) col_label_ranges = [] - ## - # List of address ranges of cells containing row labels. - # For more details, see col_label_ranges above. - #
-- New in version 0.6.0 + #: List of address ranges of cells containing row labels. + #: For more details, see :attr:`col_label_ranges`. + #: + #: .. versionadded:: 0.6.0 row_label_ranges = [] - ## - # List of address ranges of cells which have been merged. - # These are set up in Excel by Format > Cells > Alignment, then ticking - # the "Merge cells" box. - #
-- New in version 0.6.1. Extracted only if open_workbook(formatting_info=True). - #
How to deconstruct the list: - #
-    # for crange in thesheet.merged_cells:
-    #     rlo, rhi, clo, chi = crange
-    #     for rowx in xrange(rlo, rhi):
-    #         for colx in xrange(clo, chi):
-    #             # cell (rlo, clo) (the top left one) will carry the data
-    #             # and formatting info; the remainder will be recorded as
-    #             # blank cells, but a renderer will apply the formatting info
-    #             # for the top left cell (e.g. border, pattern) to all cells in
-    #             # the range.
-    # 
+ #: List of address ranges of cells which have been merged. + #: These are set up in Excel by Format > Cells > Alignment, then ticking + #: the "Merge cells" box. + #: + #: .. note:: + #: The upper limits are exclusive: i.e. ``[2, 3, 7, 9]`` only + #: spans two cells. + #: + #: .. note:: Extracted only if ``open_workbook(..., formatting_info=True)`` + #: + #: .. versionadded:: 0.6.1 + #: + #: How to deconstruct the list: + #: + #: .. code-block:: python + #: + #: for crange in thesheet.merged_cells: + #: rlo, rhi, clo, chi = crange + #: for rowx in xrange(rlo, rhi): + #: for colx in xrange(clo, chi): + #: # cell (rlo, clo) (the top left one) will carry the data + #: # and formatting info; the remainder will be recorded as + #: # blank cells, but a renderer will apply the formatting info + #: # for the top left cell (e.g. border, pattern) to all cells in + #: # the range. merged_cells = [] - - ## - # Mapping of (rowx, colx) to list of (offset, font_index) tuples. The offset - # defines where in the string the font begins to be used. - # Offsets are expected to be in ascending order. - # If the first offset is not zero, the meaning is that the cell's XF's font should - # be used from offset 0. - #
This is a sparse mapping. There is no entry for cells that are not formatted with - # rich text. - #
How to use: - #
-    # runlist = thesheet.rich_text_runlist_map.get((rowx, colx))
-    # if runlist:
-    #     for offset, font_index in runlist:
-    #         # do work here.
-    #         pass
-    # 
- # Populated only if open_workbook(formatting_info=True). - #
-- New in version 0.7.2. - #
  - rich_text_runlist_map = {} - - ## - # Default column width from DEFCOLWIDTH record, else None. - # From the OOo docs:
- # """Column width in characters, using the width of the zero character - # from default font (first FONT record in the file). Excel adds some - # extra space to the default width, depending on the default font and - # default font size. The algorithm how to exactly calculate the resulting - # column width is not known.
- # Example: The default width of 8 set in this record results in a column - # width of 8.43 using Arial font with a size of 10 points."""
- # For the default hierarchy, refer to the {@link #Colinfo} class. - #
-- New in version 0.6.1 + + #: Mapping of ``(rowx, colx)`` to list of ``(offset, font_index)`` tuples. + #: The offset defines where in the string the font begins to be used. + #: Offsets are expected to be in ascending order. + #: If the first offset is not zero, the meaning is that the cell's ``XF``'s + #: font should be used from offset 0. + #: + #: This is a sparse mapping. There is no entry for cells that are not + #: formatted with rich text. + #: + #: How to use: + #: + #: .. code-block:: python + #: + #: runlist = thesheet.rich_text_runlist_map.get((rowx, colx)) + #: if runlist: + #: for offset, font_index in runlist: + #: # do work here. + #: pass + #: + #: .. versionadded:: 0.7.2 + #: + #: Populated only if ``open_workbook(..., formatting_info=True)`` + rich_text_runlist_map = {} + + #: Default column width from ``DEFCOLWIDTH`` record, else ``None``. + #: From the OOo docs: + #: + #: Column width in characters, using the width of the zero character + #: from default font (first FONT record in the file). Excel adds some + #: extra space to the default width, depending on the default font and + #: default font size. The algorithm how to exactly calculate the resulting + #: column width is not known. + #: Example: The default width of 8 set in this record results in a column + #: width of 8.43 using Arial font with a size of 10 points. + #: + #: For the default hierarchy, refer to the :class:`Colinfo` class. + #: + #: .. versionadded:: 0.6.1 defcolwidth = None - ## - # Default column width from STANDARDWIDTH record, else None. - # From the OOo docs:
- # """Default width of the columns in 1/256 of the width of the zero - # character, using default font (first FONT record in the file)."""
- # For the default hierarchy, refer to the {@link #Colinfo} class. - #
-- New in version 0.6.1 + #: Default column width from ``STANDARDWIDTH`` record, else ``None``. + #: + #: From the OOo docs: + #: + #: Default width of the columns in 1/256 of the width of the zero + #: character, using default font (first FONT record in the file). + #: + #: For the default hierarchy, refer to the :class:`Colinfo` class. + #: + #: .. versionadded:: 0.6.1 standardwidth = None - ## - # Default value to be used for a row if there is - # no ROW record for that row. - # From the optional DEFAULTROWHEIGHT record. + #: Default value to be used for a row if there is + #: no ``ROW`` record for that row. + #: From the *optional* ``DEFAULTROWHEIGHT`` record. default_row_height = None - ## - # Default value to be used for a row if there is - # no ROW record for that row. - # From the optional DEFAULTROWHEIGHT record. + #: Default value to be used for a row if there is + #: no ``ROW`` record for that row. + #: From the *optional* ``DEFAULTROWHEIGHT`` record. default_row_height_mismatch = None - ## - # Default value to be used for a row if there is - # no ROW record for that row. - # From the optional DEFAULTROWHEIGHT record. + #: Default value to be used for a row if there is + #: no ``ROW`` record for that row. + #: From the *optional* ``DEFAULTROWHEIGHT`` record. default_row_hidden = None - ## - # Default value to be used for a row if there is - # no ROW record for that row. - # From the optional DEFAULTROWHEIGHT record. + #: Default value to be used for a row if there is + #: no ``ROW`` record for that row. + #: From the *optional* ``DEFAULTROWHEIGHT`` record. default_additional_space_above = None - ## - # Default value to be used for a row if there is - # no ROW record for that row. - # From the optional DEFAULTROWHEIGHT record. + #: Default value to be used for a row if there is + #: no ``ROW`` record for that row. + #: From the *optional* ``DEFAULTROWHEIGHT`` record. default_additional_space_below = None - ## - # Visibility of the sheet. 
0 = visible, 1 = hidden (can be unhidden - # by user -- Format/Sheet/Unhide), 2 = "very hidden" (can be unhidden - # only by VBA macro). + #: Visibility of the sheet:: + #: + #: 0 = visible + #: 1 = hidden (can be unhidden by user -- Format -> Sheet -> Unhide) + #: 2 = "very hidden" (can be unhidden only by VBA macro). visibility = 0 - ## - # A 256-element tuple corresponding to the contents of the GCW record for this sheet. - # If no such record, treat as all bits zero. - # Applies to BIFF4-7 only. See docs of the {@link #Colinfo} class for discussion. + #: A 256-element tuple corresponding to the contents of the GCW record for + #: this sheet. If no such record, treat as all bits zero. + #: Applies to BIFF4-7 only. See docs of the :class:`Colinfo` class for + #: discussion. gcw = (0, ) * 256 - ## - #

A list of {@link #Hyperlink} objects corresponding to HLINK records found - # in the worksheet.
-- New in version 0.7.2

+ #: A list of :class:`Hyperlink` objects corresponding to ``HLINK`` records + #: found in the worksheet. + #: + #: .. versionadded:: 0.7.2 hyperlink_list = [] - ## - #

A sparse mapping from (rowx, colx) to an item in {@link #Sheet.hyperlink_list}. - # Cells not covered by a hyperlink are not mapped. - # It is possible using the Excel UI to set up a hyperlink that - # covers a larger-than-1x1 rectangle of cells. - # Hyperlink rectangles may overlap (Excel doesn't check). - # When a multiply-covered cell is clicked on, the hyperlink that is activated - # (and the one that is mapped here) is the last in hyperlink_list. - #
-- New in version 0.7.2

+ #: A sparse mapping from ``(rowx, colx)`` to an item in + #: :attr:`~xlrd.sheet.Sheet.hyperlink_list`. + #: Cells not covered by a hyperlink are not mapped. + #: It is possible using the Excel UI to set up a hyperlink that + #: covers a larger-than-1x1 rectangle of cells. + #: Hyperlink rectangles may overlap (Excel doesn't check). + #: When a multiply-covered cell is clicked on, the hyperlink that is + #: activated + #: (and the one that is mapped here) is the last in + #: :attr:`~xlrd.sheet.Sheet.hyperlink_list`. + #: + #: .. versionadded:: 0.7.2 hyperlink_map = {} - ## - #

A sparse mapping from (rowx, colx) to a {@link #Note} object. - # Cells not containing a note ("comment") are not mapped. - #
-- New in version 0.7.2

- cell_note_map = {} - - ## - # Number of columns in left pane (frozen panes; for split panes, see comments below in code) + #: A sparse mapping from ``(rowx, colx)`` to a :class:`Note` object. + #: Cells not containing a note ("comment") are not mapped. + #: + #: .. versionadded:: 0.7.2 + cell_note_map = {} + + #: Number of columns in left pane (frozen panes; for split panes, see + #: comments in code) vert_split_pos = 0 - ## - # Number of rows in top pane (frozen panes; for split panes, see comments below in code) + #: Number of rows in top pane (frozen panes; for split panes, see comments + #: in code) horz_split_pos = 0 - ## - # Index of first visible row in bottom frozen/split pane + #: Index of first visible row in bottom frozen/split pane horz_split_first_visible = 0 - ## - # Index of first visible column in right frozen/split pane + #: Index of first visible column in right frozen/split pane vert_split_first_visible = 0 - ## - # Frozen panes: ignore it. Split panes: explanation and diagrams in OOo docs. + #: Frozen panes: ignore it. Split panes: explanation and diagrams in + #: OOo docs. split_active_pane = 0 - ## - # Boolean specifying if a PANE record was present, ignore unless you're xlutils.copy + #: Boolean specifying if a ``PANE`` record was present, ignore unless you're + #: ``xlutils.copy`` has_pane_record = 0 - ## - # A list of the horizontal page breaks in this sheet. - # Breaks are tuples in the form (index of row after break, start col index, end col index). - # Populated only if open_workbook(formatting_info=True). - #
-- New in version 0.7.2 + #: A list of the horizontal page breaks in this sheet. + #: Breaks are tuples in the form + #: ``(index of row after break, start col index, end col index)``. + #: + #: Populated only if ``open_workbook(..., formatting_info=True)`` + #: + #: .. versionadded:: 0.7.2 horizontal_page_breaks = [] - ## - # A list of the vertical page breaks in this sheet. - # Breaks are tuples in the form (index of col after break, start row index, end row index). - # Populated only if open_workbook(formatting_info=True). - #
-- New in version 0.7.2 + #: A list of the vertical page breaks in this sheet. + #: Breaks are tuples in the form + #: ``(index of col after break, start row index, end row index)``. + #: + #: Populated only if ``open_workbook(..., formatting_info=True)`` + #: + #: .. versionadded:: 0.7.2 vertical_page_breaks = [] - def __init__(self, book, position, name, number): self.book = book self.biff_version = book.biff_version @@ -364,16 +380,13 @@ def __init__(self, book, position, name, number): self.cooked_normal_view_mag_factor = 100 # Values (if any) actually stored on the XLS file - self.cached_page_break_preview_mag_factor = None # from WINDOW2 record - self.cached_normal_view_mag_factor = None # from WINDOW2 record + self.cached_page_break_preview_mag_factor = 0 # default (60%), from WINDOW2 record + self.cached_normal_view_mag_factor = 0 # default (100%), from WINDOW2 record self.scl_mag_factor = None # from SCL record self._ixfe = None # BIFF2 only self._cell_attr_to_xfx = {} # BIFF2.0 only - #### Don't initialise this here, use class attribute initialisation. - #### self.gcw = (0, ) * 256 #### - if self.biff_version >= 80: self.utter_max_rows = 65536 else: @@ -387,10 +400,10 @@ def __init__(self, book, position, name, number): # self._put_cell_rows_appended = 0 # self._put_cell_cells_appended = 0 - - ## - # {@link #Cell} object in the given row and column. def cell(self, rowx, colx): + """ + :class:`Cell` object in the given row and column. + """ if self.formatting_info: xfx = self.cell_xf_index(rowx, colx) else: @@ -399,24 +412,27 @@ def cell(self, rowx, colx): self._cell_types[rowx][colx], self._cell_values[rowx][colx], xfx, - ) + ) - ## - # Value of the cell in the given row and column. def cell_value(self, rowx, colx): + "Value of the cell in the given row and column." return self._cell_values[rowx][colx] - ## - # Type of the cell in the given row and column. - # Refer to the documentation of the {@link #Cell} class. 
def cell_type(self, rowx, colx): + """ + Type of the cell in the given row and column. + + Refer to the documentation of the :class:`Cell` class. + """ return self._cell_types[rowx][colx] - ## - # XF index of the cell in the given row and column. - # This is an index into Book.{@link #Book.xf_list}. - #
-- New in version 0.6.1 def cell_xf_index(self, rowx, colx): + """ + XF index of the cell in the given row and column. + This is an index into :attr:`~xlrd.book.Book.xf_list`. + + .. versionadded:: 0.6.1 + """ self.req_fmt_info() xfx = self._cell_xf_indexes[rowx][colx] if xfx > -1: @@ -441,46 +457,49 @@ def cell_xf_index(self, rowx, colx): self._xf_index_stats[3] += 1 return 15 - ## - # Returns the effective number of cells in the given row. For use with - # open_workbook(ragged_rows=True) which is likely to produce rows - # with fewer than {@link #Sheet.ncols} cells. - #
-- New in version 0.7.2 def row_len(self, rowx): + """ + Returns the effective number of cells in the given row. For use with + ``open_workbook(ragged_rows=True)`` which is likely to produce rows + with fewer than :attr:`~Sheet.ncols` cells. + + .. versionadded:: 0.7.2 + """ return len(self._cell_values[rowx]) - ## - # Returns a sequence of the {@link #Cell} objects in the given row. def row(self, rowx): + """ + Returns a sequence of the :class:`Cell` objects in the given row. + """ return [ self.cell(rowx, colx) for colx in xrange(len(self._cell_values[rowx])) - ] + ] - ## - # Returns a generator for iterating through each row. def get_rows(self): + "Returns a generator for iterating through each row." return (self.row(index) for index in range(self.nrows)) - ## - # Returns a slice of the types - # of the cells in the given row. def row_types(self, rowx, start_colx=0, end_colx=None): + """ + Returns a slice of the types of the cells in the given row. + """ if end_colx is None: return self._cell_types[rowx][start_colx:] return self._cell_types[rowx][start_colx:end_colx] - ## - # Returns a slice of the values - # of the cells in the given row. def row_values(self, rowx, start_colx=0, end_colx=None): + """ + Returns a slice of the values of the cells in the given row. + """ if end_colx is None: return self._cell_values[rowx][start_colx:] return self._cell_values[rowx][start_colx:end_colx] - ## - # Returns a slice of the {@link #Cell} objects in the given row. def row_slice(self, rowx, start_colx=0, end_colx=None): + """ + Returns a slice of the :class:`Cell` objects in the given row. + """ nc = len(self._cell_values[rowx]) if start_colx < 0: start_colx += nc @@ -493,11 +512,12 @@ def row_slice(self, rowx, start_colx=0, end_colx=None): return [ self.cell(rowx, colx) for colx in xrange(start_colx, end_colx) - ] + ] - ## - # Returns a slice of the {@link #Cell} objects in the given column. 
def col_slice(self, colx, start_rowx=0, end_rowx=None): + """ + Returns a slice of the :class:`Cell` objects in the given column. + """ nr = self.nrows if start_rowx < 0: start_rowx += nr @@ -510,11 +530,12 @@ def col_slice(self, colx, start_rowx=0, end_rowx=None): return [ self.cell(rowx, colx) for rowx in xrange(start_rowx, end_rowx) - ] + ] - ## - # Returns a slice of the values of the cells in the given column. def col_values(self, colx, start_rowx=0, end_rowx=None): + """ + Returns a slice of the values of the cells in the given column. + """ nr = self.nrows if start_rowx < 0: start_rowx += nr @@ -527,11 +548,12 @@ def col_values(self, colx, start_rowx=0, end_rowx=None): return [ self._cell_values[rowx][colx] for rowx in xrange(start_rowx, end_rowx) - ] + ] - ## - # Returns a slice of the types of the cells in the given column. def col_types(self, colx, start_rowx=0, end_rowx=None): + """ + Returns a slice of the types of the cells in the given column. + """ nr = self.nrows if start_rowx < 0: start_rowx += nr @@ -544,13 +566,8 @@ def col_types(self, colx, start_rowx=0, end_rowx=None): return [ self._cell_types[rowx][colx] for rowx in xrange(start_rowx, end_rowx) - ] + ] - ## - # Returns a sequence of the {@link #Cell} objects in the given column. - def col(self, colx): - return self.col_slice(colx) - # Above two lines just for the docs. Here's the real McCoy: col = col_slice # === Following methods are used in building the worksheet. 
@@ -558,18 +575,18 @@ def col(self, colx): def tidy_dimensions(self): if self.verbosity >= 3: - fprintf(self.logfile, + fprintf( + self.logfile, "tidy_dimensions: nrows=%d ncols=%d \n", self.nrows, self.ncols, - ) + ) if 1 and self.merged_cells: nr = nc = 0 umaxrows = self.utter_max_rows umaxcols = self.utter_max_cols for crange in self.merged_cells: rlo, rhi, clo, chi = crange - if not (0 <= rlo < rhi <= umaxrows) \ - or not (0 <= clo < chi <= umaxcols): + if not (0 <= rlo < rhi <= umaxrows) or not (0 <= clo < chi <= umaxcols): fprintf(self.logfile, "*** WARNING: sheet #%d (%r), MERGEDCELLS bad range %r\n", self.number, self.name, crange) @@ -577,14 +594,16 @@ def tidy_dimensions(self): if chi > nc: nc = chi if nc > self.ncols: self.ncols = nc + self._first_full_rowx = -2 if nr > self.nrows: # we put one empty cell at (nr-1,0) to make sure # we have the right number of rows. The ragged rows # will sort out the rest if needed. - self.put_cell(nr-1, 0, XL_CELL_EMPTY, '', -1) - if self.verbosity >= 1 \ - and (self.nrows != self._dimnrows or self.ncols != self._dimncols): - fprintf(self.logfile, + self.put_cell(nr-1, 0, XL_CELL_EMPTY, UNICODE_LITERAL(''), -1) + if (self.verbosity >= 1 and + (self.nrows != self._dimnrows or self.ncols != self._dimncols)): + fprintf( + self.logfile, "NOTE *** sheet %d (%r): DIMENSIONS R,C = %d,%d should be %d,%d\n", self.number, self.name, @@ -592,7 +611,7 @@ def tidy_dimensions(self): self._dimncols, self.nrows, self.ncols, - ) + ) if not self.ragged_rows: # fix ragged rows ncols = self.ncols @@ -610,7 +629,7 @@ def tidy_dimensions(self): rlen = len(trow) nextra = ncols - rlen if nextra > 0: - s_cell_values[rowx][rlen:] = [''] * nextra + s_cell_values[rowx][rlen:] = [UNICODE_LITERAL('')] * nextra trow[rlen:] = self.bt * nextra if s_fmt_info: s_cell_xf_indexes[rowx][rlen:] = self.bf * nextra @@ -659,11 +678,11 @@ def put_cell_ragged(self, rowx, colx, ctype, value, xf_index): num_empty += 1 # self._put_cell_row_widenings += 1 # 
types_row.extend(self.bt * num_empty) - # values_row.extend([''] * num_empty) + # values_row.extend([UNICODE_LITERAL('')] * num_empty) # if fmt_info: # fmt_row.extend(self.bf * num_empty) types_row[ltr:] = self.bt * num_empty - values_row[ltr:] = [''] * num_empty + values_row[ltr:] = [UNICODE_LITERAL('')] * num_empty if fmt_info: fmt_row[ltr:] = self.bf * num_empty types_row[colx] = ctype @@ -717,7 +736,7 @@ def put_cell_unragged(self, rowx, colx, ctype, value, xf_index): trow.extend(self.bt * nextra) if self.formatting_info: self._cell_xf_indexes[rowx].extend(self.bf * nextra) - self._cell_values[rowx].extend([''] * nextra) + self._cell_values[rowx].extend([UNICODE_LITERAL('')] * nextra) else: scta = self._cell_types.append scva = self._cell_values.append @@ -729,7 +748,7 @@ def put_cell_unragged(self, rowx, colx, ctype, value, xf_index): for _unused in xrange(self.nrows, nr): # self._put_cell_rows_appended += 1 scta(bt * nc) - scva([''] * nc) + scva([UNICODE_LITERAL('')] * nc) if fmt_info: scxa(bf * nc) self.nrows = nr @@ -743,8 +762,8 @@ def put_cell_unragged(self, rowx, colx, ctype, value, xf_index): print("put_cell", rowx, colx, file=self.logfile) raise except: - print("put_cell", rowx, colx, file=self.logfile) - raise + print("put_cell", rowx, colx, file=self.logfile) + raise # === Methods after this line neither know nor care about how cells are stored. 
@@ -761,7 +780,7 @@ def read(self, bk): XL_SHRFMLA_ETC_ETC = ( XL_SHRFMLA, XL_ARRAY, XL_TABLEOP, XL_TABLEOP2, XL_ARRAY2, XL_TABLEOP_B2, - ) + ) self_put_cell = self.put_cell local_unpack = unpack bk_get_record_parts = bk.get_record_parts @@ -843,8 +862,8 @@ def read(self, bk): if not fmt_info: continue rowx, bits1, bits2 = local_unpack('= 50: rowx, colx, xf_index, result_str, flags = local_unpack('= 30: rowx, colx, xf_index, result_str, flags = local_unpack(' 255: break # Excel does 0 to 256 inclusive self.colinfo_map[colx] = c @@ -1009,7 +1022,7 @@ def read(self, bk): self.logfile, "COLINFO sheet #%d cols %d-%d: wid=%d xf_index=%d flags=0x%04x\n", self.number, first_colx, last_colx, c.width, c.xf_index, flags, - ) + ) c.dump(self.logfile, header='===') elif rc == XL_DEFCOLWIDTH: self.defcolwidth, = local_unpack("= 1: - fprintf(self.logfile, - "\n*** WARNING: Ignoring CONDFMT (conditional formatting) record\n" \ - "*** in Sheet %d (%r).\n" \ - "*** %d CF record(s); needs_recalc_or_redraw = %d\n" \ + fprintf( + self.logfile, + "\n*** WARNING: Ignoring CONDFMT (conditional formatting) record\n" + "*** in Sheet %d (%r).\n" + "*** %d CF record(s); needs_recalc_or_redraw = %d\n" "*** Bounding box is %s\n", self.number, self.name, num_CFs, needs_recalc, rangename2d(browx1, browx2+1, bcolx1, bcolx2+1), - ) + ) olist = [] # updated by the function pos = unpack_cell_range_address_list_update_pos( olist, data, 12, bv, addr_size=8) # print >> self.logfile, repr(result), len(result) if self.verbosity >= 1: - fprintf(self.logfile, - "*** %d individual range(s):\n" \ + fprintf( + self.logfile, + "*** %d individual range(s):\n" "*** %s\n", len(olist), - ", ".join([rangename2d(*coords) for coords in olist]), - ) + ", ".join(rangename2d(*coords) for coords in olist), + ) elif rc == XL_CF: if not fmt_info: continue cf_type, cmp_op, sz1, sz2, flags = unpack("> 28) & 1 patt_block = (flags >> 29) & 1 if self.verbosity >= 1: - fprintf(self.logfile, - "\n*** WARNING: Ignoring CF 
(conditional formatting) sub-record.\n" \ - "*** cf_type=%d, cmp_op=%d, sz1=%d, sz2=%d, flags=0x%08x\n" \ + fprintf( + self.logfile, + "\n*** WARNING: Ignoring CF (conditional formatting) sub-record.\n" + "*** cf_type=%d, cmp_op=%d, sz1=%d, sz2=%d, flags=0x%08x\n" "*** optional data blocks: font=%d, border=%d, pattern=%d\n", cf_type, cmp_op, sz1, sz2, flags, font_block, bord_block, patt_block, - ) + ) # hex_char_dump(data, 0, data_len, fout=self.logfile) pos = 12 if font_block: (font_height, font_options, weight, escapement, underline, - font_colour_index, two_bits, font_esc, font_underl) = \ - unpack("<64x i i H H B 3x i 4x i i i 18x", data[pos:pos+118]) + font_colour_index, two_bits, font_esc, font_underl) = unpack("<64x i i H H B 3x i 4x i i i 18x", data[pos:pos+118]) font_style = (two_bits > 1) & 1 posture = (font_options > 1) & 1 font_canc = (two_bits > 7) & 1 cancellation = (font_options > 7) & 1 if self.verbosity >= 1: - fprintf(self.logfile, - "*** Font info: height=%d, weight=%d, escapement=%d,\n" \ - "*** underline=%d, colour_index=%d, esc=%d, underl=%d,\n" \ + fprintf( + self.logfile, + "*** Font info: height=%d, weight=%d, escapement=%d,\n" + "*** underline=%d, colour_index=%d, esc=%d, underl=%d,\n" "*** style=%d, posture=%d, canc=%d, cancellation=%d\n", font_height, weight, escapement, underline, font_colour_index, font_esc, font_underl, font_style, posture, font_canc, cancellation, - ) + ) pos += 118 if bord_block: pos += 8 @@ -1195,17 +1212,13 @@ def read(self, bk): fmla1 = data[pos:pos+sz1] pos += sz1 if blah and sz1: - fprintf(self.logfile, - "*** formula 1:\n", - ) + fprintf(self.logfile, "*** formula 1:\n") dump_formula(bk, fmla1, sz1, bv, reldelta=0, blah=1) fmla2 = data[pos:pos+sz2] pos += sz2 assert pos == data_len if blah and sz2: - fprintf(self.logfile, - "*** formula 2:\n", - ) + fprintf(self.logfile, "*** formula 2:\n") dump_formula(bk, fmla2, sz2, bv, reldelta=0, blah=1) elif rc == XL_DEFAULTROWHEIGHT: if data_len == 4: @@ -1214,12 
+1227,12 @@ def read(self, bk): self.default_row_height, = unpack("= 80 and data_len >= 14: - (options, - self.first_visible_rowx, self.first_visible_colx, - self.gridline_colour_index, - self.cached_page_break_preview_mag_factor, - self.cached_normal_view_mag_factor + ( + options, + self.first_visible_rowx, self.first_visible_colx, + self.gridline_colour_index, + self.cached_page_break_preview_mag_factor, + self.cached_normal_view_mag_factor ) = unpack("= 30 # BIFF3-7 - (options, - self.first_visible_rowx, self.first_visible_colx, + ( + options, + self.first_visible_rowx, self.first_visible_colx, ) = unpack("= 0: - print(( + print( "WARNING *** SCL rcd sheet %d: should have 0.1 <= num/den <= 4; got %d/%d" - % (self.number, num, den) - ), file=self.logfile) + % (self.number, num, den), + file=self.logfile, + ) result = 100 self.scl_mag_factor = result elif rc == XL_PANE: ( - self.vert_split_pos, - self.horz_split_pos, - self.horz_split_first_visible, - self.vert_split_first_visible, - self.split_active_pane, + self.vert_split_pos, + self.horz_split_pos, + self.horz_split_first_visible, + self.vert_split_first_visible, + self.split_active_pane, ) = unpack(" nchars_expected: - msg = ("STRING/CONTINUE: expected %d chars, found %d" + msg = ("STRING/CONTINUE: expected %d chars, found %d" % (nchars_expected, nchars_found)) raise XLRDError(msg) rc, _unused_len, data = bk.get_record_parts() @@ -1524,10 +1537,11 @@ def update_cooked_mag_factors(self): zoom = self.cached_normal_view_mag_factor if not (10 <= zoom <=400): if blah: - print(( + print( "WARNING *** WINDOW2 rcd sheet %d: Bad cached_normal_view_mag_factor: %d" - % (self.number, self.cached_normal_view_mag_factor) - ), file=self.logfile) + % (self.number, self.cached_normal_view_mag_factor), + file=self.logfile, + ) zoom = self.cooked_page_break_preview_mag_factor self.cooked_normal_view_mag_factor = zoom else: @@ -1537,15 +1551,16 @@ def update_cooked_mag_factors(self): else: self.cooked_normal_view_mag_factor = 
self.scl_mag_factor zoom = self.cached_page_break_preview_mag_factor - if zoom == 0: + if not zoom: # VALID, defaults to 60 zoom = 60 elif not (10 <= zoom <= 400): if blah: - print(( + print( "WARNING *** WINDOW2 rcd sheet %r: Bad cached_page_break_preview_mag_factor: %r" - % (self.number, self.cached_page_break_preview_mag_factor) - ), file=self.logfile) + % (self.number, self.cached_page_break_preview_mag_factor), + file=self.logfile, + ) zoom = self.cooked_normal_view_mag_factor self.cooked_page_break_preview_mag_factor = zoom @@ -1607,7 +1622,7 @@ def insert_new_BIFF20_xf(self, cell_attr, style=0): FGE: XL_CELL_NUMBER, FDT: XL_CELL_DATE, FTX: XL_CELL_NUMBER, # Yes, a number can be formatted as text. - } + } fmt = book.format_map[xf.format_key] cellty = cellty_from_fmtty[fmt.type] self._xf_index_to_xl_type_map[xf.xf_index] = cellty @@ -1634,7 +1649,7 @@ def fake_XF_from_BIFF20_cell_attr(self, cell_attr, style=0): upkbits(xf.protection, prot_bits, ( (6, 0x40, 'cell_locked'), (7, 0x80, 'formula_hidden'), - )) + )) xf.alignment.hor_align = halign_etc & 0x07 for mask, side in ((0x08, 'left'), (0x10, 'right'), (0x20, 'top'), (0x40, 'bottom')): if halign_etc & mask: @@ -1653,8 +1668,15 @@ def fake_XF_from_BIFF20_cell_attr(self, cell_attr, style=0): xf.parent_style_index = (0x0FFF, 0)[style] xf.alignment.vert_align = 2 # bottom xf.alignment.rotation = 0 - for attr_stem in \ - "format font alignment border background protection".split(): + attr_stems = [ + 'format', + 'font', + 'alignment', + 'border', + 'background', + 'protection', + ] + for attr_stem in attr_stems: attr = "_" + attr_stem + "_flag" setattr(xf, attr, 1) return xf @@ -1663,18 +1685,22 @@ def req_fmt_info(self): if not self.formatting_info: raise XLRDError("Feature requires open_workbook(..., formatting_info=True)") - ## - # Determine column display width. - #
-- New in version 0.6.1 - #
- # @param colx Index of the queried column, range 0 to 255. - # Note that it is possible to find out the width that will be used to display - # columns with no cell information e.g. column IV (colx=255). - # @return The column width that will be used for displaying - # the given column by Excel, in units of 1/256th of the width of a - # standard character (the digit zero in the first font). - def computed_column_width(self, colx): + """ + Determine column display width. + + :param colx: + Index of the queried column, range 0 to 255. + Note that it is possible to find out the width that will be used to + display columns with no cell information e.g. column IV (colx=255). + + :return: + The column width that will be used for displaying + the given column by Excel, in units of 1/256th of the width of a + standard character (the digit zero in the first font). + + .. versionadded:: 0.6.1 + """ self.req_fmt_info() if self.biff_version >= 80: colinfo = self.colinfo_map.get(colx, None) @@ -1719,10 +1745,10 @@ def get_nul_terminated_unicode(buf, ofs): if options & 0x14: # has a description h.desc, offset = get_nul_terminated_unicode(data, offset) - + if options & 0x80: # has a target h.target, offset = get_nul_terminated_unicode(data, offset) - + if (options & 1) and not (options & 0x100): # HasMoniker and not MonikerSavedAsString # an OLEMoniker structure clsid, = unpack('<16s', data[offset:offset + 16]) @@ -1744,12 +1770,12 @@ def get_nul_terminated_unicode(buf, ofs): extra_nbytes = nbytes - true_nbytes extra_data = data[offset:offset + extra_nbytes] offset += extra_nbytes - if DEBUG: + if DEBUG: fprintf( self.logfile, "url=%r\nextra=%r\nnbytes=%d true_nbytes=%d extra_nbytes=%d\n", h.url_or_path, extra_data, nbytes, true_nbytes, extra_nbytes, - ) + ) assert extra_nbytes in (24, 0) elif clsid == b"\x03\x03\x00\x00\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46": # file moniker @@ -1784,33 +1810,33 @@ def get_nul_terminated_unicode(buf, ofs): h.type = 
UNICODE_LITERAL('workbook') else: h.type = UNICODE_LITERAL('unknown') - + if options & 0x8: # has textmark h.textmark, offset = get_nul_terminated_unicode(data, offset) if DEBUG: - h.dump(header="... object dump ...") + h.dump(header="... object dump ...") print("offset=%d record_size=%d" % (offset, record_size)) - + extra_nbytes = record_size - offset if extra_nbytes > 0: fprintf( self.logfile, - "*** WARNING: hyperlink at r=%d c=%d has %d extra data bytes: %s\n", - h.frowx, - h.fcolx, + "*** WARNING: hyperlink at R%dC%d has %d extra data bytes: %s\n", + h.frowx + 1, + h.fcolx + 1, extra_nbytes, - REPR(data[-extra_nbytes:]) - ) + REPR(data[-extra_nbytes:]), + ) # Seen: b"\x00\x00" also b"A\x00", b"V\x00" - elif extra_nbytes < 0: + elif extra_nbytes < 0: raise XLRDError("Bug or corrupt file, send copy of input file for debugging") self.hyperlink_list.append(h) for rowx in xrange(h.frowx, h.lrowx+1): for colx in xrange(h.fcolx, h.lcolx+1): self.hyperlink_map[rowx, colx] = h - + def handle_quicktip(self, data): rcx, frowx, lrowx, fcolx, lcolx = unpack('<5H', data[:10]) assert rcx == XL_QUICKTIP @@ -1887,7 +1913,7 @@ def handle_obj(self, data): ( 9, 0x0200, 'scrollbar_flag'), # not documented in Excel 97 dev kit (13, 0x2000, 'autofill'), (14, 0x4000, 'autoline'), - )) + )) elif ft == 0x00: if data[pos:data_len] == b'\0' * (data_len - pos): # ignore "optional reserved" data at end of record @@ -1945,7 +1971,7 @@ def handle_note(self, data, txos): o.col_hidden = 0 o.author = UNICODE_LITERAL('') o._object_id = None - self.cell_note_map[o.rowx, o.colx] = o + self.cell_note_map[o.rowx, o.colx] = o return # Excel 8.0+ o.rowx, o.colx, option_flags, o._object_id = unpack('<4H', data[:8]) @@ -1965,13 +1991,12 @@ def handle_note(self, data, txos): if txo: o.text = txo.text o.rich_text_runlist = txo.rich_text_runlist - self.cell_note_map[o.rowx, o.colx] = o + self.cell_note_map[o.rowx, o.colx] = o def handle_txo(self, data): if self.biff_version < 80: return o = MSTxo() - 
data_len = len(data) fmt = ' Represents a user "comment" or "note". -# Note objects are accessible through Sheet.{@link #Sheet.cell_note_map}. -#
-- New in version 0.7.2 -#

+ class Note(BaseObject): - ## - # Author of note + """ + Represents a user "comment" or "note". + Note objects are accessible through :attr:`Sheet.cell_note_map`. + + .. versionadded:: 0.7.2 + """ + + #: Author of note author = UNICODE_LITERAL('') - ## - # True if the containing column is hidden - col_hidden = 0 - ## - # Column index + + #: ``True`` if the containing column is hidden + col_hidden = 0 + + #: Column index colx = 0 - ## - # List of (offset_in_string, font_index) tuples. - # Unlike Sheet.{@link #Sheet.rich_text_runlist_map}, the first offset should always be 0. + + #: List of ``(offset_in_string, font_index)`` tuples. + #: Unlike :attr:`Sheet.rich_text_runlist_map`, the first offset should + #: always be 0. rich_text_runlist = None - ## - # True if the containing row is hidden + + #: True if the containing row is hidden row_hidden = 0 - ## - # Row index + + #: Row index rowx = 0 - ## - # True if note is always shown + + #: True if note is always shown show = 0 - ## - # Text of the note + + #: Text of the note text = UNICODE_LITERAL('') -## -#

Contains the attributes of a hyperlink. -# Hyperlink objects are accessible through Sheet.{@link #Sheet.hyperlink_list} -# and Sheet.{@link #Sheet.hyperlink_map}. -#
-- New in version 0.7.2 -#

+ class Hyperlink(BaseObject): - ## - # Index of first row + """ + Contains the attributes of a hyperlink. + Hyperlink objects are accessible through :attr:`Sheet.hyperlink_list` + and :attr:`Sheet.hyperlink_map`. + + .. versionadded:: 0.7.2 + """ + + #: Index of first row frowx = None - ## - # Index of last row + + #: Index of last row lrowx = None - ## - # Index of first column + + #: Index of first column fcolx = None - ## - # Index of last column + + #: Index of last column lcolx = None - ## - # Type of hyperlink. Unicode string, one of 'url', 'unc', - # 'local file', 'workbook', 'unknown' + + #: Type of hyperlink. Unicode string, one of 'url', 'unc', + #: 'local file', 'workbook', 'unknown' type = None - ## - # The URL or file-path, depending in the type. Unicode string, except - # in the rare case of a local but non-existent file with non-ASCII - # characters in the name, in which case only the "8.3" filename is available, - # as a bytes (3.x) or str (2.x) string, with unknown encoding. + + #: The URL or file-path, depending in the type. Unicode string, except + #: in the rare case of a local but non-existent file with non-ASCII + #: characters in the name, in which case only the "8.3" filename is + #: available, as a :class:`bytes` (3.x) or :class:`str` (2.x) string, + #: *with unknown encoding.* url_or_path = None - ## - # Description ... this is displayed in the cell, - # and should be identical to the cell value. Unicode string, or None. It seems - # impossible NOT to have a description created by the Excel UI. + + #: Description. + #: This is displayed in the cell, + #: and should be identical to the cell value. Unicode string, or ``None``. + #: It seems impossible NOT to have a description created by the Excel UI. desc = None - ## - # Target frame. Unicode string. Note: I have not seen a case of this. - # It seems impossible to create one in the Excel UI. + + #: Target frame. Unicode string. + #: + #: .. 
note:: + #: No cases of this have been seen in the wild. + #: It seems impossible to create one in the Excel UI. target = None - ## - # "Textmark": the piece after the "#" in - # "http://docs.python.org/library#struct_module", or the Sheet1!A1:Z99 - # part when type is "workbook". + + #: The piece after the "#" in + #: "http://docs.python.org/library#struct_module", or the ``Sheet1!A1:Z99`` + #: part when type is "workbook". textmark = None - ## - # The text of the "quick tip" displayed when the cursor - # hovers over the hyperlink. + + #: The text of the "quick tip" displayed when the cursor + #: hovers over the hyperlink. quicktip = None # === helpers === @@ -2181,7 +2219,7 @@ def unpack_RK(rk_str): FGE: XL_CELL_NUMBER, FDT: XL_CELL_DATE, FTX: XL_CELL_NUMBER, # Yes, a number can be formatted as text. - } +} ctype_text = { XL_CELL_EMPTY: 'empty', @@ -2191,67 +2229,74 @@ def unpack_RK(rk_str): XL_CELL_BOOLEAN: 'bool', XL_CELL_ERROR: 'error', XL_CELL_BLANK: 'blank', - } - -## -#

Contains the data for one cell.

-# -#

WARNING: You don't call this class yourself. You access Cell objects -# via methods of the {@link #Sheet} object(s) that you found in the {@link #Book} object that -# was returned when you called xlrd.open_workbook("myfile.xls").

-#

Cell objects have three attributes: ctype is an int, value -# (which depends on ctype) and xf_index. -# If "formatting_info" is not enabled when the workbook is opened, xf_index will be None. -# The following table describes the types of cells and how their values -# are represented in Python.

-# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -# -#
Type symbolType numberPython value
XL_CELL_EMPTY0empty string u''
XL_CELL_TEXT1a Unicode string
XL_CELL_NUMBER2float
XL_CELL_DATE3float
XL_CELL_BOOLEAN4int; 1 means TRUE, 0 means FALSE
XL_CELL_ERROR5int representing internal Excel codes; for a text representation, -# refer to the supplied dictionary error_text_from_code
XL_CELL_BLANK6empty string u''. Note: this type will appear only when -# open_workbook(..., formatting_info=True) is used.
-#

+} + class Cell(BaseObject): + """ + Contains the data for one cell. + + .. warning:: + You don't call this class yourself. You access :class:`Cell` objects + via methods of the :class:`Sheet` object(s) that you found in the + :class:`~xlrd.book.Book` object that was returned when you called + :func:`~xlrd.open_workbook` + + Cell objects have three attributes: ``ctype`` is an int, ``value`` + (which depends on ``ctype``) and ``xf_index``. + If ``formatting_info`` is not enabled when the workbook is opened, + ``xf_index`` will be ``None``. + + The following table describes the types of cells and how their values + are represented in Python. + + .. raw:: html + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Type symbolType numberPython value
XL_CELL_EMPTY0empty string ''
XL_CELL_TEXT1a Unicode string
XL_CELL_NUMBER2float
XL_CELL_DATE3float
XL_CELL_BOOLEAN4int; 1 means TRUE, 0 means FALSE
XL_CELL_ERROR5int representing internal Excel codes; for a text representation, + refer to the supplied dictionary error_text_from_code
XL_CELL_BLANK6empty string ''. Note: this type will appear only when + open_workbook(..., formatting_info=True) is used.
+ """ __slots__ = ['ctype', 'value', 'xf_index'] @@ -2266,108 +2311,80 @@ def __repr__(self): else: return "%s:%r (XF:%r)" % (ctype_text[self.ctype], self.value, self.xf_index) -## -# There is one and only one instance of an empty cell -- it's a singleton. This is it. -# You may use a test like "acell is empty_cell". -empty_cell = Cell(XL_CELL_EMPTY, '') +empty_cell = Cell(XL_CELL_EMPTY, UNICODE_LITERAL('')) ##### =============== Colinfo and Rowinfo ============================== ##### -## -# Width and default formatting information that applies to one or -# more columns in a sheet. Derived from COLINFO records. -# -#

Here is the default hierarchy for width, according to the OOo docs: -# -#
"""In BIFF3, if a COLINFO record is missing for a column, -# the width specified in the record DEFCOLWIDTH is used instead. -# -#
In BIFF4-BIFF7, the width set in this [COLINFO] record is only used, -# if the corresponding bit for this column is cleared in the GCW -# record, otherwise the column width set in the DEFCOLWIDTH record -# is used (the STANDARDWIDTH record is always ignored in this case [see footnote!]). -# -#
In BIFF8, if a COLINFO record is missing for a column, -# the width specified in the record STANDARDWIDTH is used. -# If this [STANDARDWIDTH] record is also missing, -# the column width of the record DEFCOLWIDTH is used instead.""" -#
-# -# Footnote: The docs on the GCW record say this: -# """
-# If a bit is set, the corresponding column uses the width set in the STANDARDWIDTH -# record. If a bit is cleared, the corresponding column uses the width set in the -# COLINFO record for this column. -#
If a bit is set, and the worksheet does not contain the STANDARDWIDTH record, or if -# the bit is cleared, and the worksheet does not contain the COLINFO record, the DEFCOLWIDTH -# record of the worksheet will be used instead. -#
"""
-# At the moment (2007-01-17) xlrd is going with the GCW version of the story. -# Reference to the source may be useful: see the computed_column_width(colx) method -# of the Sheet class. -#
-- New in version 0.6.1 -#

class Colinfo(BaseObject): - ## - # Width of the column in 1/256 of the width of the zero character, - # using default font (first FONT record in the file). + """ + Width and default formatting information that applies to one or + more columns in a sheet. Derived from ``COLINFO`` records. + + Here is the default hierarchy for width, according to the OOo docs: + + In BIFF3, if a ``COLINFO`` record is missing for a column, + the width specified in the record ``DEFCOLWIDTH`` is used instead. + + In BIFF4-BIFF7, the width set in this ``COLINFO`` record is only used, + if the corresponding bit for this column is cleared in the ``GCW`` + record, otherwise the column width set in the ``DEFCOLWIDTH`` record + is used (the ``STANDARDWIDTH`` record is always ignored in this case [#f1]_). + + In BIFF8, if a ``COLINFO`` record is missing for a column, + the width specified in the record ``STANDARDWIDTH`` is used. + If this ``STANDARDWIDTH`` record is also missing, + the column width of the record ``DEFCOLWIDTH`` is used instead. + + .. [#f1] The docs on the ``GCW`` record say this: + + If a bit is set, the corresponding column uses the width set in the + ``STANDARDWIDTH`` record. If a bit is cleared, the corresponding column + uses the width set in the ``COLINFO`` record for this column. + + If a bit is set, and the worksheet does not contain the ``STANDARDWIDTH`` + record, or if the bit is cleared, and the worksheet does not contain the + ``COLINFO`` record, the ``DEFCOLWIDTH`` record of the worksheet will be + used instead. + + xlrd goes with the GCW version of the story. + Reference to the source may be useful: see + :meth:`Sheet.computed_column_width`. + + .. versionadded:: 0.6.1 + """ + + #: Width of the column in 1/256 of the width of the zero character, + #: using default font (first ``FONT`` record in the file). width = 0 - ## - # XF index to be used for formatting empty cells. + + #: XF index to be used for formatting empty cells. 
xf_index = -1 - ## - # 1 = column is hidden + + #: 1 = column is hidden hidden = 0 - ## - # Value of a 1-bit flag whose purpose is unknown - # but is often seen set to 1 + + #: Value of a 1-bit flag whose purpose is unknown + #: but is often seen set to 1 bit1_flag = 0 - ## - # Outline level of the column, in range(7). - # (0 = no outline) + + #: Outline level of the column, in ``range(7)``. + #: (0 = no outline) outline_level = 0 - ## - # 1 = column is collapsed + + #: 1 = column is collapsed collapsed = 0 _USE_SLOTS = 1 -## -#

Height and default formatting information that applies to a row in a sheet. -# Derived from ROW records. -#
-- New in version 0.6.1

-# -#

height: Height of the row, in twips. One twip == 1/20 of a point.

-# -#

has_default_height: 0 = Row has custom height; 1 = Row has default height.

-# -#

outline_level: Outline level of the row (0 to 7)

-# -#

outline_group_starts_ends: 1 = Outline group starts or ends here (depending on where the -# outline buttons are located, see WSBOOL record [TODO ??]), -# and is collapsed

-# -#

hidden: 1 = Row is hidden (manually, or by a filter or outline group)

-# -#

height_mismatch: 1 = Row height and default font height do not match

-# -#

has_default_xf_index: 1 = the xf_index attribute is usable; 0 = ignore it

-# -#

xf_index: Index to default XF record for empty cells in this row. -# Don't use this if has_default_xf_index == 0.

-# -#

additional_space_above: This flag is set, if the upper border of at least one cell in this row -# or if the lower border of at least one cell in the row above is -# formatted with a thick line style. Thin and medium line styles are not -# taken into account.

-# -#

additional_space_below: This flag is set, if the lower border of at least one cell in this row -# or if the upper border of at least one cell in the row below is -# formatted with a medium or thick line style. Thin line styles are not -# taken into account.

class Rowinfo(BaseObject): + """ + Height and default formatting information that applies to a row in a sheet. + Derived from ``ROW`` records. + + .. versionadded:: 0.6.1 + """ if _USE_SLOTS: __slots__ = ( @@ -2381,18 +2398,46 @@ class Rowinfo(BaseObject): "xf_index", "additional_space_above", "additional_space_below", - ) + ) def __init__(self): + #: Height of the row, in twips. One twip == 1/20 of a point. self.height = None + + #: 0 = Row has custom height; 1 = Row has default height. self.has_default_height = None + + #: Outline level of the row (0 to 7) self.outline_level = None + + #: 1 = Outline group starts or ends here (depending on where the + #: outline buttons are located, see ``WSBOOL`` record, which is not + #: parsed by xlrd), *and* is collapsed. self.outline_group_starts_ends = None + + #: 1 = Row is hidden (manually, or by a filter or outline group) self.hidden = None + + #: 1 = Row height and default font height do not match. self.height_mismatch = None + + #: 1 = the xf_index attribute is usable; 0 = ignore it. self.has_default_xf_index = None + + #: Index to default :class:`~xlrd.formatting.XF` record for empty cells + #: in this row. Don't use this if ``has_default_xf_index == 0``. self.xf_index = None + + #: This flag is set if the upper border of at least one cell in this + #: row or if the lower border of at least one cell in the row above is + #: formatted with a thick line style. Thin and medium line styles are + #: not taken into account. self.additional_space_above = None + + #: This flag is set if the lower border of at least one cell in this row + #: or if the upper border of at least one cell in the row below is + #: formatted with a medium or thick line style. Thin line styles are not + #: taken into account. 
self.additional_space_below = None def __getstate__(self): @@ -2407,7 +2452,7 @@ def __getstate__(self): self.xf_index, self.additional_space_above, self.additional_space_below, - ) + ) def __setstate__(self, state): ( @@ -2421,4 +2466,4 @@ def __setstate__(self, state): self.xf_index, self.additional_space_above, self.additional_space_below, - ) = state + ) = state diff --git a/SUEWSPrepare/Modules/xlrd/timemachine.py b/SUEWSPrepare/Modules/xlrd/timemachine.py index a068db3..a519299 100644 --- a/SUEWSPrepare/Modules/xlrd/timemachine.py +++ b/SUEWSPrepare/Modules/xlrd/timemachine.py @@ -8,6 +8,7 @@ # usage: from timemachine import * from __future__ import print_function + import sys python_version = sys.version_info[:2] # e.g. version 2.6 -> (2, 6) @@ -23,7 +24,7 @@ def fprintf(f, fmt, *vargs): if fmt.endswith('\n'): print(fmt[:-1] % vargs, file=f) else: - print(fmt % vargs, end=' ', file=f) + print(fmt % vargs, end=' ', file=f) EXCEL_TEXT_TYPES = (str, bytes, bytearray) # xlwt: isinstance(obj, EXCEL_TEXT_TYPES) REPR = ascii xrange = range @@ -40,7 +41,7 @@ def fprintf(f, fmt, *vargs): if fmt.endswith('\n'): print(fmt[:-1] % vargs, file=f) else: - print(fmt % vargs, end=' ', file=f) + print(fmt % vargs, end=' ', file=f) try: EXCEL_TEXT_TYPES = basestring # xlwt: isinstance(obj, EXCEL_TEXT_TYPES) except NameError: @@ -49,4 +50,4 @@ def fprintf(f, fmt, *vargs): xrange = xrange # following used only to overcome 2.x ElementTree gimmick which # returns text as `str` if it's ascii, otherwise `unicode` - ensure_unicode = unicode # used only in xlsx.py + ensure_unicode = unicode # used only in xlsx.py diff --git a/SUEWSPrepare/Modules/xlrd/xldate.py b/SUEWSPrepare/Modules/xlrd/xldate.py index dc7b9c8..d84c650 100644 --- a/SUEWSPrepare/Modules/xlrd/xldate.py +++ b/SUEWSPrepare/Modules/xlrd/xldate.py @@ -1,22 +1,23 @@ -# -*- coding: cp1252 -*- - +# -*- coding: utf-8 -*- +# Copyright (c) 2005-2008 Stephen John Machin, Lingfo Pty Ltd +# This module is part of the xlrd package, 
which is released under a +# BSD-style licence. # No part of the content of this file was derived from the works of David Giffin. +""" +Tools for working with dates and times in Excel files. + +The conversion from ``days`` to ``(year, month, day)`` starts with +an integral "julian day number" aka JDN. +FWIW: + +- JDN 0 corresponds to noon on Monday November 24 in Gregorian year -4713. + +More importantly: -## -#

Copyright 2005-2008 Stephen John Machin, Lingfo Pty Ltd

-#

This module is part of the xlrd package, which is released under a BSD-style licence.

-# -#

Provides function(s) for dealing with Microsoft Excel dates.

-## - -# 2008-10-18 SJM Fix bug in xldate_from_date_tuple (affected some years after 2099) - -# The conversion from days to (year, month, day) starts with -# an integral "julian day number" aka JDN. -# FWIW, JDN 0 corresponds to noon on Monday November 24 in Gregorian year -4713. -# More importantly: -# Noon on Gregorian 1900-03-01 (day 61 in the 1900-based system) is JDN 2415080.0 -# Noon on Gregorian 1904-01-02 (day 1 in the 1904-based system) is JDN 2416482.0 +- Noon on Gregorian 1900-03-01 (day 61 in the 1900-based system) is JDN 2415080.0 +- Noon on Gregorian 1904-01-02 (day 1 in the 1904-based system) is JDN 2416482.0 + +""" import datetime _JDN_delta = (2415080 - 61, 2416482 - 1) @@ -27,37 +28,66 @@ epoch_1900 = datetime.datetime(1899, 12, 31) epoch_1900_minus_1 = datetime.datetime(1899, 12, 30) -class XLDateError(ValueError): pass - -class XLDateNegative(XLDateError): pass -class XLDateAmbiguous(XLDateError): pass -class XLDateTooLarge(XLDateError): pass -class XLDateBadDatemode(XLDateError): pass -class XLDateBadTuple(XLDateError): pass - -_XLDAYS_TOO_LARGE = (2958466, 2958466 - 1462) # This is equivalent to 10000-01-01 - -## -# Convert an Excel number (presumed to represent a date, a datetime or a time) into -# a tuple suitable for feeding to datetime or mx.DateTime constructors. -# @param xldate The Excel number -# @param datemode 0: 1900-based, 1: 1904-based. -#
WARNING: when using this function to -# interpret the contents of a workbook, you should pass in the Book.datemode -# attribute of that workbook. Whether -# the workbook has ever been anywhere near a Macintosh is irrelevant. -# @return Gregorian (year, month, day, hour, minute, nearest_second). -#
Special case: if 0.0 <= xldate < 1.0, it is assumed to represent a time; -# (0, 0, 0, hour, minute, second) will be returned. -#
Note: 1904-01-01 is not regarded as a valid date in the datemode 1 system; its "serial number" -# is zero. -# @throws XLDateNegative xldate < 0.00 -# @throws XLDateAmbiguous The 1900 leap-year problem (datemode == 0 and 1.0 <= xldate < 61.0) -# @throws XLDateTooLarge Gregorian year 10000 or later -# @throws XLDateBadDatemode datemode arg is neither 0 nor 1 -# @throws XLDateError Covers the 4 specific errors +# This is equivalent to 10000-01-01: +_XLDAYS_TOO_LARGE = (2958466, 2958466 - 1462) + + +class XLDateError(ValueError): + "A base class for all datetime-related errors." + + +class XLDateNegative(XLDateError): + "``xldate < 0.00``" + + +class XLDateAmbiguous(XLDateError): + "The 1900 leap-year problem ``(datemode == 0 and 1.0 <= xldate < 61.0)``" + + +class XLDateTooLarge(XLDateError): + "Gregorian year 10000 or later" + + +class XLDateBadDatemode(XLDateError): + "``datemode`` arg is neither 0 nor 1" + + +class XLDateBadTuple(XLDateError): + pass + def xldate_as_tuple(xldate, datemode): + """ + Convert an Excel number (presumed to represent a date, a datetime or a time) into + a tuple suitable for feeding to datetime or mx.DateTime constructors. + + :param xldate: The Excel number + :param datemode: 0: 1900-based, 1: 1904-based. + :raises xlrd.xldate.XLDateNegative: + :raises xlrd.xldate.XLDateAmbiguous: + + :raises xlrd.xldate.XLDateTooLarge: + :raises xlrd.xldate.XLDateBadDatemode: + :raises xlrd.xldate.XLDateError: + :returns: Gregorian ``(year, month, day, hour, minute, nearest_second)``. + + .. warning:: + + When using this function to interpret the contents of a workbook, you + should pass in the :attr:`~xlrd.book.Book.datemode` + attribute of that workbook. Whether the workbook has ever been anywhere + near a Macintosh is irrelevant. + + .. admonition:: Special case + + If ``0.0 <= xldate < 1.0``, it is assumed to represent a time; + ``(0, 0, 0, hour, minute, second)`` will be returned. + + .. 
note:: + + ``1904-01-01`` is not regarded as a valid date in the ``datemode==1`` + system; its "serial number" is zero. + """ if datemode not in (0, 1): raise XLDateBadDatemode(datemode) if xldate == 0.00: @@ -97,16 +127,15 @@ def xldate_as_tuple(xldate, datemode): return ((yreg // 1461) - 4716, mp + 3, d, hour, minute, second) -## -# Convert an Excel date/time number into a datetime.datetime object. -# -# @param xldate The Excel number -# @param datemode 0: 1900-based, 1: 1904-based. -# -# @return a datetime.datetime() object. -# def xldate_as_datetime(xldate, datemode): - """Convert an Excel date/time number into a datetime.datetime object.""" + """ + Convert an Excel date/time number into a :class:`datetime.datetime` object. + + :param xldate: The Excel number + :param datemode: 0: 1900-based, 1: 1904-based. + + :returns: A :class:`datetime.datetime` object. + """ # Set the epoch based on the 1900/1904 datemode. if datemode: @@ -140,19 +169,21 @@ def _leap(y): _days_in_month = (None, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31) -## -# Convert a date tuple (year, month, day) to an Excel date. -# @param year Gregorian year. -# @param month 1 <= month <= 12 -# @param day 1 <= day <= last day of that (year, month) -# @param datemode 0: 1900-based, 1: 1904-based. -# @throws XLDateAmbiguous The 1900 leap-year problem (datemode == 0 and 1.0 <= xldate < 61.0) -# @throws XLDateBadDatemode datemode arg is neither 0 nor 1 -# @throws XLDateBadTuple (year, month, day) is too early/late or has invalid component(s) -# @throws XLDateError Covers the specific errors def xldate_from_date_tuple(date_tuple, datemode): - """Create an excel date from a tuple of (year, month, day)""" + """ + Convert a date tuple (year, month, day) to an Excel date. + + :param year: Gregorian year. + :param month: ``1 <= month <= 12`` + :param day: ``1 <= day <= last day of that (year, month)`` + :param datemode: 0: 1900-based, 1: 1904-based. 
+ :raises xlrd.xldate.XLDateAmbiguous: + :raises xlrd.xldate.XLDateBadDatemode: + :raises xlrd.xldate.XLDateBadTuple: + ``(year, month, day)`` is too early/late or has invalid component(s) + :raises xlrd.xldate.XLDateError: + """ year, month, day = date_tuple if datemode not in (0, 1): @@ -165,8 +196,8 @@ def xldate_from_date_tuple(date_tuple, datemode): raise XLDateBadTuple("Invalid year: %r" % ((year, month, day),)) if not (1 <= month <= 12): raise XLDateBadTuple("Invalid month: %r" % ((year, month, day),)) - if day < 1 \ - or (day > _days_in_month[month] and not(day == 29 and month == 2 and _leap(year))): + if (day < 1 or + (day > _days_in_month[month] and not(day == 29 and month == 2 and _leap(year)))): raise XLDateBadTuple("Invalid day: %r" % ((year, month, day),)) Yp = year + 4716 @@ -185,29 +216,33 @@ def xldate_from_date_tuple(date_tuple, datemode): raise XLDateAmbiguous("Before 1900-03-01: %r" % ((year, month, day),)) return float(xldays) -## -# Convert a time tuple (hour, minute, second) to an Excel "date" value (fraction of a day). -# @param hour 0 <= hour < 24 -# @param minute 0 <= minute < 60 -# @param second 0 <= second < 60 -# @throws XLDateBadTuple Out-of-range hour, minute, or second def xldate_from_time_tuple(time_tuple): - """Create an excel date from a tuple of (hour, minute, second)""" + """ + Convert a time tuple ``(hour, minute, second)`` to an Excel "date" value + (fraction of a day). + + :param hour: ``0 <= hour < 24`` + :param minute: ``0 <= minute < 60`` + :param second: ``0 <= second < 60`` + :raises xlrd.xldate.XLDateBadTuple: Out-of-range hour, minute, or second + """ hour, minute, second = time_tuple if 0 <= hour < 24 and 0 <= minute < 60 and 0 <= second < 60: return ((second / 60.0 + minute) / 60.0 + hour) / 24.0 raise XLDateBadTuple("Invalid (hour, minute, second): %r" % ((hour, minute, second),)) -## -# Convert a datetime tuple (year, month, day, hour, minute, second) to an Excel date value. 
-# For more details, refer to other xldate_from_*_tuple functions. -# @param datetime_tuple (year, month, day, hour, minute, second) -# @param datemode 0: 1900-based, 1: 1904-based. def xldate_from_datetime_tuple(datetime_tuple, datemode): + """ + Convert a datetime tuple ``(year, month, day, hour, minute, second)`` to an + Excel date value. + For more details, refer to other xldate_from_*_tuple functions. + + :param datetime_tuple: ``(year, month, day, hour, minute, second)`` + :param datemode: 0: 1900-based, 1: 1904-based. + """ return ( - xldate_from_date_tuple(datetime_tuple[:3], datemode) - + + xldate_from_date_tuple(datetime_tuple[:3], datemode) + xldate_from_time_tuple(datetime_tuple[3:]) - ) + ) diff --git a/SUEWSPrepare/Modules/xlrd/xlsx.py b/SUEWSPrepare/Modules/xlrd/xlsx.py index 3c036de..fa1547b 100644 --- a/SUEWSPrepare/Modules/xlrd/xlsx.py +++ b/SUEWSPrepare/Modules/xlrd/xlsx.py @@ -5,15 +5,21 @@ from __future__ import print_function, unicode_literals -DEBUG = 0 - -import sys import re -from .timemachine import * +import sys +from os.path import join, normpath + +from .biffh import ( + XL_CELL_BLANK, XL_CELL_BOOLEAN, XL_CELL_ERROR, XL_CELL_TEXT, XLRDError, + error_text_from_code, +) from .book import Book, Name -from .biffh import error_text_from_code, XLRDError, XL_CELL_BLANK, XL_CELL_TEXT, XL_CELL_BOOLEAN, XL_CELL_ERROR -from .formatting import is_date_format_string, Format, XF +from .formatting import XF, Format, is_date_format_string from .sheet import Sheet +from .timemachine import * + +DEBUG = 0 + DLF = sys.stdout # Default Log File @@ -27,20 +33,22 @@ def ensure_elementtree_imported(verbosity, logfile): return if "IronPython" in sys.version: import xml.etree.ElementTree as ET - #### 2.7.2.1: fails later with + #### 2.7.2.1: fails later with #### NotImplementedError: iterparse is not supported on IronPython. 
(CP #31923) else: - try: import xml.etree.cElementTree as ET + try: import defusedxml.cElementTree as ET except ImportError: - try: import cElementTree as ET + try: import xml.etree.cElementTree as ET except ImportError: - try: import lxml.etree as ET + try: import cElementTree as ET except ImportError: - try: import xml.etree.ElementTree as ET + try: import lxml.etree as ET except ImportError: - try: import elementtree.ElementTree as ET + try: import xml.etree.ElementTree as ET except ImportError: - raise Exception("Failed to import an ElementTree implementation") + try: import elementtree.ElementTree as ET + except ImportError: + raise Exception("Failed to import an ElementTree implementation") if hasattr(ET, 'iterparse'): _dummy_stream = BYTES_IO(b'') try: @@ -48,15 +56,15 @@ def ensure_elementtree_imported(verbosity, logfile): ET_has_iterparse = True except NotImplementedError: pass - Element_has_iter = hasattr(ET.ElementTree, 'iter') + Element_has_iter = hasattr(ET, 'ElementTree') and hasattr(ET.ElementTree, 'iter') if verbosity: etree_version = repr([ (item, getattr(ET, item)) for item in ET.__dict__.keys() if item.lower().replace('_', '') == 'version' - ]) + ]) print(ET.__file__, ET.__name__, etree_version, ET_has_iterparse, file=logfile) - + def split_tag(tag): pos = tag.rfind('}') + 1 if pos >= 2: @@ -75,7 +83,8 @@ def augment_keys(adict, uri): _UPPERCASE_1_REL_INDEX[_x] = 0 del _x -def cell_name_to_rowx_colx(cell_name, letter_value=_UPPERCASE_1_REL_INDEX): +def cell_name_to_rowx_colx(cell_name, letter_value=_UPPERCASE_1_REL_INDEX, + allow_no_col=False): # Extract column index from cell name # A => 0, Z =>25, AA => 26, XFD => 16383 colx = 0 @@ -87,9 +96,18 @@ def cell_name_to_rowx_colx(cell_name, letter_value=_UPPERCASE_1_REL_INDEX): if lv: colx = colx * 26 + lv else: # start of row number; can't be '0' - colx = colx - 1 - assert 0 <= colx < X12_MAX_COLS - break + if charx == 0: + # there was no col marker + if allow_no_col: + colx = None + break + else: + 
raise Exception( + 'Missing col in cell name %r', cell_name) + else: + colx = colx - 1 + assert 0 <= colx < X12_MAX_COLS + break except KeyError: raise Exception('Unexpected character %r in cell name %r' % (c, cell_name)) rowx = int(cell_name[charx:]) - 1 @@ -116,9 +134,8 @@ def cell_name_to_rowx_colx(cell_name, letter_value=_UPPERCASE_1_REL_INDEX): IS_TAG = U_SSML12 + 'is' # cell child: inline string def unescape(s, - subber=re.compile(r'_x[0-9A-Fa-f]{4,4}_', re.UNICODE).sub, - repl=lambda mobj: unichr(int(mobj.group(0)[2:6], 16)), - ): + subber=re.compile(r'_x[0-9A-Fa-f]{4,4}_', re.UNICODE).sub, + repl=lambda mobj: unichr(int(mobj.group(0)[2:6], 16))): if "_" in s: return subber(repl, s) return s @@ -209,7 +226,7 @@ def cnv_xsd_boolean(s): ("", "option_flags", 0, ), ("", "result", None, ), ("", "stack", None, ), - ) +) def make_name_access_maps(bk): name_and_scope_map = {} # (name.lower(), scope): Name_object @@ -284,7 +301,7 @@ def __init__(self, bk, logfile=DLF, verbosity=False): U_DC+"creator": ("creator", cnv_ST_Xstring), U_DCTERMS+"modified": ("modified", cnv_ST_Xstring), U_DCTERMS+"created": ("created", cnv_ST_Xstring), - } + } def process_coreprops(self, stream): if self.verbosity >= 2: @@ -306,6 +323,10 @@ def process_coreprops(self, stream): fprintf(self.logfile, "props: %r\n", props) self.finish_off() + @staticmethod + def convert_filename(name): + return name.replace('\\', '/').lower() + def process_rels(self, stream): if self.verbosity >= 2: fprintf(self.logfile, "\n=== Relationships ===\n") @@ -313,7 +334,7 @@ def process_rels(self, stream): r_tag = U_PKGREL + 'Relationship' for elem in tree.findall(r_tag): rid = elem.get('Id') - target = elem.get('Target') + target = X12Book.convert_filename(elem.get('Target')) reltype = elem.get('Type').split('/')[-1] if self.verbosity >= 2: self.dumpout('Id=%r Type=%r Target=%r', rid, reltype, target) @@ -371,8 +392,8 @@ def do_sheet(self, elem): None: 0, 'visible': 0, 'hidden': 1, - 'veryHidden': 2 - } + 
'veryHidden': 2, + } bk._sheet_visibility.append(visibility_map[state]) sheet = Sheet(bk, position=None, name=name, number=sheetx) sheet.utter_max_rows = X12_MAX_ROWS @@ -394,7 +415,7 @@ def do_workbookpr(self, elem): 'definedNames': do_defined_names, 'workbookPr': do_workbookpr, 'sheet': do_sheet, - } + } augment_keys(tag2meth, U_SSML12) class X12SST(X12General): @@ -407,7 +428,7 @@ def __init__(self, bk, logfile=DLF, verbosity=0): self.process_stream = self.process_stream_iterparse else: self.process_stream = self.process_stream_findall - + def process_stream_iterparse(self, stream, heading=None): if self.verbosity >= 2 and heading is not None: fprintf(self.logfile, "\n=== %s ===\n", heading) @@ -421,7 +442,7 @@ def process_stream_iterparse(self, stream, heading=None): fprintf(self.logfile, "element #%d\n", elemno) self.dump_elem(elem) result = get_text_from_si_or_is(self, elem) - sst.append(result) + sst.append(result) elem.clear() # destroy all child elements if self.verbosity >= 2: self.dumpout('Entries in SST: %d', len(sst)) @@ -491,10 +512,7 @@ def do_xf(self, elem): is_date = self.fmt_is_date.get(numFmtId, 0) self.bk._xf_index_to_xl_type_map[xfx] = is_date + 2 if self.verbosity >= 3: - self.dumpout( - 'xfx=%d numFmtId=%d', - xfx, numFmtId, - ) + self.dumpout('xfx=%d numFmtId=%d', xfx, numFmtId) self.dumpout(repr(self.bk._xf_index_to_xl_type_map)) tag2meth = { @@ -502,7 +520,7 @@ def do_xf(self, elem): 'cellXfs': do_cellxfs, 'numFmt': do_numfmt, 'xf': do_xf, - } + } augment_keys(tag2meth, U_SSML12) class X12Sheet(X12General): @@ -514,6 +532,8 @@ def __init__(self, sheet, logfile=DLF, verbosity=0): self.rowx = -1 # We may need to count them. 
self.bk = sheet.book self.sst = self.bk._sharedstrings + self.relid2path = {} + self.relid2reltype = {} self.merged_cells = sheet.merged_cells self.warned_no_cell_name = 0 self.warned_no_row_num = 0 @@ -523,7 +543,6 @@ def __init__(self, sheet, logfile=DLF, verbosity=0): def own_process_stream(self, stream, heading=None): if self.verbosity >= 2 and heading is not None: fprintf(self.logfile, "\n=== %s ===\n", heading) - getmethod = self.tag2meth.get row_tag = U_SSML12 + "row" self_do_row = self.do_row for event, elem in ET.iterparse(stream): @@ -536,6 +555,20 @@ def own_process_stream(self, stream, heading=None): self.do_merge_cell(elem) self.finish_off() + def process_rels(self, stream): + if self.verbosity >= 2: + fprintf(self.logfile, "\n=== Sheet Relationships ===\n") + tree = ET.parse(stream) + r_tag = U_PKGREL + 'Relationship' + for elem in tree.findall(r_tag): + rid = elem.get('Id') + target = elem.get('Target') + reltype = elem.get('Type').split('/')[-1] + if self.verbosity >= 2: + self.dumpout('Id=%r Type=%r Target=%r', rid, reltype, target) + self.relid2reltype[rid] = reltype + self.relid2path[rid] = normpath(join('xl/worksheets', target)) + def process_comments_stream(self, stream): root = ET.parse(stream).getroot() author_list = root[0] @@ -565,25 +598,32 @@ def do_dimension(self, elem): if ref: # print >> self.logfile, "dimension: ref=%r" % ref last_cell_ref = ref.split(':')[-1] # example: "Z99" - rowx, colx = cell_name_to_rowx_colx(last_cell_ref) + rowx, colx = cell_name_to_rowx_colx( + last_cell_ref, allow_no_col=True) self.sheet._dimnrows = rowx + 1 - self.sheet._dimncols = colx + 1 + if colx is not None: + self.sheet._dimncols = colx + 1 def do_merge_cell(self, elem): # The ref attribute should be a cell range like "B1:D5". ref = elem.get('ref') if ref: - first_cell_ref, last_cell_ref = ref.split(':') + try: + first_cell_ref, last_cell_ref = ref.split(':') + except ValueError: + # encountered a single cell merge, e.g. 
"B3" + first_cell_ref = ref + last_cell_ref = ref first_rowx, first_colx = cell_name_to_rowx_colx(first_cell_ref) last_rowx, last_colx = cell_name_to_rowx_colx(last_cell_ref) self.merged_cells.append((first_rowx, last_rowx + 1, first_colx, last_colx + 1)) def do_row(self, row_elem): - + def bad_child_tag(child_tag): - raise Exception('cell type %s has unexpected child <%s> at rowx=%r colx=%r' % (cell_type, child_tag, rowx, colx)) - + raise Exception('cell type %s has unexpected child <%s> at rowx=%r colx=%r' % (cell_type, child_tag, rowx, colx)) + row_number = row_elem.get('r') if row_number is None: # Yes, it's optional. self.rowx += 1 @@ -632,7 +672,6 @@ def bad_child_tag(child_tag): xf_index = int(cell_elem.get('s', '0')) cell_type = cell_elem.get('t', 'n') tvalue = None - formula = None if cell_type == 'n': # n = number. Most frequent type. # child contains plain text which can go straight into float() @@ -642,7 +681,8 @@ def bad_child_tag(child_tag): if child_tag == V_TAG: tvalue = child.text elif child_tag == F_TAG: - formula = cooked_text(self, child) + # formula + pass else: raise Exception('unexpected tag %r' % child_tag) if not tvalue: @@ -659,7 +699,7 @@ def bad_child_tag(child_tag): tvalue = child.text elif child_tag == F_TAG: # formula not expected here, but gnumeric does it. 
- formula = child.text + pass else: bad_child_tag(child_tag) if not tvalue: @@ -678,7 +718,8 @@ def bad_child_tag(child_tag): if child_tag == V_TAG: tvalue = cooked_text(self, child) elif child_tag == F_TAG: - formula = cooked_text(self, child) + # formula + pass else: bad_child_tag(child_tag) # assert tvalue is not None and formula is not None @@ -687,61 +728,67 @@ def bad_child_tag(child_tag): elif cell_type == "b": # b = boolean # child contains "0" or "1" - # Maybe the data should be converted with cnv_xsd_boolean; - # ECMA standard is silent; Excel 2007 writes 0 or 1 for child in cell_elem: child_tag = child.tag if child_tag == V_TAG: tvalue = child.text elif child_tag == F_TAG: - formula = cooked_text(self, child) + # formula + pass else: bad_child_tag(child_tag) - self.sheet.put_cell(rowx, colx, XL_CELL_BOOLEAN, int(tvalue), xf_index) + self.sheet.put_cell(rowx, colx, XL_CELL_BOOLEAN, cnv_xsd_boolean(tvalue), xf_index) elif cell_type == "e": # e = error # child contains e.g. "#REF!" + tvalue = '#N/A' for child in cell_elem: child_tag = child.tag if child_tag == V_TAG: tvalue = child.text elif child_tag == F_TAG: - formula = cooked_text(self, child) + # formula + pass else: bad_child_tag(child_tag) value = error_code_from_text[tvalue] self.sheet.put_cell(rowx, colx, XL_CELL_ERROR, value, xf_index) elif cell_type == "inlineStr": # Not expected in files produced by Excel. - # Only possible child is . 
# It's a way of allowing 3rd party s/w to write text (including rich text) cells # without having to build a shared string table for child in cell_elem: child_tag = child.tag if child_tag == IS_TAG: tvalue = get_text_from_si_or_is(self, child) + elif child_tag == V_TAG: + tvalue = child.text + elif child_tag == F_TAG: + # formula + pass else: bad_child_tag(child_tag) - assert tvalue is not None - self.sheet.put_cell(rowx, colx, XL_CELL_TEXT, tvalue, xf_index) + if not tvalue: + if self.bk.formatting_info: + self.sheet.put_cell(rowx, colx, XL_CELL_BLANK, '', xf_index) + else: + self.sheet.put_cell(rowx, colx, XL_CELL_TEXT, tvalue, xf_index) else: raise Exception("Unknown cell type %r in rowx=%d colx=%d" % (cell_type, rowx, colx)) tag2meth = { 'row': do_row, - } + } augment_keys(tag2meth, U_SSML12) -def open_workbook_2007_xml( - zf, - component_names, - logfile=sys.stdout, - verbosity=0, - use_mmap=0, - formatting_info=0, - on_demand=0, - ragged_rows=0, - ): +def open_workbook_2007_xml(zf, + component_names, + logfile=sys.stdout, + verbosity=0, + use_mmap=0, + formatting_info=0, + on_demand=0, + ragged_rows=0): ensure_elementtree_imported(verbosity, logfile) bk = Book() bk.logfile = logfile @@ -793,11 +840,20 @@ def open_workbook_2007_xml( heading = "Sheet %r (sheetx=%d) from %r" % (sheet.name, sheetx, fname) x12sheet.process_stream(zflo, heading) del zflo - comments_fname = 'xl/comments%d.xml' % (sheetx + 1) - if comments_fname in component_names: - comments_stream = zf.open(component_names[comments_fname]) - x12sheet.process_comments_stream(comments_stream) - del comments_stream + + rels_fname = 'xl/worksheets/_rels/%s.rels' % fname.rsplit('/', 1)[-1] + if rels_fname in component_names: + zfrels = zf.open(rels_fname) + x12sheet.process_rels(zfrels) + del zfrels + + for relid, reltype in x12sheet.relid2reltype.items(): + if reltype == 'comments': + comments_fname = x12sheet.relid2path.get(relid) + if comments_fname and comments_fname in component_names: + 
comments_stream = zf.open(comments_fname) + x12sheet.process_comments_stream(comments_stream) + del comments_stream sheet.tidy_dimensions() diff --git a/SUEWSPrepare/Modules/xlutils/compat.py b/SUEWSPrepare/Modules/xlutils/compat.py new file mode 100644 index 0000000..9de9869 --- /dev/null +++ b/SUEWSPrepare/Modules/xlutils/compat.py @@ -0,0 +1,16 @@ +import sys + +PY3 = sys.version_info[0] >= 3 + +if PY3: + unicode = str + basestring = str + xrange = range + from io import StringIO + from io import BytesIO +else: + unicode = unicode + basestring = basestring + xrange = xrange + from StringIO import StringIO + from StringIO import StringIO as BytesIO diff --git a/SUEWSPrepare/Modules/xlutils/filter.py b/SUEWSPrepare/Modules/xlutils/filter.py index 56843c4..fd7047c 100644 --- a/SUEWSPrepare/Modules/xlutils/filter.py +++ b/SUEWSPrepare/Modules/xlutils/filter.py @@ -3,6 +3,7 @@ # This Software is released under the MIT License: # http://www.opensource.org/licenses/mit-license.html # See license.txt for more details. +from __future__ import print_function import logging import os @@ -15,6 +16,9 @@ from xlutils.display import quoted_sheet_name,cell_display from xlutils.margins import cells_all_junk from xlwt.Style import default_style +from .compat import xrange + + logger = logging.getLogger('xlutils.filter') class BaseReader: @@ -622,7 +626,7 @@ def get_stream(self,filename): Returns a stream for the file in the configured directory with the specified name. 
""" - return file(os.path.join(self.dir_path,filename),'wb') + return open(os.path.join(self.dir_path, filename), 'wb') class StreamWriter(BaseWriter): "A writer for writing exactly one workbook to the supplied stream" @@ -696,8 +700,8 @@ def __init__(self,name=None,methods=True): def method(self,name,*args): if self.name: - print repr(self.name), - print "%s:%r"%(name,args) + print(repr(self.name), end=' ') + print("%s:%r" % (name, args)) try: from guppy import hpy diff --git a/SUEWSPrepare/Modules/xlutils/margins.py b/SUEWSPrepare/Modules/xlutils/margins.py index b875bc6..305439a 100644 --- a/SUEWSPrepare/Modules/xlutils/margins.py +++ b/SUEWSPrepare/Modules/xlutils/margins.py @@ -1,7 +1,10 @@ # -*- coding: ascii -*- +from __future__ import print_function import sys, glob, string +from .compat import xrange, unicode + try: from xlrd import open_workbook, XL_CELL_EMPTY, XL_CELL_BLANK, XL_CELL_TEXT, XL_CELL_NUMBER, cellname null_cell_types = (XL_CELL_EMPTY, XL_CELL_BLANK) @@ -66,8 +69,8 @@ def safe_encode(ustr, encoding): return repr(ustr) def check_file(fname, verbose, do_punc=False, fmt_info=0, encoding='ascii', onesheet=''): - print - print fname + print() + print(fname) if do_punc: checker = ispunc else: @@ -114,28 +117,32 @@ def check_file(fname, verbose, do_punc=False, fmt_info=0, encoding='ascii', ones for rowx in xrange(sheet.nrows): cell = sheet.cell(rowx, lastcolx) if cell.ctype != XL_CELL_EMPTY: - print "%s (%d, %d): type %d, value %r" % ( - cellname(rowx, lastcolx), rowx, lastcolx, cell.ctype, cell.value) + print("%s (%d, %d): type %d, value %r" % ( + cellname(rowx, lastcolx), + rowx, lastcolx, cell.ctype, cell.value + )) if (verbose or ngoodrows != sheet.nrows or ngoodcols != sheet.ncols - or (verbose >= 2 and ngoodcells and sheet_density_pct < 90.0) + or (verbose >= 2 and sheet_density_pct < 90.0) ): if oldncells: pctwaste = (1.0 - float(newncells) / oldncells) * 100.0 else: pctwaste = 0.0 shname_enc = safe_encode(sheet.name, encoding) - print "sheet 
#%2d: RxC %5d x %3d => %5d x %3d; %4.1f%% waste%s (%s)" \ + print( + "sheet #%2d: RxC %5d x %3d => %5d x %3d; %4.1f%% waste%s (%s)" % (shx, sheet.nrows, sheet.ncols, - ngoodrows, ngoodcols, pctwaste, sheet_density_pct_s, shname_enc) + ngoodrows, ngoodcols, pctwaste, + sheet_density_pct_s, shname_enc)) if hasattr(book, 'unload_sheet'): book.unload_sheet(shx) if totold: pctwaste = (1.0 - float(totnew) / totold) * 100.0 else: pctwaste = 0.0 - print "%d cells => %d cells; %4.1f%% waste" % (totold, totnew, pctwaste) + print("%d cells => %d cells; %4.1f%% waste" % (totold, totnew, pctwaste)) def main(): import optparse @@ -177,7 +184,7 @@ def main(): options.formatting, encoding, options.onesheet) except: e1, e2 = sys.exc_info()[:2] - print "*** File %s => %s:%s" % (fname, e1.__name__, e2) + print("*** File %s => %s:%s" % (fname, e1.__name__, e2)) if __name__ == "__main__": main() diff --git a/SUEWSPrepare/Modules/xlutils/save.py b/SUEWSPrepare/Modules/xlutils/save.py index ed85f12..ed10f4a 100644 --- a/SUEWSPrepare/Modules/xlutils/save.py +++ b/SUEWSPrepare/Modules/xlutils/save.py @@ -6,6 +6,8 @@ import os from xlutils.filter import process,XLRDReader,StreamWriter +from .compat import basestring + def save(wb, filename_or_stream): "Save the supplied :class:`xlrd.Book` to the supplied stream or filename." diff --git a/SUEWSPrepare/Modules/xlutils/styles.py b/SUEWSPrepare/Modules/xlutils/styles.py index 59a0222..87080dd 100644 --- a/SUEWSPrepare/Modules/xlutils/styles.py +++ b/SUEWSPrepare/Modules/xlutils/styles.py @@ -4,6 +4,8 @@ # http://www.opensource.org/licenses/mit-license.html # See license.txt for more details. 
+from .compat import xrange + class NamedStyle: """ An object with ``name`` and ``xf`` attributes representing diff --git a/SUEWSPrepare/Modules/xlutils/tests/compat.py b/SUEWSPrepare/Modules/xlutils/tests/compat.py new file mode 100644 index 0000000..19dcff9 --- /dev/null +++ b/SUEWSPrepare/Modules/xlutils/tests/compat.py @@ -0,0 +1,54 @@ +# Copyright (c) 2011-2013 Simplistix Ltd, 2015 Chris Withers +# See license.txt for license details. + +# This module contains bits and pieces to achieve compatibility across all the +# versions of python supported. + +import doctest +import re +import textwrap + +import manuel +from manuel.codeblock import ( + CODEBLOCK_START, + CODEBLOCK_END, + CodeBlock, + execute_code_block, + ) + +from ..compat import PY3 + +BYTE_LITERALS = re.compile("b((:?'.*?')|(:?\".*?\"))", re.MULTILINE) +UNICODE_LITERALS = re.compile("u((:?'.*?')|(:?\".*?\"))", re.MULTILINE) + + +def version_agnostic(text): + if PY3: + regex = UNICODE_LITERALS + else: + regex = BYTE_LITERALS + return regex.sub('\\1', text) + + +def find_code_blocks(document): + for region in document.find_regions(CODEBLOCK_START, CODEBLOCK_END): + start_end = CODEBLOCK_START.search(region.source).end() + source = version_agnostic(textwrap.dedent(region.source[start_end:])) + source = 'from __future__ import print_function\n' + source + source_location = '%s:%d' % (document.location, region.lineno) + code = compile(source, source_location, 'exec', 0, True) + document.claim_region(region) + region.parsed = CodeBlock(code, source) + + +class Manuel(manuel.Manuel): + def __init__(self): + manuel.Manuel.__init__(self, [find_code_blocks], [execute_code_block]) + + +class DocTestChecker(doctest.OutputChecker): + def check_output(self, want, got, optionflags): + want = version_agnostic(want) + return doctest.OutputChecker.check_output( + self, want, got, optionflags + ) diff --git a/SUEWSPrepare/Modules/xlutils/tests/test_docs.py b/SUEWSPrepare/Modules/xlutils/tests/test_docs.py index 
c696f42..8d6b27a 100644 --- a/SUEWSPrepare/Modules/xlutils/tests/test_docs.py +++ b/SUEWSPrepare/Modules/xlutils/tests/test_docs.py @@ -5,17 +5,16 @@ # See license.txt for more details. from doctest import REPORT_NDIFF, ELLIPSIS -from fixtures import test_files from glob import glob from manuel import doctest from manuel.testing import TestSuite from testfixtures import LogCapture,TempDirectory from os.path import dirname, join, pardir -import os +from . import compat +from .fixtures import test_files -workspace = os.environ.get('WORKSPACE', join(dirname(__file__), pardir, pardir)) -tests = glob(join(workspace, 'docs', '*.txt')) +tests = glob(join(join(dirname(__file__), pardir, pardir), 'docs', '*.rst')) options = REPORT_NDIFF|ELLIPSIS @@ -29,7 +28,8 @@ def tearDown(test): LogCapture.uninstall_all() def test_suite(): - m = doctest.Manuel(optionflags=REPORT_NDIFF|ELLIPSIS) + m = doctest.Manuel(optionflags=REPORT_NDIFF | ELLIPSIS, + checker=compat.DocTestChecker()) return TestSuite(m, *tests, setUp=setUp, tearDown=tearDown) diff --git a/SUEWSPrepare/Modules/xlutils/tests/test_filter.py b/SUEWSPrepare/Modules/xlutils/tests/test_filter.py index c0dfa08..bbd73d8 100644 --- a/SUEWSPrepare/Modules/xlutils/tests/test_filter.py +++ b/SUEWSPrepare/Modules/xlutils/tests/test_filter.py @@ -7,7 +7,6 @@ # See license.txt for more details. 
from mock import Mock -from StringIO import StringIO from tempfile import TemporaryFile from testfixtures import compare, Comparison as C, replace, log_capture, ShouldRaise, tempdir from unittest import TestSuite,TestCase,makeSuite @@ -15,6 +14,8 @@ from xlrd.formatting import XF from xlutils.filter import BaseReader,GlobReader,MethodFilter,BaseWriter,process,XLRDReader,XLWTWriter, BaseFilter from xlutils.tests.fixtures import test_files,test_xls_path,make_book,make_sheet,DummyBook +from ..compat import StringIO, PY3 +from .compat import version_agnostic as va import os @@ -248,7 +249,10 @@ def setUp(self): self.called = [] def test_cmp(self): - cmp(MethodFilter(),OurMethodFilter([])) + if PY3: + MethodFilter() == OurMethodFilter([]) + else: + cmp(MethodFilter(), OurMethodFilter([])) def do_calls_and_test(self,filter): filter.next = tf = Mock() @@ -391,6 +395,7 @@ def setUp(self): @replace('xlutils.filter.guppy',True) @replace('xlutils.filter.hpy',Mock(),strict=False) def test_method(self,hpy): + # XXX what are we logging? 
self.filter.method('name','foo',1) # hpy().heap().stat.dump('somepath') compare(hpy.call_args_list,[((),{})]) @@ -449,8 +454,12 @@ def test_set_rdsheet_1(self,h): f.finish() compare(c.method_calls,[]) h.check( - ('xlutils.filter','ERROR',"Cell A1 of sheet 'Sheet2' contains a bad value: error (#NULL!)"), - ('xlutils.filter','ERROR','No output as errors have occurred.'), + ('xlutils.filter', + 'ERROR', + va("Cell A1 of sheet b'Sheet2' contains a bad value: error (#NULL!)")), + ('xlutils.filter', + 'ERROR', + 'No output as errors have occurred.'), ) @log_capture() @@ -540,7 +549,7 @@ def test_start(self,d): f.temp_path = d.path f.prefix = 'junk' j = open(os.path.join(d.path,'junk.xls'),'wb') - j.write('junk') + j.write(b'junk') j.close() f.start() @@ -698,8 +707,8 @@ def test_use_write_sheet_name_in_logging(self,h): h.check(( 'xlutils.filter', 'DEBUG', - "Number of columns trimmed from 2 to 1 for sheet 'new'" - )) + va("Number of columns trimmed from 2 to 1 for sheet b'new'") + )) @log_capture() def test_multiple_books(self,h): @@ -1049,7 +1058,7 @@ def test_single_workbook_with_all_features(self): w = TestWriter() r(w) # check stuff on the writer - self.assertEqual(w.files.keys(),['testall.xls']) + compare(w.files.keys(), expected=['testall.xls']) self.failUnless('testall.xls' in w.closed) self.check_file(w,test_xls_path) @@ -1083,7 +1092,7 @@ def test_single_workbook_no_formatting(self): w = TestWriter() r(w) # check stuff on the writer - self.assertEqual(w.files.keys(),['testnoformatting.xls']) + compare(w.files.keys(), expected=['testnoformatting.xls']) self.failUnless('testnoformatting.xls' in w.closed) self.check_file(w,test_xls_path, l_a_xf_list=17, @@ -1099,7 +1108,10 @@ def test_multiple_workbooks(self): w = TestWriter() r(w) # check stuff on the writer - self.assertEqual(w.files.keys(),['test.xls','testnoformatting.xls','testall.xls']) + compare( + sorted(w.files.keys()), + expected=['test.xls', 'testall.xls', 'testnoformatting.xls'] + ) 
self.failUnless('test.xls' in w.closed) self.failUnless('testall.xls' in w.closed) self.failUnless('testnoformatting.xls' in w.closed) @@ -1160,7 +1172,7 @@ def test_set_rd_sheet(self): w.cell(1,0,3,0) w.finish() # check everything got written and closed - self.assertEqual(w.files.keys(),['new.xls']) + compare(w.files.keys(), expected=['new.xls']) self.failUnless('new.xls' in w.closed) # now check the cells written f = w.files['new.xls'].file @@ -1202,7 +1214,7 @@ def test_max_length_sheet_name(self): ) w = TestWriter() r(w) - self.assertEqual(w.files.keys(),['test.xls']) + compare(w.files.keys(), expected=['test.xls']) f = w.files['test.xls'].file a = open_workbook(file_contents=f.read(), formatting_info=1) self.assertEqual(a.sheet_names(),[name]) @@ -1226,7 +1238,7 @@ def test_panes(self): w = TestWriter() r(w) - self.assertEqual(w.files.keys(),['test.xls']) + compare(w.files.keys(), expected=['test.xls']) f = w.files['test.xls'].file a = open_workbook(file_contents=f.read(),formatting_info=1) sheet = a.sheet_by_index(0) @@ -1259,7 +1271,7 @@ def test_splits(self): w = TestWriter() r(w) - self.assertEqual(w.files.keys(),['test.xls']) + compare(w.files.keys(), expected=['test.xls']) f = w.files['test.xls'].file a = open_workbook(file_contents=f.read(),formatting_info=1) sheet = a.sheet_by_index(0) @@ -1286,7 +1298,7 @@ def test_zoom_factors(self): w = TestWriter() r(w) - self.assertEqual(w.files.keys(),['test.xls']) + compare(w.files.keys(), expected=['test.xls']) f = w.files['test.xls'].file a = open_workbook(file_contents=f.read(),formatting_info=1) sheet = a.sheet_by_index(0) @@ -1311,7 +1323,7 @@ def test_copy_error_cells(self): ) w = TestWriter() r(w) - self.assertEqual(w.files.keys(),['test.xls']) + compare(w.files.keys(), expected=['test.xls']) a = open_workbook(file_contents=w.files['test.xls'].file.read()) cell = a.sheet_by_index(0).cell(0,0) self.assertEqual(cell.ctype,XL_CELL_ERROR) @@ -1323,7 +1335,7 @@ def test_copy_boolean_cells(self): ) w = 
TestWriter() r(w) - self.assertEqual(w.files.keys(),['test.xls']) + compare(w.files.keys(), expected=['test.xls']) a = open_workbook(file_contents=w.files['test.xls'].file.read()) cell = a.sheet_by_index(0).cell(0,0) self.assertEqual(cell.ctype,XL_CELL_BOOLEAN) diff --git a/SUEWSPrepare/Modules/xlutils/tests/test_save.py b/SUEWSPrepare/Modules/xlutils/tests/test_save.py index eac8847..bd730c5 100644 --- a/SUEWSPrepare/Modules/xlutils/tests/test_save.py +++ b/SUEWSPrepare/Modules/xlutils/tests/test_save.py @@ -6,9 +6,8 @@ import os from mock import Mock -from shutil import rmtree -from StringIO import StringIO -from tempfile import mkdtemp,TemporaryFile +from ..compat import StringIO +from tempfile import TemporaryFile from testfixtures import replace,tempdir from unittest import TestSuite,TestCase,makeSuite from xlutils.save import save @@ -34,10 +33,9 @@ def test_save_path(self,c,d): w = args[1] self.failUnless(isinstance(w,StreamWriter)) f = w.stream - self.failUnless(isinstance(f,file)) - self.assertEqual(f.name,path) - self.assertEqual(f.mode,'wb') - self.assertEqual(f.closed,True) + self.assertEqual(f.name, path) + self.assertEqual(f.mode, 'wb') + self.assertEqual(f.closed, True) @replace('xlutils.save.process',Mock()) def test_save_stringio(self,c): diff --git a/SUEWSPrepare/Modules/xlutils/tests/test_styles.py b/SUEWSPrepare/Modules/xlutils/tests/test_styles.py index 0eae042..fdb0b04 100644 --- a/SUEWSPrepare/Modules/xlutils/tests/test_styles.py +++ b/SUEWSPrepare/Modules/xlutils/tests/test_styles.py @@ -3,9 +3,8 @@ # This Software is released under the MIT License: # http://www.opensource.org/licenses/mit-license.html # See license.txt for more details. 
- from mock import Mock -from testfixtures import should_raise +from testfixtures import ShouldRaise from unittest import TestSuite,TestCase,makeSuite from xlutils.styles import Styles @@ -45,17 +44,9 @@ def test_multiple_names_for_xfi_bad_1(self): 'A':(0,0), 'B':(0,0), } - styles = should_raise(Styles,AssertionError) - styles(self.wb) - - def test_multiple_names_for_xfi_bad_2(self): - self.wb.style_name_map = { - 'A':(0,0), - '':(0,0), - } - styles = should_raise(Styles,AssertionError) - styles(self.wb) - + with ShouldRaise(AssertionError()): + Styles(self.wb) + def test_suite(): return TestSuite(( makeSuite(TestStyles), diff --git a/SUEWSPrepare/Modules/xlutils/tests/test_view.py b/SUEWSPrepare/Modules/xlutils/tests/test_view.py index f76cc8e..5dc7da7 100644 --- a/SUEWSPrepare/Modules/xlutils/tests/test_view.py +++ b/SUEWSPrepare/Modules/xlutils/tests/test_view.py @@ -11,6 +11,8 @@ from xlutils.view import View, Row, Col, CheckerView from xlutils.tests.fixtures import test_files +from .compat import PY3 + class Check(object): @@ -156,8 +158,44 @@ def test_matches(self): def test_does_not_match(self): - with ShouldRaise(AssertionError('''\ -Sequence not as expected: + with ShouldRaise(AssertionError) as s: + CheckerView(path.join(test_files,'testall.xls'))['Sheet1'].compare( + (u'R0C0', u'R0C1'), + (u'R1C0', u'R1C1'), + (u'A merged cell', ''), + ('', ''), + ('', ''), + (u'More merged cells', 'XX') + ) + + if PY3: + expected="""\ +sequence not as expected: + +same: +(('R0C0', 'R0C1'), ('R1C0', 'R1C1'), ('A merged cell', ''), ('', ''), ('', '')) + +expected: +(('More merged cells', 'XX'),) + +actual: +(('More merged cells', ''),) + +While comparing [5]: sequence not as expected: + +same: +('More merged cells',) + +expected: +('XX',) + +actual: +('',) + +While comparing [5][1]: 'XX' (expected) != '' (actual)""" + else: + expected='''\ +sequence not as expected: same: ((u'R0C0', u'R0C1'), @@ -166,16 +204,23 @@ def test_does_not_match(self): ('', ''), ('', '')) -first: 
+expected: ((u'More merged cells', 'XX'),) -second: -((u'More merged cells', ''),)''')): - CheckerView(path.join(test_files,'testall.xls'))['Sheet1'].compare( - (u'R0C0', u'R0C1'), - (u'R1C0', u'R1C1'), - (u'A merged cell', ''), - ('', ''), - ('', ''), - (u'More merged cells', 'XX') - ) +actual: +((u'More merged cells', u''),) + +While comparing [5]: sequence not as expected: + +same: +(u'More merged cells',) + +expected: +('XX',) + +actual: +(u'',) + +While comparing [5][1]: 'XX' (expected) != u'' (actual)''' + + compare(expected, actual=str(s.raised)) diff --git a/SUEWSPrepare/Modules/xlutils/version.txt b/SUEWSPrepare/Modules/xlutils/version.txt index 943f9cb..227cea2 100644 --- a/SUEWSPrepare/Modules/xlutils/version.txt +++ b/SUEWSPrepare/Modules/xlutils/version.txt @@ -1 +1 @@ -1.7.1 +2.0.0 diff --git a/SUEWSPrepare/Modules/xlutils/view.py b/SUEWSPrepare/Modules/xlutils/view.py index a09638f..277dd81 100644 --- a/SUEWSPrepare/Modules/xlutils/view.py +++ b/SUEWSPrepare/Modules/xlutils/view.py @@ -8,6 +8,8 @@ from xlrd import open_workbook, XL_CELL_DATE, xldate_as_tuple from xlwt.Utils import col_by_name +from .compat import xrange + class Index(object): def __init__(self, name): self.name = name @@ -139,7 +141,7 @@ def compare(self, *expected): # late import in case testfixtures isn't around! 
from testfixtures import compare as _compare - _compare(expected, tuple(actual)) + _compare(expected, actual=tuple(actual)) class CheckerView(View): """ diff --git a/SUEWSPrepare/Modules/xlwt/Autofit.py b/SUEWSPrepare/Modules/xlwt/Autofit.py deleted file mode 100644 index 910f0c4..0000000 --- a/SUEWSPrepare/Modules/xlwt/Autofit.py +++ /dev/null @@ -1,484 +0,0 @@ -# -*- coding: windows-1252 -*- - -#------------------------------------------------------------------------------- -# Name: Autofit -# Purpose: Emulates the render-time function to 'Autofit Selection' the column width to the data it contains -# -# Author: Warwick Prince warwickp@mushroomsys.com -# -# Created: 30/01/12 9:15 AM -# Copyright: (c) 2012 Mushroom Systems International Pty. Ltd. -# Licence: You are free to use this code in any way you see fit - but you must retain this license message -#------------------------------------------------------------------------------- -#!/usr/bin/env python - -import xlwt.Formatting as Formatting -from xlwt.Worksheet import Worksheet - -class TempWorkbook(object): - """ - Used as a holder for some copies of critical dicts that we need to reverse key for our needs - """ - - def locateAFFont(self, font): - """ - Using the Formatting.Font supplied, look up an appropriate AFFont (AutoFit Font) handler - """ - - fontName = 'AFFont%s' % font.name.replace(' ', '_') - - # Look in font cache - if fontName not in self.fontCache: - # Do we support this font? If not, use the base handler - cachedFont = globals().get(fontName, AFFont) - - # Add to cache - self.fontCache[fontName] = cachedFont() - - return self.fontCache[fontName] - -class AFFont(object): - """ - Base Autofit Font class (Arial). Extend this for each specific Font supported - """ - - # Gutter width to add on the end of the content - treat as "right hand padding' - AUTOFIT_GUTTER = 255 - # Adjust this to factor the width against Arial. i.e. 
50 means that this font is half the width of Arial at the same font size - # This is only used when the new font is a direct relationship to Arial. For others, a new AUTOFIT_CHAR_MAP is required - this can - # be generated using CreateLearningSheet and GenerateAFFont. - AUTOFIT_FONT_FACTOR = 100.00 - - # Paste in the character mapping created by 'GenerateAFFont' here - AUTOFIT_CHAR_MAP = {' ': [(134.6, 0.2724), (134.6, 0.2724)], - '!': [(134.6, 0.2724), (155.9, 0.2292)], - '"': [(164.4, 0.2114), (215.6, 0.2114)], - '#': [(249.8, 0.1872), (249.8, 0.1872)], - '$': [(249.8, 0.1872), (249.8, 0.1872)], - '%': [(394.8, 0.22), (394.8, 0.2284)], - '&': [(301.0, 0.2554), (322.3, 0.212)], - "'": [(96.2, 0.2554), (113.2, 0.1944)], - '(': [(155.9, 0.2292), (155.9, 0.2292)], - ')': [(155.9, 0.2292), (155.9, 0.2292)], - '*': [(181.5, 0.2548), (181.5, 0.2548)], - '+': [(266.8, 0.2882), (266.8, 0.2882)], - ',': [(134.6, 0.2724), (134.6, 0.2724)], - '-': [(155.9, 0.2292), (155.9, 0.2292)], - '.': [(134.6, 0.2724), (134.6, 0.2724)], - '/': [(134.6, 0.2724), (134.6, 0.2724)], - '0': [(249.8, 0.1872), (249.8, 0.1872)], - '1': [(224.2, 0.3066), (228.4, 0.2284)], - '2': [(249.8, 0.1872), (249.8, 0.1872)], - '3': [(249.8, 0.1872), (249.8, 0.1872)], - '4': [(249.8, 0.1872), (249.8, 0.1872)], - '5': [(249.8, 0.1872), (249.8, 0.1872)], - '6': [(249.8, 0.1872), (249.8, 0.1872)], - '7': [(249.8, 0.1872), (249.8, 0.1872)], - '8': [(249.8, 0.1872), (249.8, 0.1872)], - '9': [(249.8, 0.1872), (249.8, 0.1872)], - ':': [(134.6, 0.2724), (155.9, 0.2292)], - ';': [(134.6, 0.2724), (155.9, 0.2292)], - '<': [(266.8, 0.2882), (266.8, 0.2882)], - '=': [(266.8, 0.2882), (266.8, 0.2882)], - '>': [(266.8, 0.2882), (266.8, 0.2882)], - '?': [(249.8, 0.1872), (275.4, 0.2298)], - '@': [(450.3, 0.2462), (433.2, 0.254)], - 'A': [(301.0, 0.2554), (322.3, 0.212)], - 'B': [(301.0, 0.2554), (322.3, 0.212)], - 'C': [(322.3, 0.212), (322.3, 0.212)], - 'D': [(322.3, 0.212), (322.3, 0.212)], - 'E': [(301.0, 0.2554), 
(301.0, 0.2554)], - 'F': [(275.4, 0.2298), (275.4, 0.2298)], - 'G': [(347.9, 0.2376), (347.9, 0.2376)], - 'H': [(322.3, 0.212), (322.3, 0.212)], - 'I': [(134.6, 0.2724), (134.6, 0.2724)], - 'J': [(228.4, 0.2456), (249.8, 0.1872)], - 'K': [(301.0, 0.2554), (322.3, 0.212)], - 'L': [(249.8, 0.1872), (275.4, 0.2298)], - 'M': [(369.2, 0.1944), (369.2, 0.1944)], - 'N': [(322.3, 0.212), (322.3, 0.212)], - 'O': [(347.9, 0.2376), (347.9, 0.2376)], - 'P': [(301.0, 0.2554), (301.0, 0.2554)], - 'Q': [(347.9, 0.2376), (347.9, 0.2376)], - 'R': [(322.3, 0.212), (322.3, 0.212)], - 'S': [(301.0, 0.2554), (301.0, 0.2554)], - 'T': [(275.4, 0.2298), (275.4, 0.2212)], - 'U': [(322.3, 0.212), (322.3, 0.212)], - 'V': [(301.0, 0.2554), (301.0, 0.2554)], - 'W': [(420.4, 0.254), (420.4, 0.2626)], - 'X': [(296.7, 0.178), (301.0, 0.2554)], - 'Y': [(301.0, 0.2554), (301.0, 0.2554)], - 'Z': [(275.4, 0.2298), (275.4, 0.2298)], - 'a': [(249.8, 0.1872), (249.8, 0.1872)], - 'b': [(249.8, 0.1872), (275.4, 0.2298)], - 'c': [(228.4, 0.2456), (249.8, 0.1872)], - 'd': [(249.8, 0.1872), (275.4, 0.2298)], - 'e': [(249.8, 0.1872), (249.8, 0.1872)], - 'f': [(126.0, 0.254), (155.9, 0.2292)], - 'g': [(249.8, 0.1872), (275.4, 0.2298)], - 'h': [(249.8, 0.1872), (275.4, 0.2298)], - 'i': [(109.0, 0.2384), (134.6, 0.2724)], - 'j': [(109.0, 0.2384), (134.6, 0.2724)], - 'k': [(228.4, 0.2456), (249.8, 0.1872)], - 'l': [(109.0, 0.2384), (134.6, 0.2724)], - 'm': [(373.5, 0.2804), (394.8, 0.22)], - 'n': [(249.8, 0.1872), (275.4, 0.2298)], - 'o': [(249.8, 0.1872), (275.4, 0.2298)], - 'p': [(249.8, 0.1872), (275.4, 0.2298)], - 'q': [(249.8, 0.1872), (275.4, 0.2298)], - 'r': [(155.9, 0.2292), (181.5, 0.2548)], - 's': [(228.4, 0.2456), (249.8, 0.1872)], - 't': [(134.6, 0.2724), (155.9, 0.2292)], - 'u': [(249.8, 0.1872), (275.4, 0.2298)], - 'v': [(228.4, 0.237), (249.8, 0.1872)], - 'w': [(322.3, 0.212), (347.9, 0.2376)], - 'x': [(228.4, 0.237), (249.8, 0.1872)], - 'y': [(228.4, 0.2456), (249.8, 0.1872)], - 'z': [(228.4, 
0.2456), (228.4, 0.2456)]} - - def measureText(self, text, xlwtFont): - """ - Returns the best estimate at the actual width of the text supplied, given the font and size etc. - """ - - # How does it work? - # Using the sample sheet of every supported char, measurements were taken of 10 chars and 100 char samples - # in both normal and bolded text. Then, various factors were calculated and stored in a character map for the - # given font. Excel has some strange rendering behaviour.. The width is not directly proportional to the number - # of letters in a given string. e.g. The width of 100 'A's is not equal to 10 x the width of 10 'A's. This makes things a - # little more complicated. I calculated a general "creep factor' which is the amount the apparent letter size - # changes given the number of them there are in a string. This is calculated by comparisons of 10 and 100 of the same - # letter. The creep factor is then included in the calc for each letter in the text being measured. Not perfect, but - # pretty damn close! :-) - - width = 0 - - # Map to use the 0th or 1st element of the widths list, based on bold True or False bold attribute of font - useWidth = {False:0, True:1}.get(xlwtFont.bold, 0) - - # Iterate over the text, looking up it's width and adjusting for it's position in the overall length (creep) - for count, letter in enumerate(text): - width10, creepFactor = self.AUTOFIT_CHAR_MAP.get(letter, self.AUTOFIT_CHAR_MAP['W'])[useWidth] - widthThisLetter = width10 - (count * creepFactor) - width += widthThisLetter - - # Factoring for "Arial" -> "MyFont" transforms - width *= (self.AUTOFIT_FONT_FACTOR / 100.00) - - # Height adjustment factor is the base font size of 10 (stored as 200) So if it's 20 (400) its 400/200 times the size i.e. 
2 - width *= (xlwtFont.height / 200.00) - - # Finally, add the gutter width on the end - width += self.AUTOFIT_GUTTER - - return round(width, 0) - -class AFFontArial(AFFont): - """ - Specific support for Arial Font - """ - - # Note - All the values in the base font are set for Arial, so I don't need to do anything here. Simply overload the values - # that you need to change here - pass - -def Autofit(sheet, fromRowx=0, toRowx=-1, fromColx=0, toColx=-1, emptyCellsAreZero=True): - - """ - Performs a best-efforts auto fit on the column(s) and row(s) provided. - - emptyCellsAreZero tells the autofit logic to treat empty as 0 width. Set to false to treat empty as default to standard column width - - Row and column ranges can be supplied, however, leaving them all as default will autofit the entire sheet. - - """ - - assert isinstance(sheet, Worksheet), 'Sheet must be a Workbook.Worksheet instance!' - - # Set some defaults if open ranges supplied - if toRowx == -1: - toRowx = sheet.last_used_row - if toColx == -1: - toColx = sheet.last_used_col - - assert 0 <= fromColx <= sheet.last_used_col, 'FromColX out of bounds. 0 - %d' % sheet.last_used_col - assert 0 <= toColx <= sheet.last_used_col, 'ToColX out of bounds. 0 - %d' % sheet.last_used_col - assert 0 <= fromRowx <= sheet.last_used_row, 'FromRowX out of bounds. 0 - %d' % sheet.last_used_row - assert 0 <= toRowx <= sheet.last_used_row, 'ToRowX out of bounds. 0 - %d' % sheet.last_used_row - assert fromColx <= toColx, 'ToColx must be <= FromColx' - assert fromRowx <= toRowx, 'ToRowx must be <= FromRowx' - - workBook = sheet._Worksheet__parent - styles = workBook._Workbook__styles - - # As we need to search a number of dicts that are keyed in the *wrong way* for our needs, we will create copies here temporarily to - # speed things up. 
They are stored 'A':1 when I need to locate key 1 - so they are transformed to 1:'A' - # Note: If this causes memory issues on very large sheets, then it could be changed to search on the original dicts - it would - # just be a LOT slower. - - tempWB = TempWorkbook() - - tempWB.styles = styles - tempWB.style_xfByID = {value:key for key, value in styles._xf_id2x.items()} - tempWB.style_fontsByID = {value:key for key, value in styles._font_id2x.items()} - tempWB.str_indexesByID = {value:key for key, value in workBook._Workbook__sst._str_indexes.items()} - tempWB.style_numberFormats = {value:key for key, value in styles._num_formats.items()} - tempWB.sheet = sheet - tempWB.fontCache = {'Arial':AFFontArial()} - tempWB.emptyCellsAreZero = emptyCellsAreZero - - # For performance, I'm moving all the row objects in range into a specific selected rows list to iterate over, - # as it's possibly done a number of times. - rowRange = [sheet.rows[idx] for idx in xrange(fromRowx, toRowx+1)] - - for colx in xrange(fromColx, toColx+1): - autofitWidth = 0 - # Loop over all the rows - for row in rowRange: - # Get the width of the cell - cellWidth = GetCellWidth(tempWB, row, colx) - - if cellWidth > autofitWidth: - autofitWidth = cellWidth - - # Set the column to the calculated width - sheet.col(colx).width = autofitWidth - -def GetCellWidth(workBook, row, colx): - """ - By looking into the cell class type, we can determine how to get the original value of the cell back. In the case of numbers, times - and dates, we are forced to then format them as per the formatting settings.. 
We then pass that value to the font specific - handler for estimating the width of the string value - """ - - try: - # The cell may be out of bounds on this row - cell = row._Row__cells[colx] - except KeyError: - # Return the default width of the column or zero - if workBook.emptyCellsAreZero: - return 0 - return workBook.sheet.col(colx).width - - cellClass = 'Handle%s' % cell.__class__.__name__ - - Handler = globals().get(cellClass, HandleDefaultCell) - - return Handler(workBook, row, cell) - -def HandleDefaultCell(workBook, row, cell): - """ - Process general or string type cells, extracting the value from the central repository of all string values in the workbook. - - If the cell class does not support shared string array logic, then no processing can occur and a default width will be returned. - """ - - # Get the sheet main column record - sheetCol = workBook.sheet.col(cell.colx) - - # Set the default answer to be the current column width - width = sheetCol.width - - if not hasattr(cell, 'sst_idx'): - # We can not work out what the width of this is - just return the columns width - return width - - # Get the cell contents - cellValue = workBook.str_indexesByID[cell.sst_idx] - - if cellValue: - # Get the xf formatting index from either this cell this row, or the sheet column default - if cell.xf_idx > 15: - xf_idx = cell.xf_idx - elif row._Row__xf_index > 15: - xf_idx = row._Row__xf_index - else: - xf_idx = sheetCol._xf_idx - - # Look up the xf formatting record by index - xf_rec = workBook.style_xfByID.get(xf_idx, None) - # If we found our style, then get the font index from it - if xf_rec: - fontID = xf_rec[0] - font = workBook.style_fontsByID.get(fontID, None) - - if font: - # Get an Autofit Font manager from the cache - fontManager = workBook.locateAFFont(font) - # We have a font, so now use its details to process the text - width = fontManager.measureText(cellValue, font) - - return width - -def HandleStrCell(workBook, row, cell): - """ - Will handle 
string like cell data using the default handler - """ - - return HandleDefaultCell(workBook, row, cell) - -def HandleBlankCell(workBook, row, cell): - """ - Will handle blank cells using the default handler - """ - - if workBook.emptyCellsAreZero: - return 0 - - return HandleDefaultCell(workBook, row, cell) - -def HandleMulBlankCell(workBook, row, cell): - """ - Will handle multi blank cells using the default handler - """ - - if workBook.emptyCellsAreZero: - return 0 - - return HandleDefaultCell(workBook, row, cell) - -def HandleNumberCell(workBook, row, cell): - """ - Will handle number like cells. This is actually one of the hardest (date is the other) because of the various format options - """ - - # Get the sheet main column record - sheetCol = workBook.sheet.col(cell.colx) - - # Default to the columns width - width = sheetCol.width - - # Get the xf formatting index from either this cell this row, or the sheet column default - - if cell.xf_idx > 15: - xf_idx = cell.xf_idx - elif row._Row__xf_index > 15: - xf_idx = row._Row__xf_index - else: - xf_idx = sheetCol._xf_index - - # Look up the xf formatting record by index - xf_rec = workBook.style_xfByID.get(xf_idx, None) - # If we found our style, then get the number format index from it - - if xf_rec: - formatID = xf_rec[1] - format = workBook.style_numberFormats.get(formatID, None) - - if format: - # We have a format string, work out if possible the final format of our number as if rendered in Excel.. - if format == '0': - cellValue = str(int(cell.number)) - elif format == '0.00': - cellValue = '%0.2f' % cell.number - elif format == '#,##0': - cellValue = '{:,}'.format(int(cell.number)) - elif format == '#,##0.00': - # I feel quite sure this can be done better than this.. 
- cellValue = '{,f}'.format(cell.number) - cellValue = cellValue.split('.') - cellValue[1] = cellValue[1][:2] - cellValue = '.'.join(cellValue) - elif format == '"$"#,##0_);("$"#,##' or format == '"$"#,##0_);[Red]("$"#,##': - cellValue = '${:,}'.format(int(cell.number)) - elif format == '"$"#,##0.00_);("$"#,##' or format == '"$"#,##0.00_);[Red]("$"#,##': - # I feel quite sure this can be done better than this.. - cellValue = '${,f}'.format(cell.number) - cellValue = cellValue.split('.') - cellValue[1] = cellValue[1][:2] - cellValue = '.'.join(cellValue) - elif format == '0%': - cellValue = '%d%%' % int(cell.number) - elif format == '0.00%': - cellValue = '%0.2f%%' % cell.number - elif format == '0.00E+00': - cellValue = '%0.02E' % cell.number - elif format == '# ?/?': - cellValue = '9 9/9' - elif format == '# ??/??': - cellValue = '9 99/99' - elif format == 'M/D/YY': - cellValue = '99/99/99' - elif format == 'D-MMM-YY': - cellValue = '99-WWW-99' - elif format == 'MMM-YY': - cellValue = 'WWW-99' - elif format == 'h:mm AM/PM': - cellValue = '99:99 pm' - elif format == 'h:mm:ss AM/PM': - cellValue = '99:99:99 pm' - elif format.lower() == 'h:mm': - cellValue = '99:99' - elif format.lower() == 'h:mm:ss': - cellValue = '99:99:99' - elif format == 'M/D/YY h:mm': - cellValue = '99/99/99 99:99' - elif format == '_(#,##0_);(#,##0)' or format == '_(#,##0_);[Red](#,##0)': - if cell.number < 0.00: - cellValue = '({:,})'.format(-cell.number) - else: - cellValue = '{:,}'.format(cell.number) - elif format == '_(#,##0.00_);(#,##0.00)' or format == '_(#,##0.00_);[Red](#,##0.00)': - if cell.number < 0.00: - cellValue = '${,f}'.format(-cell.number) - cellValue = cellValue.split('.') - cellValue[1] = cellValue[1][:2] - cellValue = '.'.join(cellValue) - cellValue = '(%s)' % cellValue - else: - cellValue = '${,f}'.format(cell.number) - cellValue = cellValue.split('.') - cellValue[1] = cellValue[1][:2] - cellValue = '.'.join(cellValue) - elif format == '_("$"* #,##0_);_("$"* 
(#,##0);_("$"* "-"_);_(@_)' or format == '_(* #,##0_);_(* (#,##0);_(* "-"_);_(@_)': - if cell.number < 0.00: - cellValue = '(${:,})'.format(-cell.number) - else: - cellValue = '${:,}'.format(cell.number) - elif format == '_("$"* #,##0.00_);_("$"* (#,##0.00);_("$"* "-"??_);_(@_)' or format == '_(* #,##0.00_);_(* (#,##0.00);_(* "-"??_);_(@_)': - if cell.number < 0.00: - cellValue = '${,f}'.format(-cell.number) - cellValue = cellValue.split('.') - cellValue[1] = cellValue[1][:2] - cellValue = '.'.join(cellValue) - cellValue = '(%s)' % cellValue - else: - cellValue = '${,f}'.format(cell.number) - cellValue = cellValue.split('.') - cellValue[1] = cellValue[1][:2] - cellValue = '.'.join(cellValue) - elif format == 'mm:ss': - cellValue = '99:99' - elif format == '[h]:mm:ss': - cellValue = '99:99:99' - elif format == 'mm:ss.0': - cellValue = '99:99.9999999' - elif format == '##0.0E+0': - cellValue = '%03.1E' % cell.number - elif format.lower() == 'general': - # General/general - cellValue = '%s' % cell.number - elif format == 'DD/MM/YYYY' or format == 'MM/DD/YYYY' or format == 'DD-MM-YYYY' or format == 'MM-DD-YYYY': - # Non standard yet common format - cellValue = '99/99/9999' - elif format == 'DD/MM/YY' or format == 'MM/DD/YY' or format == 'DD-MM-YY' or format == 'MM-DD-YY': - # Non standard yet common format - cellValue = '99/99/99' - else: - # Hmm, not a standard format, so just use the length of the format string itself. Ideas anyone? - cellValue = format - - # Minor adjustment just to make sure. 
- cellValue += " " - - # See if we can locate the xlwt font now - fontID = xf_rec[0] - font = workBook.style_fontsByID.get(fontID, None) - - if font: - # Get an Autofit Font manager from the cache - fontManager = workBook.locateAFFont(font) - # We have a font, so now use its details to process the text - width = fontManager.measureText(cellValue, font) - - return width diff --git a/SUEWSPrepare/Modules/xlwt/BIFFRecords.py b/SUEWSPrepare/Modules/xlwt/BIFFRecords.py index 265ccfe..9b8b8e9 100644 --- a/SUEWSPrepare/Modules/xlwt/BIFFRecords.py +++ b/SUEWSPrepare/Modules/xlwt/BIFFRecords.py @@ -1,7 +1,12 @@ -# -*- coding: cp1252 -*- +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + from struct import pack + +import six + from .UnicodeUtils import upack1, upack2, upack2rt -from .compat import basestring, unicode, unicode_type, xrange, iteritems + class SharedStringTable(object): _SST_ID = 0x00FC @@ -21,8 +26,8 @@ def __init__(self, encoding): self._current_piece = None def add_str(self, s): - if self.encoding != 'ascii' and not isinstance(s, unicode_type): - s = unicode(s, self.encoding) + if self.encoding != 'ascii' and not isinstance(s, six.text_type): + s = s.decode(self.encoding) self._add_calls += 1 if s not in self._str_indexes: idx = len(self._str_indexes) + len(self._rt_indexes) @@ -32,12 +37,12 @@ def add_str(self, s): idx = self._str_indexes[s] self._tally[idx] += 1 return idx - + def add_rt(self, rt): rtList = [] for s, xf in rt: - if self.encoding != 'ascii' and not isinstance(s, unicode_type): - s = unicode(s, self.encoding) + if self.encoding != 'ascii' and not isinstance(s, six.text_type): + s = s.decode(self.encoding) rtList.append((s, xf)) rt = tuple(rtList) self._add_calls += 1 @@ -67,13 +72,13 @@ def get_biff_record(self): self._sst_record = b'' self._continues = [None, None] self._current_piece = pack(' 0),int(px>0)) -> allowed values @@ -1374,7 +1384,7 @@ class PanesRecord(BiffRecord): (1,0):(1,3), (1,1):(0,1,2,3), } - + def 
__init__(self, px, py, first_row_bottom, first_col_right, active_pane): allowed = self.valid_active_pane.get( (int(px > 0),int(py > 0)) @@ -1728,10 +1738,10 @@ class RefModeRecord(BiffRecord): """ This record is part of the Calculation Settings Block. It stores which method is used to show cell addresses in formulas. - The RC mode uses numeric indexes for rows and columns, - i.e. R(1)C(-1), or R1C1:R2C2. - The A1 mode uses characters for columns and numbers for rows, - i.e. B1, or $A$1:$B$2. + The “RC” mode uses numeric indexes for rows and columns, + i.e. “R(1)C(-1)”, or “R1C1:R2C2”. + The “A1” mode uses characters for columns and numbers for rows, + i.e. “B1”, or “$A$1:$B$2”. Record REFMODE, BIFF2-BIFF8: @@ -1779,7 +1789,7 @@ def __init__(self, delta): class SaveRecalcRecord(BiffRecord): """ This record is part of the Calculation Settings Block. - It contains the Recalculate before save option in + It contains the “Recalculate before save” option in Excel's calculation settings dialogue. Record SAVERECALC, BIFF3-BIFF8: @@ -2374,7 +2384,7 @@ def __init__(self, refs): def get(self): res = [] nrefs = len(self.refs) - for idx in xrange(0, nrefs, _maxRefPerRecord): + for idx in range(0, nrefs, _maxRefPerRecord): chunk = self.refs[idx:idx+_maxRefPerRecord] krefs = len(chunk) if idx: # CONTINUE record @@ -2382,8 +2392,8 @@ def get(self): else: # ExternSheetRecord header = pack(" # Portions are Copyright (c) 2002-2004 John McNamara (Perl Spreadsheet::WriteExcel) -from .BIFFRecords import BiffRecord from struct import pack, unpack +from .BIFFRecords import BiffRecord + def _size_col(sheet, col): return sheet.col_width(col) @@ -91,8 +92,8 @@ def _position_image(sheet, row_start, col_start, x1, y1, width, height): row_end += 1 # Bitmap isn't allowed to start or finish in a hidden cell, i.e. a cell # with zero height or width. 
- if ((_size_col(sheet, col_start) == 0) or (_size_col(sheet, col_end) == 0) - or (_size_row(sheet, row_start) == 0) or (_size_row(sheet, row_end) == 0)): + if ((_size_col(sheet, col_start) == 0) or (_size_col(sheet, col_end) == 0) or + (_size_row(sheet, row_start) == 0) or (_size_row(sheet, row_end) == 0)): return # Convert the pixel values to the percentage value expected by Excel x1 = int(float(x1) / _size_col(sheet, col_start) * 1024) @@ -195,7 +196,9 @@ def _process_bitmap(bitmap): with open(bitmap, "rb") as fh: # Slurp the file into a string. data = fh.read() + return _process_bitmap_data(data) +def _process_bitmap_data(data): # Check that the file is big enough to be a bitmap. if len(data) <= 0x36: raise Exception("bitmap doesn't contain enough data.") @@ -240,21 +243,33 @@ def _process_bitmap(bitmap): return (width, height, size, data) -class ImDataBmpRecord(BiffRecord): +class ImRawDataBmpRecord(BiffRecord): _REC_ID = 0x007F - def __init__(self, filename): + def __init__(self, data): """Insert a 24bit bitmap image in a worksheet. The main record required is IMDATA but it must be proceeded by a OBJ record to define its position. 
""" BiffRecord.__init__(self) - self.width, self.height, self.size, data = _process_bitmap(filename) + self.width, self.height, self.size, data = _process_bitmap_data(data) + self._write_imdata(data) + + def _write_imdata(self, data): # Write the IMDATA record to store the bitmap data cf = 0x09 env = 0x01 lcb = self.size self._rec_data = pack("> 9 dir_sect_count = len(self.dir_stream) >> 9 - + total_sect_count = book_sect_count + dir_sect_count SAT_sect_count = 0 MSAT_sect_count = 0 @@ -155,7 +155,7 @@ def _build_sat(self): sect += 1 while sect < book_sect_count + MSAT_sect_count + SAT_sect_count: - self.SAT_sect.append(sect) + self.SAT_sect.append(sect) SAT[sect] = self.SID_USED_BY_SAT sect += 1 @@ -245,7 +245,7 @@ def _build_header(self): msat_start_sid, total_msat_sectors ]) - + def save(self, file_name_or_filelike_obj, stream): # 1. Align stream on 0x1000 boundary (and therefore on sector boundary) @@ -255,7 +255,7 @@ def save(self, file_name_or_filelike_obj, stream): self._build_directory() self._build_sat() self._build_header() - + f = file_name_or_filelike_obj we_own_it = not hasattr(f, 'write') if we_own_it: @@ -273,7 +273,7 @@ def save(self, file_name_or_filelike_obj, stream): if e.errno != 22: # "Invalid argument" i.e. 'stream' is too big raise # some other problem chunk_size = 4 * 1024 * 1024 - for offset in xrange(0, len(stream), chunk_size): + for offset in range(0, len(stream), chunk_size): f.write(buffer(stream, offset, chunk_size)) f.write(padding) f.write(self.packed_MSAT_2nd) diff --git a/SUEWSPrepare/Modules/xlwt/CreateLearningSheet.py b/SUEWSPrepare/Modules/xlwt/CreateLearningSheet.py deleted file mode 100644 index 98b4bdc..0000000 --- a/SUEWSPrepare/Modules/xlwt/CreateLearningSheet.py +++ /dev/null @@ -1,103 +0,0 @@ -#------------------------------------------------------------------------------- -# Name: CreateLearningSheet -# Purpose: Creates an Excel sheet of sample data. 
This is then "Autofitted" using the real Excel function -# and then read back in and processed to work out the character mappings to be used later. -# -# Author: Warwick Prince Mushroom Systems International Pty. Ltd. -# -# Created: 3/02/12 11:56 AM -#------------------------------------------------------------------------------- -#!/usr/bin/env python - -import xlwt -import sys - -def ExportSampleSheet(): - """ - Create a sample sheet format and save it to the file supplied. The user then must select all the sheets and using Excel, - Autofit them and save the workbook. Then, use GenerateAFFont to read the Excel formatted sheet back in and learn from it. - """ - - if len(sys.argv) != 2: - print 'Usage: CreateLearningSheet [SampleSheet.xls]' - sys.exit() - - # Get the workbook file name from the command line - fileName = sys.argv[1] - - if not fileName.lower().endswith('.xls'): - fileName = '%s.xls' % fileName - - xlWorkbook = xlwt.Workbook() - - s1 = xlWorkbook.add_sheet('Normal Sample UPPER') - s2 = xlWorkbook.add_sheet('Normal Sample Lower') - s3 = xlWorkbook.add_sheet('Normal Sample Other') - s4 = xlWorkbook.add_sheet('Bold Sample UPPER') - s5 = xlWorkbook.add_sheet('Bold Sample Lower') - s6 = xlWorkbook.add_sheet('Bold Sample Other') - - # Normal UPPER - for row in xrange(0, 2): - letterValue = 65 - for col in range(0, 51, 2): - letter = chr(letterValue) - s1.write(row, col, letter*10) - s1.write(row, col+1, letter*100) - letterValue += 1 - - # Normal lower - for row in xrange(0, 2): - letterValue = 97 - for col in range(0, 51, 2): - letter = chr(letterValue) - s2.write(row, col, letter*10) - s2.write(row, col+1, letter*100) - letterValue += 1 - - # Normal Other - for row in xrange(0, 2): - letterValue = 32 - for col in range(0, 66, 2): - letter = chr(letterValue) - s3.write(row, col, letter*10) - s3.write(row, col+1, letter*100) - letterValue += 1 - - style = xlwt.Style.XFStyle() - style.font.bold = True - - # Bold UPPER - for row in xrange(0, 2): - 
letterValue = 65 - for col in range(0, 51, 2): - letter = chr(letterValue) - s4.write(row, col, letter*10, style) - s4.write(row, col+1, letter*100, style) - letterValue += 1 - - # Bold lower - for row in xrange(0, 2): - letterValue = 97 - for col in range(0, 51, 2): - letter = chr(letterValue) - s5.write(row, col, letter*10, style) - s5.write(row, col+1, letter*100, style) - letterValue += 1 - - # Bold Other - for row in xrange(0, 2): - letterValue = 32 - for col in range(0, 66, 2): - letter = chr(letterValue) - s6.write(row, col, letter*10, style) - s6.write(row, col+1, letter*100, style) - letterValue += 1 - - try: - xlWorkbook.save(fileName) - except IOError: - print 'Failed to save filename "%s"' % fileName - -if __name__ == '__main__': - ExportSampleSheet() diff --git a/SUEWSPrepare/Modules/xlwt/ExcelFormula.py b/SUEWSPrepare/Modules/xlwt/ExcelFormula.py index ac70147..6461832 100644 --- a/SUEWSPrepare/Modules/xlwt/ExcelFormula.py +++ b/SUEWSPrepare/Modules/xlwt/ExcelFormula.py @@ -1,7 +1,8 @@ -# -*- coding: windows-1252 -*- +# -*- coding: utf-8 -*- -from . import ExcelFormulaParser, ExcelFormulaLexer import struct + +from . import ExcelFormulaLexer, ExcelFormulaParser from .antlr import ANTLRException @@ -40,4 +41,3 @@ def rpn(self): ''' return struct.pack(" "ExcelFormulaParser.py"$ ### import antlr and other modules .. -from . import antlr - ### header action >>> import struct -from . import Utils -from .UnicodeUtils import upack1 + +from . import Utils, antlr from .ExcelMagic import * +from .UnicodeUtils import upack1 _RVAdelta = {"R": 0, "V": 0x20, "A": 0x40} _RVAdeltaRef = {"R": 0, "V": 0x20, "A": 0x40, "D": 0x20} @@ -14,9 +13,9 @@ class FormulaParseException(Exception): - """ - An exception indicating that a Formula could not be successfully parsed. - """ + """ + An exception indicating that a Formula could not be successfully parsed. 
+ """ ### header action <<< ### preamble action>>> @@ -168,7 +167,6 @@ def prec1_expr(self, self.prec2_expr(arg_type) self.rpn += op; - # print "**prec1_expr4 %s" % arg_type else: break @@ -382,23 +380,23 @@ def primary(self, chunklens = [len(chunk) for chunk in rpn_chunks] skiplens = [0] * nc skiplens[-1] = 3 - for ic in xrange(nc-1, 0, -1): - skiplens[ic-1] = skiplens[ic] + chunklens[ic] + 4 + for ic in range(nc-1, 0, -1): + skiplens[ic-1] = skiplens[ic] + chunklens[ic] + 4 jump_pos = [2 * nc + 2] - for ic in xrange(nc): - jump_pos.append(jump_pos[-1] + chunklens[ic] + 4) + for ic in range(nc): + jump_pos.append(jump_pos[-1] + chunklens[ic] + 4) chunk_shift = 2 * nc + 6 # size of tAttrChoose - for ic in xrange(nc): - for refx in xrange(ref_markers[ic], ref_markers[ic+1]): - ref = self.sheet_references[refx] - self.sheet_references[refx] = (ref[0], ref[1], ref[2] + chunk_shift) - chunk_shift += 4 # size of tAttrSkip + for ic in range(nc): + for refx in range(ref_markers[ic], ref_markers[ic+1]): + ref = self.sheet_references[refx] + self.sheet_references[refx] = (ref[0], ref[1], ref[2] + chunk_shift) + chunk_shift += 4 # size of tAttrSkip choose_rpn = [] choose_rpn.append(struct.pack("
diff --git a/TreeGenerator/makevegdems.py b/TreeGenerator/makevegdems.py index 0918b15..e59e921 100644 --- a/TreeGenerator/makevegdems.py +++ b/TreeGenerator/makevegdems.py @@ -1,5 +1,6 @@ +from builtins import range import numpy as np -import matplotlib.pylab as plt +# import matplotlib.pylab as plt def vegunitsgeneration(buildings, vegdem, vegdem2, ttype, height, trunk, dia, rowa, cola,sizex, sizey, scale): # This function creates the shape of each vegetation unit and locates it a grid. diff --git a/TreeGenerator/tree_generator.py b/TreeGenerator/tree_generator.py index 83c2318..da5fa41 100644 --- a/TreeGenerator/tree_generator.py +++ b/TreeGenerator/tree_generator.py @@ -20,24 +20,28 @@ * * ***************************************************************************/ """ -from PyQt4.QtCore import QSettings, QTranslator, qVersion -from PyQt4.QtGui import QAction, QIcon, QMessageBox, QFileDialog +from __future__ import absolute_import +from builtins import str +from builtins import object +from qgis.PyQt.QtCore import QSettings, QTranslator, qVersion +from qgis.PyQt.QtWidgets import QAction, QMessageBox, QFileDialog +from qgis.PyQt.QtGui import QIcon from qgis.core import * from qgis.gui import * import webbrowser import os from osgeo import gdal import numpy as np -import makevegdems +from . 
import makevegdems from osgeo.gdalconst import * import sys # Import the code for the dialog -from tree_generator_dialog import TreeGeneratorDialog +from .tree_generator_dialog import TreeGeneratorDialog import os.path -class TreeGenerator: +class TreeGenerator(object): """QGIS Plugin Implementation.""" def __init__(self, iface): @@ -73,8 +77,10 @@ def __init__(self, iface): self.dlg.helpButton.clicked.connect(self.help) self.fileDialog = QFileDialog() - self.fileDialog.setFileMode(4) - self.fileDialog.setAcceptMode(1) + # self.fileDialog.setFileMode(4) + # self.fileDialog.setAcceptMode(1) + self.fileDialog.setFileMode(QFileDialog.Directory) + self.fileDialog.setOption(QFileDialog.ShowDirsOnly, True) # Declare instance attributes self.actions = [] @@ -323,10 +329,14 @@ def start_progress(self): tot_field = self.layerComboManagerTotalHeightField.currentField() dia_field = self.layerComboManagerDiameterField.currentField() - idx_ttype = vlayer.fieldNameIndex(ttype_field) - idx_trunk = vlayer.fieldNameIndex(trunk_field) - idx_tot = vlayer.fieldNameIndex(tot_field) - idx_dia = vlayer.fieldNameIndex(dia_field) + # idx_ttype = vlayer.fieldNameIndex(ttype_field) + # idx_trunk = vlayer.fieldNameIndex(trunk_field) + # idx_tot = vlayer.fieldNameIndex(tot_field) + # idx_dia = vlayer.fieldNameIndex(dia_field) + idx_ttype = vlayer.fields().indexFromName(ttype_field) + idx_trunk = vlayer.fields().indexFromName(trunk_field) + idx_tot = vlayer.fields().indexFromName(tot_field) + idx_dia = vlayer.fields().indexFromName(dia_field) if self.folderPath == 'None': QMessageBox.critical(self.dlg, "Error", "Select a valid output folder") @@ -388,7 +398,7 @@ def start_progress(self): QMessageBox.information(self.dlg, "TreeGenerator", "Vegetation DSMs succesfully generated") def help(self): - url = "http://umep-docs.readthedocs.io/en/latest/pre-processor/Spatial%20Data%20Tree%20Generator.html" + url = "http://www.urban-climate.net/umep/UMEP_Manual#Spatial_Data:_Tree_Generator" 
webbrowser.open_new_tab(url) def saveraster(self, gdal_data, filename, raster): diff --git a/TreeGenerator/tree_generator_dialog.py b/TreeGenerator/tree_generator_dialog.py index a6295f7..526cdb6 100644 --- a/TreeGenerator/tree_generator_dialog.py +++ b/TreeGenerator/tree_generator_dialog.py @@ -23,13 +23,14 @@ import os -from PyQt4 import QtGui, uic +from qgis.PyQt import uic +from qgis.PyQt.QtWidgets import QDialog FORM_CLASS, _ = uic.loadUiType(os.path.join( os.path.dirname(__file__), 'tree_generator_dialog_base.ui')) -class TreeGeneratorDialog(QtGui.QDialog, FORM_CLASS): +class TreeGeneratorDialog(QDialog, FORM_CLASS): def __init__(self, parent=None): """Constructor.""" super(TreeGeneratorDialog, self).__init__(parent) diff --git a/UMEP.py b/UMEP.py index 87e33f0..9a33b4a 100644 --- a/UMEP.py +++ b/UMEP.py @@ -20,37 +20,40 @@ * * ***************************************************************************/ """ -from PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplication, QObject, SIGNAL, SLOT -from PyQt4.QtGui import QMenu, QAction, QIcon -from UMEP_dialog import UMEPDialog -from MetdataProcessor.metdata_processor import MetdataProcessor -from ShadowGenerator.shadow_generator import ShadowGenerator -from SkyViewFactorCalculator.svf_calculator import SkyViewFactorCalculator -from ImageMorphParam.image_morph_param import ImageMorphParam -from ImageMorphParmsPoint.imagemorphparmspoint_v1 import ImageMorphParmsPoint -from LandCoverFractionGrid.landcoverfraction_grid import LandCoverFractionGrid -from LandCoverFractionPoint.landcover_fraction_point import LandCoverFractionPoint -from LandCoverReclassifier.land_cover_reclassifier import LandCoverReclassifier -from WallHeight.wall_height import WallHeight -from SEBE.sebe import SEBE -from SEBEVisual.sun import Sun -from SuewsSimple.suews_simple import SuewsSimple -from SUEWSPrepare.suews_prepare import SUEWSPrepare -from TreeGenerator.tree_generator import TreeGenerator -from SUEWS.suews import SUEWS 
-from FootprintModel.footprint_model import FootprintModel -from WATCHData.watch import WATCHData -from GreaterQF.greater_qf import GreaterQF -from SOLWEIG.solweig import SOLWEIG -from ExtremeFinder.extreme_finder import ExtremeFinder -from SolweigAnalyzer.solweig_analyzer import SolweigAnalyzer -from SUEWSAnalyzer.suews_analyzer import SUEWSAnalyzer -from UMEPDownloader.umep_downloader import UMEP_Data_Download -from LCZ_Converter.LCZ_converter import LCZ_test -from LucyQf.LQF import LQF -from BenchMarking.benchmarking import BenchMarking -from DSMGenerator.dsm_generator import DSMGenerator -from UMEP_about import UMEPDialogAbout +from __future__ import absolute_import +from builtins import object +from qgis.PyQt.QtCore import QSettings, QTranslator, qVersion, QCoreApplication +from qgis.PyQt.QtWidgets import QMenu, QAction, QMessageBox +from qgis.PyQt.QtGui import QIcon +from .UMEP_dialog import UMEPDialog +from .MetdataProcessor.metdata_processor import MetdataProcessor +from .ShadowGenerator.shadow_generator import ShadowGenerator +from .SkyViewFactorCalculator.svf_calculator import SkyViewFactorCalculator +from .ImageMorphParam.image_morph_param import ImageMorphParam +from .ImageMorphParmsPoint.imagemorphparmspoint_v1 import ImageMorphParmsPoint +from .LandCoverFractionGrid.landcoverfraction_grid import LandCoverFractionGrid +from .LandCoverFractionPoint.landcover_fraction_point import LandCoverFractionPoint +from .LandCoverReclassifier.land_cover_reclassifier import LandCoverReclassifier +from .WallHeight.wall_height import WallHeight +from .TreeGenerator.tree_generator import TreeGenerator +from .FootprintModel.footprint_model import FootprintModel +from .LCZ_Converter.LCZ_converter import LCZ_test +from .UMEPDownloader.umep_downloader import UMEP_Data_Download # TODO: No data visible in plugin interface +from .DSMGenerator.dsm_generator import DSMGenerator # TODO: Working except for OSMImport +from .WATCHData.watch import WATCHData # TODO: Gives errors 
during download and/or processing +# from .GreaterQF.greater_qf import GreaterQF # TODO: Multiple changes required :Plugin blocker +from .ExtremeFinder.extreme_finder import ExtremeFinder +# from .LucyQf.LQF import LQF # TODO: Multiple changes required :Plugin blocker +from .SEBE.sebe import SEBE +from .SuewsSimple.suews_simple import SuewsSimple +from .SUEWSPrepare.suews_prepare import SUEWSPrepare +from .SUEWS.suews import SUEWS +from .SOLWEIG.solweig import SOLWEIG +from .BenchMarking.benchmarking import BenchMarking # TODO: KeyError: 'input_cfgfiles' +# from .SEBEVisual.sun import Sun # TODO: Not able to run 2to3 converter :Plugin blocker +from .SolweigAnalyzer.solweig_analyzer import SolweigAnalyzer +from .SUEWSAnalyzer.suews_analyzer import SUEWSAnalyzer +from .UMEP_about import UMEPDialogAbout import os.path import webbrowser @@ -60,7 +63,7 @@ # import pydevd -class UMEP: +class UMEP(object): """QGIS Plugin Implementation.""" def __init__(self, iface): @@ -264,6 +267,7 @@ def __init__(self, iface): # Sub-menus to About self.About_Action = QAction("About", self.iface.mainWindow()) self.About_Menu.addAction(self.About_Action) + self.About_Action.triggered.connect(self.About) self.Manual_Action = QAction("UMEP on the web", self.iface.mainWindow()) self.About_Menu.addAction(self.Manual_Action) self.Manual_Action.triggered.connect(self.help) @@ -344,7 +348,9 @@ def initGui(self): parent=self.iface.mainWindow()) # Code to show the about dialog - QObject.connect(self.About_Action, SIGNAL("triggered()"), self.dlgAbout, SLOT("show()")) + # QObject.connect(self.About_Action, SIGNAL("triggered()"), self.dlgAbout, SLOT("show()")) + # QObject.signal.connect(self.dlgAbout) + def unload(self): """Removes the plugin menu item and icon from QGIS GUI.""" @@ -355,6 +361,9 @@ def unload(self): self.iface.removeToolBarIcon(action) self.iface.mainWindow().menuBar().removeAction(self.UMEP_Menu.menuAction()) + def About(self): + self.dlgAbout.show() + def PED(self): sg = 
MetdataProcessor(self.iface) sg.run() @@ -409,6 +418,9 @@ def SE(self): sg.run() def SEv(self): + QMessageBox.critical(self.dlg, "Plugin blocker", + "This tool is not yet ported to QGIS3. Work still in progress.") + return sg = Sun(self.iface) sg.run() @@ -421,6 +433,9 @@ def WA(self): sg.run() def GF(self): + QMessageBox.critical(self.dlg, "Plugin blocker", + "This tool is not yet ported to QGIS3. Work still in progress.") + return sg = GreaterQF(self.iface) sg.run() @@ -457,6 +472,9 @@ def WC(self): sg.run() def LF(self): + QMessageBox.critical(self.dlg, "Plugin blocker", + "This tool is not yet ported to QGIS3. Work still in progress.") + return sg = LQF(self.iface) sg.run() @@ -470,7 +488,7 @@ def run(self): self.dlg.exec_() def help(self): - url = "http://umep-docs.readthedocs.io/en/latest/index.html" + url = "http://urban-climate.net/umep/" webbrowser.open_new_tab(url) diff --git a/UMEPDownloader/DownloadDataWorker.py b/UMEPDownloader/DownloadDataWorker.py index c4717aa..bfe4fa4 100644 --- a/UMEPDownloader/DownloadDataWorker.py +++ b/UMEPDownloader/DownloadDataWorker.py @@ -1,5 +1,8 @@ -from PyQt4.QtCore import QObject, pyqtSignal -import urllib +from future import standard_library +standard_library.install_aliases() +from builtins import str +from qgis.PyQt.QtCore import QObject, pyqtSignal +import urllib.request, urllib.parse, urllib.error import os import xml.etree.ElementTree as etree from qgis.core import QgsRasterLayer, QgsRasterPipe, QgsRasterFileWriter @@ -10,7 +13,7 @@ class DownloadDataWorker(QObject): # Worker to get raster data saved to a file in a thread finished = pyqtSignal(object) update = pyqtSignal(object) - error = pyqtSignal(Exception, basestring) + error = pyqtSignal(Exception, str) def __init__(self, baseURL, layerName, outputFile, bbox, resolution, srs): QObject.__init__(self) self.baseURL = baseURL @@ -27,7 +30,7 @@ def run(self): try: output = webToRaster(self.baseURL, self.layerName, self.outputFile, self.bbox, self.resolution, 
self.srs, self.update) self.finished.emit({'filename':output, 'srs':self.srs, 'progress':100}) - except Exception,e: + except Exception as e: self.error.emit(e, traceback.format_exc()) def webToRaster(baseURL, layer_name, output_file, bbox, resolution, srs, update): @@ -57,12 +60,12 @@ def webToRaster(baseURL, layer_name, output_file, bbox, resolution, srs, update) try: dataOut = tempfile.mktemp('.tif') - except Exception,e: + except Exception as e: raise Exception('Problem creating temporary file to store raster data: '+ str(e)) # TODO: Work out if the response is an XML error update.emit({'progress':10, 'message':'Downloading file...'}) - urllib.urlretrieve(bigURL, dataOut) + urllib.request.urlretrieve(bigURL, dataOut) # Load data as QgsRasterLayer and then re-save it, ensuring it has the correct projection info a = QgsRasterLayer(dataOut, "temporary raster layer") diff --git a/UMEPDownloader/GetMetaWorker.py b/UMEPDownloader/GetMetaWorker.py index 317c28f..fd022a2 100644 --- a/UMEPDownloader/GetMetaWorker.py +++ b/UMEPDownloader/GetMetaWorker.py @@ -1,5 +1,8 @@ -from PyQt4.QtCore import QObject, pyqtSignal -import urllib2 +from future import standard_library +standard_library.install_aliases() +from builtins import str +from qgis.PyQt.QtCore import QObject, pyqtSignal +import urllib.request, urllib.error, urllib.parse import xml.etree.ElementTree as etree import traceback @@ -7,7 +10,7 @@ class GetMetaWorker(QObject): # Worker to get WMS metadata for a layer to keep the process from slowing down the interace thread finished = pyqtSignal(object) update = pyqtSignal(object) - error = pyqtSignal(Exception, basestring) + error = pyqtSignal(Exception, str) def __init__(self, baseURL, layerName): QObject.__init__(self) self.baseURL = baseURL @@ -20,7 +23,7 @@ def run(self): try: output = getWMSInfo(self.baseURL, self.layerName, self.update) self.finished.emit(output) - except Exception,e: + except Exception as e: self.error.emit(e, traceback.format_exc()) @@ -34,7 
+37,7 @@ def getWMSInfo(baseURL, layer_name, update): ''' update.emit({'Abstract':'Loading abstract for layer...'}) caps = baseURL + '/wms?service=WMS&version=1.1.1&request=GetCapabilities' - f = urllib2.urlopen(caps) + f = urllib.request.urlopen(caps) data = f.read() f.close() root = etree.fromstring(data) diff --git a/UMEPDownloader/resources.py b/UMEPDownloader/resources.py index e37056f..81147db 100644 --- a/UMEPDownloader/resources.py +++ b/UMEPDownloader/resources.py @@ -6,7 +6,7 @@ # # WARNING! All changes made in this file will be lost! -from PyQt4 import QtCore +from qgis.PyQt import QtCore qt_resource_data = "\ \x00\x00\x04\x0a\ diff --git a/UMEPDownloader/umep_downloader.py b/UMEPDownloader/umep_downloader.py index fed21f5..6b4eed3 100644 --- a/UMEPDownloader/umep_downloader.py +++ b/UMEPDownloader/umep_downloader.py @@ -20,18 +20,25 @@ * * ***************************************************************************/ """ -from PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplication, Qt, QThread -from PyQt4.QtGui import QAction, QIcon, QAbstractItemView, QMessageBox, QWidget, QHeaderView, QTableWidgetItem, QListWidgetItem, QFileDialog -from qgis.core import QgsCoordinateReferenceSystem, QgsCoordinateTransform, QgsRectangle, QgsPoint, QgsGeometry, QgsRasterLayer, QgsMapLayerRegistry +from __future__ import absolute_import +from future import standard_library +standard_library.install_aliases() +from builtins import map +from builtins import str +from builtins import range +from builtins import object +from qgis.PyQt.QtCore import QSettings, QTranslator, qVersion, QCoreApplication, Qt, QThread +from qgis.PyQt.QtWidgets import QAction, QAbstractItemView, QMessageBox, QWidget, QHeaderView, QTableWidgetItem, QListWidgetItem, QFileDialog +from qgis.PyQt.QtGui import QIcon +from qgis.core import QgsCoordinateReferenceSystem, QgsCoordinateTransform, QgsRectangle, QgsPoint, QgsGeometry, QgsRasterLayer, QgsProject # Initialize Qt resources from file 
resources.py -import resources +# from . import resources # Import the code for the dialog -from umep_downloader_dialog import UMEP_Data_DownloadDialog +from .umep_downloader_dialog import UMEP_Data_DownloadDialog import os.path import sys import subprocess import shutil - try: # Assuming in UMEP folder strcuture, so get f90nml from Utilities from ..Utilities import f90nml @@ -39,13 +46,13 @@ # If not present, assume plugin is standalone and has its own f90nml import f90nml -import urllib2 -import urllib +import urllib.request, urllib.error, urllib.parse +import urllib.request, urllib.parse, urllib.error import xml.etree.ElementTree as etree import tempfile import numpy as np -from GetMetaWorker import GetMetaWorker -from DownloadDataWorker import DownloadDataWorker +from .GetMetaWorker import GetMetaWorker +from .DownloadDataWorker import DownloadDataWorker def getLayerMetadata(baseURL, layer_name): ''' Uses WCS DescribeCoverage request to get metadata from a layer of interest on a remote server @@ -57,7 +64,7 @@ def getLayerMetadata(baseURL, layer_name): #### WCS data: For grid resolution and other detailed stuff # Get data coverageInfoURL = baseURL + '/wcs?SERVICE=WCS&VERSION=1.0.0&REQUEST=DescribeCoverage&coverage=%s'%(layer_name,) - f = urllib2.urlopen(coverageInfoURL) + f = urllib.request.urlopen(coverageInfoURL) data = f.read() f.close() root = etree.fromstring(data) @@ -110,14 +117,14 @@ def getLayerMetadata(baseURL, layer_name): lowVals = limField.find("{http://www.opengis.net/gml}low") highVals = limField.find("{http://www.opengis.net/gml}high") if (lowVals is not None) and (highVals is not None): - lowVals = map(int, lowVals.text.split(" ")) - highVals = map(int, highVals.text.split(" ")) + lowVals = list(map(int, lowVals.text.split(" "))) + highVals = list(map(int, highVals.text.split(" "))) numXpoints = highVals[0] - lowVals[0] numYpoints = highVals[1] - lowVals[1] numPoints = {'x':numXpoints, 'y':numYpoints} # Get origin of grid in native CRS 
originField = gridField.find("{http://www.opengis.net/gml}origin/{http://www.opengis.net/gml}pos") - origin = map(float, originField.text.split(" ")) # assume x and y respectively. + origin = list(map(float, originField.text.split(" "))) # assume x and y respectively. originDict = {'x':origin[0], 'y':origin[1]} # Get grid extent in native SRS @@ -127,9 +134,9 @@ def getLayerMetadata(baseURL, layer_name): highVals = envelopeVals[1] if (lowVals is not None) and (highVals is not None): - lowVals = map(float, lowVals.text.split(" ")) + lowVals = list(map(float, lowVals.text.split(" "))) - highVals = map(float, highVals.text.split(" ")) + highVals = list(map(float, highVals.text.split(" "))) extentDict = {'xMin':lowVals[0], 'xMax':highVals[0], 'yMin':lowVals[1], 'yMax':highVals[1]} @@ -137,7 +144,7 @@ def getLayerMetadata(baseURL, layer_name): return result -class UMEP_Data_Download: +class UMEP_Data_Download(object): """QGIS Plugin Implementation.""" def __init__(self, iface): @@ -194,7 +201,7 @@ def __init__(self, iface): def runDownload_errorWrapper(self): try: self.runDownload() - except Exception,e: + except Exception as e: QMessageBox.critical(None, 'Error', str(e)) def updateAbstract(self): @@ -209,11 +216,11 @@ def updateAbstract(self): collection = self.readacross[collection[0].text()] # Omit first key from list as it should just be collection label - if self.dlg.tblDatasets.currentRow() > len(self.catalogue[collection].keys())-1: + if self.dlg.tblDatasets.currentRow() > len(list(self.catalogue[collection].keys()))-1: # If the selected row is beyond the end of the requested data, it's probably a bug return try: - subEntry = self.catalogue[collection].keys()[1:][self.dlg.tblDatasets.currentRow()] + subEntry = list(self.catalogue[collection].keys())[1:][self.dlg.tblDatasets.currentRow()] except: return baseURL = self.catalogue[collection][subEntry][2] @@ -257,10 +264,10 @@ def refreshList(self): except Exception: pass - f = 
urllib2.urlopen('http://www.urban-climate.net/umep/repo/catalogue.nml') + f = urllib.request.urlopen('http://www.urban-climate.net/umep/repo/catalogue.nml') tempFile = tempfile.mktemp(".nml") with open(tempFile, "w") as tmp: - tmp.write(f.read()) + tmp.write(str(f.read())) f.close() tmp.close() self.catalogue = f90nml.read(tempFile) @@ -320,7 +327,7 @@ def updateList(self): txt = self.readacross[items[0].text()] - self.dlg.tblDatasets.setRowCount(len(self.catalogue[txt].keys())-1) + self.dlg.tblDatasets.setRowCount(len(list(self.catalogue[txt].keys()))-1) self.dlg.tblDatasets.setHorizontalHeaderLabels("Source;Description;Date;Resolution;Extent".split(";")) self.dlg.tblDatasets.setSelectionMode(QAbstractItemView.SingleSelection) header = self.dlg.tblDatasets.horizontalHeader() @@ -377,11 +384,11 @@ def runDownload(self): collection = self.readacross[collection[0].text()] # Omit first key from list as it should just be collection label - if self.dlg.tblDatasets.currentRow() > len(self.catalogue[collection].keys()[1:]): + if self.dlg.tblDatasets.currentRow() > len(list(self.catalogue[collection].keys())[1:]): # If the selected row is beyond the end of the requested data, it's probably a bug return - subEntry = self.catalogue[collection].keys()[1:][self.dlg.tblDatasets.currentRow()] + subEntry = list(self.catalogue[collection].keys())[1:][self.dlg.tblDatasets.currentRow()] baseURL = self.catalogue[collection][subEntry][2] #dataSourceType = self.catalogue[collection][subEntry][1] layerName = self.catalogue[collection][subEntry][3] @@ -392,7 +399,7 @@ def runDownload(self): # Compare the requested data with the native parameters of the layer and tell the user something helpful # Read metadata try: - meta = getLayerMetadata(baseURL, urllib.quote(layerName)) + meta = getLayerMetadata(baseURL, urllib.parse.quote(layerName)) except: meta = None @@ -551,7 +558,7 @@ def downloadWorkerFinished(self, returns): crs.createFromId(int(returns['srs'].split(':')[1])) 
self.rasterLayer.setCrs(crs) - QgsMapLayerRegistry.instance().addMapLayer(self.rasterLayer) + QgsProject.instance().addMapLayer(self.rasterLayer) def tr(self, message): return QCoreApplication.translate('UMEP_Data_Download', message) diff --git a/UMEPDownloader/umep_downloader_dialog.py b/UMEPDownloader/umep_downloader_dialog.py index a1d8c24..084fe8a 100644 --- a/UMEPDownloader/umep_downloader_dialog.py +++ b/UMEPDownloader/umep_downloader_dialog.py @@ -23,13 +23,14 @@ import os -from PyQt4 import QtGui, uic +from qgis.PyQt import uic +from qgis.PyQt.QtWidgets import QDialog FORM_CLASS, _ = uic.loadUiType(os.path.join( os.path.dirname(__file__), 'umep_downloader_dialog_base.ui')) -class UMEP_Data_DownloadDialog(QtGui.QDialog, FORM_CLASS): +class UMEP_Data_DownloadDialog(QDialog, FORM_CLASS): def __init__(self, parent=None): """Constructor.""" super(UMEP_Data_DownloadDialog, self).__init__(parent) diff --git a/UMEP_about.py b/UMEP_about.py index b0919c6..bb15999 100644 --- a/UMEP_about.py +++ b/UMEP_about.py @@ -23,13 +23,14 @@ import os -from PyQt4 import QtGui, uic +from qgis.PyQt import QtGui, uic +from qgis.PyQt.QtWidgets import QDialog FORM_CLASS, _ = uic.loadUiType(os.path.join( os.path.dirname(__file__), 'UMEP_about.ui')) -class UMEPDialogAbout(QtGui.QDialog, FORM_CLASS): +class UMEPDialogAbout(QDialog, FORM_CLASS): def __init__(self, parent=None): """Constructor.""" super(UMEPDialogAbout, self).__init__(parent) diff --git a/UMEP_about.ui b/UMEP_about.ui index 0bd5427..5520564 100644 --- a/UMEP_about.ui +++ b/UMEP_about.ui @@ -7,7 +7,7 @@ 0 0 505 - 443 + 452 @@ -31,7 +31,8 @@ p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'MS Shell Dlg 2'; font-size:8.25pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:12pt;">UMEP - Universal Multi-scale Environmental Predictor </span></p> -<p style=" 
margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:8pt;">Version: 1.5 for QGIS2.x, 3 August 2018</span></p> +<p style=" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:8pt;">Version: 1.5 (Early Adopter Release) for QGIS 3.x, 3 August 2018</span></p> +<p style=" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:8pt;">NOTE: Some parts of the plugin is currently not operational.</span></p> <p style=" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:10pt;">UMEP is a plugin designed for urban climate in general and climate sensitive planning applications in particular. The plugin consist of a pre-processor, a processor and a post-processor. This plugin is a collaboration between University of Gothenburg, University of Helsinki and University of Reading.</span></p> <p style=" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:10pt;">UMEP plugin - License GNU GPL 2</span></p> <p style=" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:10pt;">Authors - Fredrik Lindberg, Sue Grimmond, Leena Järvi, Helen Ward, Niklas Krave, Christoph William Kent, Andy Gabey, Shiho Onomura, Frans Olofson, Bei Huang, Ting Sun and Nils Wallenberg</span></p> diff --git a/UMEP_dialog.py b/UMEP_dialog.py index eaabde3..0477e21 100644 --- a/UMEP_dialog.py +++ b/UMEP_dialog.py @@ -23,13 +23,14 @@ import os -from PyQt4 import QtGui, uic +from qgis.PyQt import uic +from qgis.PyQt.QtWidgets import QDialog FORM_CLASS, _ = uic.loadUiType(os.path.join( os.path.dirname(__file__), 'UMEP_dialog_base.ui')) 
-class UMEPDialog(QtGui.QDialog, FORM_CLASS): +class UMEPDialog(QDialog, FORM_CLASS): def __init__(self, parent=None): """Constructor.""" super(UMEPDialog, self).__init__(parent) @@ -38,4 +39,4 @@ def __init__(self, parent=None): # self., and you can use autoconnect slots - see # http://qt-project.org/doc/qt-4.8/designer-using-a-ui-file.html # #widgets-and-dialogs-with-auto-connect - self.setupUi(self) + self.setupUi(self) \ No newline at end of file diff --git a/Utilities/SEBESOLWEIGCommonFiles/Solweig_v2015_metdata_noload.py b/Utilities/SEBESOLWEIGCommonFiles/Solweig_v2015_metdata_noload.py index 0927093..c957702 100644 --- a/Utilities/SEBESOLWEIGCommonFiles/Solweig_v2015_metdata_noload.py +++ b/Utilities/SEBESOLWEIGCommonFiles/Solweig_v2015_metdata_noload.py @@ -1,5 +1,6 @@ +from __future__ import absolute_import # from importdata import importdata -import sun_position as sp +from . import sun_position as sp import numpy as np import datetime import calendar diff --git a/Utilities/SEBESOLWEIGCommonFiles/clearnessindex_2013b.py b/Utilities/SEBESOLWEIGCommonFiles/clearnessindex_2013b.py index 05611ab..72cba49 100644 --- a/Utilities/SEBESOLWEIGCommonFiles/clearnessindex_2013b.py +++ b/Utilities/SEBESOLWEIGCommonFiles/clearnessindex_2013b.py @@ -1,6 +1,7 @@ +from __future__ import absolute_import author = 'xlinfr' -import sun_distance +from . import sun_distance import numpy as np import math diff --git a/Utilities/SEBESOLWEIGCommonFiles/sun_position.py b/Utilities/SEBESOLWEIGCommonFiles/sun_position.py index 157c228..dfc00d0 100644 --- a/Utilities/SEBESOLWEIGCommonFiles/sun_position.py +++ b/Utilities/SEBESOLWEIGCommonFiles/sun_position.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- from __future__ import division +from __future__ import print_function import datetime import numpy as np @@ -198,7 +199,7 @@ def julian_calculation(t_input): A = np.floor(Y/100) B = 2 - A + np.floor(A/4) else: - print 'This date never existed!. 
Date automatically set to October 4, 1582' + print('This date never existed!. Date automatically set to October 4, 1582') time['month'] = 10 time['day'] = 4 B = 0 diff --git a/Utilities/f90nml/__init__.py b/Utilities/f90nml/__init__.py index 16fb21f..6410f3d 100644 --- a/Utilities/f90nml/__init__.py +++ b/Utilities/f90nml/__init__.py @@ -6,8 +6,9 @@ :copyright: Copyright 2014 Marshall Ward, see AUTHORS for details. :license: Apache License, Version 2.0, see LICENSE for details. """ +from __future__ import absolute_import # from f90nml.parser import Parser -from parser import Parser +from .parser import Parser __version__ = '0.12' diff --git a/Utilities/f90nml/namelist.py b/Utilities/f90nml/namelist.py index b939085..b8221da 100644 --- a/Utilities/f90nml/namelist.py +++ b/Utilities/f90nml/namelist.py @@ -7,6 +7,7 @@ :license: Apache License, Version 2.0, see LICENSE for details. """ from __future__ import print_function +from builtins import str import os try: @@ -22,7 +23,7 @@ def __init__(self, *args, **kwds): super(NmlDict, self).__init__(*args, **kwds) # Convert any internal dicts to NmlDicts - for key, val in self.items(): + for key, val in list(self.items()): if isinstance(val, dict): self[key] = NmlDict(val) @@ -220,7 +221,7 @@ def write(self, nml_path, force=False): raise IOError('File {0} already exists.'.format(nml_path)) with open(nml_path, 'w') as nml_file: - for grp_name, grp_vars in self.items(): + for grp_name, grp_vars in list(self.items()): # Check for repeated namelist records (saved as lists) if isinstance(grp_vars, list): for g_vars in grp_vars: @@ -228,7 +229,7 @@ def write(self, nml_path, force=False): else: self.write_nmlgrp(grp_name, grp_vars, nml_file) - if self.items(): + if list(self.items()): with open(nml_path, 'rb+') as nml_file: nml_file.seek(-1, os.SEEK_END) nml_file.truncate() @@ -241,7 +242,7 @@ def write_nmlgrp(self, grp_name, grp_vars, nml_file): print('&{0}'.format(grp_name), file=nml_file) - for v_name, v_val in 
grp_vars.items(): + for v_name, v_val in list(grp_vars.items()): for v_str in self.var_strings(v_name, v_val): nml_line = self.indent + '{0}'.format(v_str) @@ -260,7 +261,7 @@ def var_strings(self, v_name, v_values): # Parse derived type contents if isinstance(v_values, dict): - for f_name, f_vals in v_values.items(): + for f_name, f_vals in list(v_values.items()): v_title = '%'.join([v_name, f_name]) v_strs = self.var_strings(v_title, f_vals) diff --git a/Utilities/f90nml/parser.py b/Utilities/f90nml/parser.py index 7a7491d..466243e 100644 --- a/Utilities/f90nml/parser.py +++ b/Utilities/f90nml/parser.py @@ -7,13 +7,17 @@ :copyright: Copyright 2014 Marshall Ward, see AUTHORS for details. :license: Apache License, Version 2.0, see LICENSE for details. """ +from __future__ import absolute_import +from builtins import next +from builtins import range +from builtins import object import copy import itertools import shlex from string import whitespace -from fpy import pyfloat, pycomplex, pybool, pystr -from namelist import NmlDict +from .fpy import pyfloat, pycomplex, pybool, pystr +from .namelist import NmlDict class Parser(object): @@ -127,7 +131,7 @@ def read(self, nml_fname, nml_patch_in=None, patch_fname=None): if self.token in ('/', '&', '$'): # Append any remaining patched variables - for v_name, v_val in grp_patch.items(): + for v_name, v_val in list(grp_patch.items()): g_vars[v_name] = v_val v_strs = nmls.var_strings(v_name, v_val) for v_str in v_strs: @@ -185,7 +189,7 @@ def parse_variable(self, parent, patch_nml=None): i_r = 1 if not v_indices[0][2] else v_indices[0][2] if i_e: - v_idx = iter(range(i_s, i_e, i_r)) + v_idx = iter(list(range(i_s, i_e, i_r))) else: v_idx = (i_s + i_r * k for k in itertools.count()) else: diff --git a/Utilities/landCoverFractions_v1.py b/Utilities/landCoverFractions_v1.py index 2366dea..6ec36bc 100644 --- a/Utilities/landCoverFractions_v1.py +++ b/Utilities/landCoverFractions_v1.py @@ -15,8 +15,8 @@ # scale = 1/pixel resolution 
(m) # dtheta = 5. # degree interval # import Image -# import scipy.ndimage.interpolation as sc -from scipy import misc as sc +import scipy.ndimage.interpolation as sc +# from scipy import misc as sc import numpy as np # import scipy.ndimage.interpolation as sc # import PIL @@ -55,9 +55,9 @@ def landcover_v1(lc_grid, mid, dtheta, dlg, imp_point): if imp_point == 1: dlg.progressBar.setValue(angle) - # d = sc.rotate(lc_grid, angle, reshape=False, mode='nearest') + d = sc.rotate(lc_grid, angle, reshape=False, mode='nearest') # b = ((build.max()-build.min())/d.max())*d+build.min() - d = sc.imrotate(lc_grid, angle, 'nearest') + # d = sc.imrotate(lc_grid, angle, 'nearest') # d = sc.rotate(lc_grid, angle, reshape=False, mode='nearest') b = np.round(((lc_grid.max()-lc_grid.min())/d.max())*d+lc_grid.min(), 0) diff --git a/Utilities/ncWMSConnector.py b/Utilities/ncWMSConnector.py index e999d62..1d2d70c 100644 --- a/Utilities/ncWMSConnector.py +++ b/Utilities/ncWMSConnector.py @@ -1,3 +1,6 @@ +from builtins import str +from builtins import range +from builtins import object from datetime import datetime as dt from datetime import timedelta as td import tempfile @@ -30,7 +33,7 @@ def __init__(self): 'Snowf', 'SWdown'] - self.start_date = dt(1979,01,01,00,00,00) # The first data point available in the dataset on the server + self.start_date = dt(1979,0o1,0o1,00,00,00) # The first data point available in the dataset on the server self.end_date = dt(2015,12,31,21,00,00) # The final data point available in the dataset on the server self.time_res = 3600 * 3 # Time resolution of data in seconds self.request_length = 200 # Number of days of data to request at a time from server (helps manage load on server and produce a progress bar) @@ -153,17 +156,17 @@ def convert_to_nc3(self): ''' # Convert each file to netcdf4_classic so it can be used with MFDataset - for file_date in self.results.keys(): + for file_date in list(self.results.keys()): tmp = tempfile.mktemp(suffix='.nc') new_data 
= nc4.Dataset(tmp, 'w', clobber=True, format='NETCDF3_CLASSIC') extant = nc4.Dataset(self.results[file_date]) # from https://gist.github.com/guziy/8543562 - for dname, the_dim in extant.dimensions.iteritems(): + for dname, the_dim in extant.dimensions.items(): new_data.createDimension(dname, len(the_dim) if not the_dim.isunlimited() else None) # Copy variables from first file - for v_name, varin in extant.variables.iteritems(): + for v_name, varin in extant.variables.items(): if varin.datatype == 'int64': dtype = 'f' # Use a float instead of a long integer because netcdf3 classic doesn't allow else: @@ -208,7 +211,7 @@ def average_data(self, period, method): :return: ''' - combined_data = nc4.MFDataset(self.results.values(), aggdim='time') + combined_data = nc4.MFDataset(list(self.results.values()), aggdim='time') # Create new netCDF file that'll contain averaged/combined data and delete the individual files # from https://gist.github.com/guziy/8543562 @@ -230,14 +233,14 @@ def average_data(self, period, method): new_dim = [len(new_time_bins), dim[1], dim[2]] # Duplicate all non-time dimensions from original file, and add new time dimension - for dname, the_dim in combined_data.dimensions.iteritems(): + for dname, the_dim in combined_data.dimensions.items(): if dname == 'time': t = new_data.createDimension(dname, len(new_time_bins)) else: new_data.createDimension(dname, len(the_dim) if not the_dim.isunlimited() else None) # Copy variables from first file - for v_name, varin in combined_data.variables.iteritems(): + for v_name, varin in combined_data.variables.items(): outVar = new_data.createVariable(v_name, varin.dtype, varin.dimensions) try: if v_name == 'time': @@ -276,7 +279,7 @@ def average_data(self, period, method): new_data.close() combined_data.close() - [os.remove(v) for v in self.results.values()] # Remove individual files as no longer needed + [os.remove(v) for v in list(self.results.values())] # Remove individual files as no longer needed return tmp # 
Return path to combined file def retrieve(self, start_period, end_period): @@ -292,7 +295,7 @@ def retrieve(self, start_period, end_period): try: dataOut = tempfile.mktemp('.nc') - except Exception,e: + except Exception as e: os.remove(dataOut) raise Exception('Problem creating temporary file to store raster data: '+ str(e)) # TODO: Work out if the response is an XML error diff --git a/WATCHData/WFDEIDownloader/FTPdownload.py b/WATCHData/WFDEIDownloader/FTPdownload.py index ef5d73a..84c5d65 100644 --- a/WATCHData/WFDEIDownloader/FTPdownload.py +++ b/WATCHData/WFDEIDownloader/FTPdownload.py @@ -1,3 +1,7 @@ +from __future__ import print_function +from future import standard_library +standard_library.install_aliases() +from builtins import str # Funtion: # Download WATCH/WFDEI data for specified date range and variable. # Author: @@ -11,7 +15,7 @@ from ftplib import FTP import os import sys -import urllib +import urllib.request, urllib.parse, urllib.error import numpy as np from datetime import date,datetime from dateutil.relativedelta import relativedelta @@ -23,9 +27,11 @@ def single_file_download(path, key_word, ftp): print("File exist! Try again...") else: f = open(path, 'wb') - print "%s downloading..." % key_word + # fix_print_with_import + print("%s downloading..." % key_word) ftp.retrbinary('RETR %s' % key_word, f.write) - print "%s download succeed!" % key_word + # fix_print_with_import + print("%s download succeed!" % key_word) def time_period_files_download(key_word, path, ftp): @@ -52,12 +58,15 @@ def time_period_files_download(key_word, path, ftp): # path_temp = path + file_name path_temp = os.path.join(path, file_name) if os.path.lexists(path_temp): - print "%s exists!" % file_name + # fix_print_with_import + print("%s exists!" % file_name) else: f = open(path_temp, 'wb') - print "%s downloading..." % file_name + # fix_print_with_import + print("%s downloading..." % file_name) ftp.retrbinary('RETR %s' % file_name, f.write) - print "%s download succeed!" 
% file_name + # fix_print_with_import + print("%s download succeed!" % file_name) def time_period_test(firstAvailableTime, finalAvailableTime, start_time, end_time): diff --git a/WATCHData/WFDEIDownloader/WFDEI_Interpolator.py b/WATCHData/WFDEIDownloader/WFDEI_Interpolator.py index 8c15f8f..db69728 100644 --- a/WATCHData/WFDEIDownloader/WFDEI_Interpolator.py +++ b/WATCHData/WFDEIDownloader/WFDEI_Interpolator.py @@ -1,3 +1,5 @@ +from builtins import str +from builtins import range ########################################################################## # WFDEI Interpolator ########################################################################## @@ -243,7 +245,7 @@ def process_rainAmongN(rain, rainAmongN): rain_proc = rain.copy() rain_sub = rain_proc[rain_proc > 0] - rain_sub_ind = rain_sub.groupby(rain_sub).groups.values() + rain_sub_ind = list(rain_sub.groupby(rain_sub).groups.values()) rain_sub_indx = np.array( [x for x in rain_sub_ind if len(x) == 3]).flatten() rain_sub = rain_proc[rain_sub_indx] diff --git a/WATCHData/WatchWorker.py b/WATCHData/WatchWorker.py index 10de25a..07377a2 100644 --- a/WATCHData/WatchWorker.py +++ b/WATCHData/WatchWorker.py @@ -1,13 +1,14 @@ +from __future__ import absolute_import # Worker object for WATCH data refinement -from PyQt4.QtCore import QObject, pyqtSignal -from WFDEIDownloader.FTPdownload import * +from qgis.PyQt.QtCore import QObject, pyqtSignal +from .WFDEIDownloader.FTPdownload import * import traceback -from WFDEIDownloader.WFDEI_Interpolator import * +from .WFDEIDownloader.WFDEI_Interpolator import * class WatchWorker(QObject): finished = pyqtSignal(object) - error = pyqtSignal(Exception, basestring) + error = pyqtSignal(Exception, str) update = pyqtSignal(object) def __init__(self, rawdata, datestart, dateend, input_AH_path, output_path, lat, lon, hgt, UTC_offset_h, rainAmongN): @@ -43,7 +44,7 @@ def run(self): runExtraction(self.rawdata, self.output_path, self.datestart.year, self.dateend.year, self.hgt, 
self.UTC_offset_h, self.rainAmongN, self.update) - except Exception, e: + except Exception as e: self.error.emit(e, traceback.format_exc()) else: # incorporating AH results @@ -52,7 +53,7 @@ def run(self): self.datestart.year, self.dateend.year, self.hgt, self.UTC_offset_h, self.rainAmongN, self.update) - except Exception, e: + except Exception as e: self.error.emit(e, traceback.format_exc()) self.finished.emit(None) diff --git a/WATCHData/resources_rc.py b/WATCHData/resources_rc.py index 9a5bfd1..afac6ad 100644 --- a/WATCHData/resources_rc.py +++ b/WATCHData/resources_rc.py @@ -7,7 +7,7 @@ # # WARNING! All changes made in this file will be lost! -from PyQt4 import QtCore +from qgis.PyQt import QtCore qt_resource_data = "\ \x00\x00\x04\x0a\ diff --git a/WATCHData/tests.py b/WATCHData/tests.py index 13642d5..2ea2df8 100644 --- a/WATCHData/tests.py +++ b/WATCHData/tests.py @@ -1,3 +1,5 @@ +from __future__ import print_function +from builtins import range import unittest #from WFDEIDownloader.WFDEI_Interpolator import height_solver_WFDEI import os @@ -8,10 +10,12 @@ def height_solver_WFDEI(lat, lon): with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'WFDEIDownloader/WFDEI-land-long-lat-height.txt')) as f: ls = [line.split() for line in f] - print (glat, glon) + # fix_print_with_import + print((glat, glon)) for i in range(7, len(ls)): if float(ls[i][0]) == glon and float(ls[i][1]) == glat: - print ls[i] + # fix_print_with_import + print(ls[i]) return float(ls[i][2]) break # oceanic grids determined as 0.0 @@ -44,7 +48,8 @@ def lon_lat_grid(lat, lon): lat = -(-int(lat) + 0.25) else: lat = -(-int(lat) + 0.75) - print (lat, lon) + # fix_print_with_import + print((lat, lon)) return lat, lon @@ -52,5 +57,6 @@ class TestBasicCalcs(unittest.TestCase): ''' Tests for custom temperature response calculation''' def testZeroInLondon(self): - print height_solver_WFDEI(51.539, -0.142) + # fix_print_with_import + print(height_solver_WFDEI(51.539, -0.142)) ''' 
Extreme low should be a particular value''' diff --git a/WATCHData/watch.py b/WATCHData/watch.py index 1068623..4686dcf 100644 --- a/WATCHData/watch.py +++ b/WATCHData/watch.py @@ -20,26 +20,30 @@ * * ***************************************************************************/ """ -from PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplication, QObject, pyqtSignal, QThread -from PyQt4.QtGui import QAction, QIcon, QFileDialog, QMessageBox +from __future__ import absolute_import +from builtins import str +from builtins import object +from qgis.PyQt.QtCore import QSettings, QTranslator, qVersion, QCoreApplication, QObject, pyqtSignal, QThread +from qgis.PyQt.QtWidgets import QAction, QFileDialog, QMessageBox +from qgis.PyQt.QtGui import QIcon from qgis.gui import * from osgeo import osr, ogr -from watch_dialog import WATCHDataDialog +from .watch_dialog import WATCHDataDialog from calendar import monthrange import os.path import shutil import webbrowser from ..Utilities.ncWMSConnector import NCWMS_Connector -from WFDEIDownloader.WFDEI_Interpolator import * +from .WFDEIDownloader.WFDEI_Interpolator import * import traceback import datetime -from WatchWorker import WatchWorker +from .WatchWorker import WatchWorker class DownloadDataWorker(QObject): # Worker to get netCDF data using a separate thread finished = pyqtSignal(object) update = pyqtSignal(object) - error = pyqtSignal(Exception, basestring) + error = pyqtSignal(Exception, str) def __init__(self, hw_start, hw_end, watch_vars, ll_lat, ll_lon, ur_lat, ur_lon): QObject.__init__(self) self.hw_start = hw_start @@ -58,7 +62,7 @@ def run(self): try: output = self.webToTimeseries(self.hw_start, self.hw_end, self.watch_vars, self.ll_lat, self.ll_lon,self. 
ur_lat, self.ur_lon, self.update) self.finished.emit(output) - except Exception,e: + except Exception as e: self.error.emit(e, traceback.format_exc()) def webToTimeseries(self, hw_start, hw_end, watch_vars, ll_lat, ll_lon, ur_lat, ur_lon, update=None): @@ -83,7 +87,7 @@ def webToTimeseries(self, hw_start, hw_end, watch_vars, ll_lat, ll_lon, ur_lat, temp_netcdf = self.downloader.average_data(None, 'mean') return temp_netcdf -class WATCHData: +class WATCHData(object): """QGIS Plugin Implementation.""" def __init__(self, iface): @@ -176,14 +180,14 @@ def __init__(self, iface): gzip_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'WFDEIDownloader/WFDEI-land-long-lat-height.txt.gz') try: a = open(text_file) - except IOError,e: + except IOError as e: try: import gzip with gzip.open(gzip_file, 'rb') as zipFile: a = zipFile.read() with open(text_file, 'wb') as outFile: outFile.write(a) - except Exception, e: + except Exception as e: QMessageBox.critical(None, 'ha', str(e)) raise Exception('Could not locate mappings textfile, nor decompress its zipped copy') @@ -326,7 +330,7 @@ def refine(self): refined_filename = self.fileDialog.getSaveFileName(None, "Save refined climate data to...", None, "Text Files (*.txt)") if (refined_filename is None) or (len(refined_filename) == 0): return - except Exception, e: + except Exception as e: QMessageBox.critical(None, "Error", str(e)) self.setRefinerButtonState(True) self.setDownloaderButtonState(True) @@ -392,35 +396,35 @@ def run(self): # Check the more unusual dependencies to prevent confusing errors later try: import pandas - except Exception, e: + except Exception as e: QMessageBox.critical(None, 'Error', 'The WATCH data download/extract feature requires the pandas package ' 'to be installed. 
Please consult the FAQ in the manual for further ' 'information on how to install missing python packages.') return try: import ftplib - except Exception, e: + except Exception as e: QMessageBox.critical(None, 'Error', 'The WATCH data download/extract feature requires the ftplib package ' 'to be installed. Please consult the FAQ in the manual for further ' 'information on how to install missing python packages.') return try: import scipy - except Exception, e: + except Exception as e: QMessageBox.critical(None, 'Error', 'The WATCH data download/extract feature requires the scipy package ' 'to be installed. Please consult the FAQ in the manual for further ' 'information on how to install missing python packages.') return try: import requests - except Exception, e: + except Exception as e: QMessageBox.critical(None, 'Error', 'The WATCH data download/extract feature requires the requests package ' 'to be installed. Please consult the FAQ in the manual for further ' 'information on how to install missing python packages.') return try: import netCDF4 as nc4 - except Exception, e: + except Exception as e: QMessageBox.critical(None, 'Error', 'The WATCH data download/extract feature requires the NetCDF4 Python package ' 'to be installed. 
Please consult the FAQ in the manual for further ' @@ -437,8 +441,7 @@ def folderAH(self): self.dlg.textOutput_AH.setText(self.folderPathAH[0]) def help(self): - url = "http://umep-docs.readthedocs.io/en/latest/pre-processor/Meteorological%20Data%20" \ - "Download%20data%20(WATCH).html" + url = "http://urban-climate.net/umep/UMEP_Manual#Meteorological_Data:_Download_data_.28WATCH.29" webbrowser.open_new_tab(url) def refine_worker_finished(self): @@ -471,7 +474,7 @@ def download(self): download_filename = self.fileDialog.getSaveFileName(None, "Save downloaded WATCH data to...", None, "NetCDF Files (*.nc)") if (download_filename is None) or (len(download_filename) == 0): return - except Exception, e: + except Exception as e: QMessageBox.critical(None, "Error", str(e)) return diff --git a/WATCHData/watch_dialog.py b/WATCHData/watch_dialog.py index d060b01..6679523 100644 --- a/WATCHData/watch_dialog.py +++ b/WATCHData/watch_dialog.py @@ -23,13 +23,14 @@ import os -from PyQt4 import QtGui, uic +from qgis.PyQt import uic +from qgis.PyQt.QtWidgets import QDialog FORM_CLASS, _ = uic.loadUiType(os.path.join( os.path.dirname(__file__), 'watch_dialog_base.ui')) -class WATCHDataDialog(QtGui.QDialog, FORM_CLASS): +class WATCHDataDialog(QDialog, FORM_CLASS): def __init__(self, parent=None): """Constructor.""" super(WATCHDataDialog, self).__init__(parent) diff --git a/WallHeight/icon.png b/WallHeight/icon.png new file mode 100644 index 0000000..f696c00 Binary files /dev/null and b/WallHeight/icon.png differ diff --git a/WallHeight/pb_tool.cfg b/WallHeight/pb_tool.cfg new file mode 100644 index 0000000..d2a46ed --- /dev/null +++ b/WallHeight/pb_tool.cfg @@ -0,0 +1,74 @@ +#/*************************************************************************** +# WallHeight +# +# Configuration file for plugin builder tool (pb_tool) +# ------------------- +# begin : 2015-09-16 +# copyright : (C) 2015 by Fredrik Lindberg +# email : fredrikl@gvc.gu.se +# 
***************************************************************************/ +# +#/*************************************************************************** +# * * +# * This program is free software; you can redistribute it and/or modify * +# * it under the terms of the GNU General Public License as published by * +# * the Free Software Foundation; either version 2 of the License, or * +# * (at your option) any later version. * +# * * +# ***************************************************************************/ +# +# +# You can install pb_tool using: +# pip install http://geoapt.net/files/pb_tool.zip +# +# Consider doing your development (and install of pb_tool) in a virtualenv. +# +# For details on setting up and using pb_tool, see: +# http://spatialgalaxy.net/qgis-plugin-development-with-pb_tool +# +# Issues and pull requests here: +# https://github.com/g-sherman/plugin_build_tool: +# +# Sane defaults for your plugin generated by the Plugin Builder are +# already set below. +# +# As you add Python source files and UI files to your plugin, add +# them to the appropriate [files] section below. + +[plugin] +# Name of the plugin. This is the name of the directory that will +# be created in .qgis2/python/plugins +name: WallHeight + +[files] +# Python files that should be deployed with the plugin +python_files: __init__.py wall_height.py wall_height_dialog.py wallalgorithms.py + +# The main dialog file that is loaded (not compiled) +main_dialog: wall_height_dialog_base.ui + +# Other ui files for dialogs you create (these will be compiled) +compiled_ui_files: + +# Resource file(s) that will be compiled +resource_files: resources.qrc + +# Other files required for the plugin +extras: metadata.txt icon.png + +# Other directories to be deployed with the plugin. +# These must be subdirectories under the plugin directory +extra_dirs: + +# ISO code(s) for any locales (translations), separated by spaces. 
+# Corresponding .ts files must exist in the i18n directory +locales: + +[help] +# the built help directory that should be deployed with the plugin +dir: help/build/html +# the name of the directory to target in the deployed plugin +target: help + + + diff --git a/WallHeight/resources.py b/WallHeight/resources.py new file mode 100644 index 0000000..6ca9538 --- /dev/null +++ b/WallHeight/resources.py @@ -0,0 +1,128 @@ +# -*- coding: utf-8 -*- + +# Resource object code +# +# Created by: The Resource Compiler for PyQt5 (Qt v5.9.2) +# +# WARNING! All changes made in this file will be lost! + +from PyQt5 import QtCore + +qt_resource_data = b"\ +\x00\x00\x04\x0a\ +\x89\ +\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\ +\x00\x00\x17\x00\x00\x00\x18\x08\x06\x00\x00\x00\x11\x7c\x66\x75\ +\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\ +\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\ +\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\ +\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xd9\x02\x15\ +\x16\x11\x2c\x9d\x48\x83\xbb\x00\x00\x03\x8a\x49\x44\x41\x54\x48\ +\xc7\xad\x95\x4b\x68\x5c\x55\x18\xc7\x7f\xe7\xdc\x7b\x67\xe6\xce\ +\x4c\x66\x26\x49\xd3\x24\x26\xa6\xc6\xf8\x40\x21\xa5\x04\xb3\x28\ +\xda\x98\x20\xa5\x0b\xad\x55\xa8\x2b\xc5\x50\x1f\xa0\x6e\x34\x2b\ +\x45\x30\x14\x02\xba\x52\x69\x15\x17\x66\x63\x45\x97\x95\xa0\xad\ +\x0b\xfb\xc0\x06\x25\xb6\x71\x61\x12\x41\x50\xdb\x2a\x21\xd1\xe2\ +\x24\xf3\x9e\xc9\xcc\xbd\xe7\x1c\x17\x35\x43\x1e\x33\x21\xb6\xfd\ +\x56\x87\xf3\x9d\xfb\xfb\x1e\xf7\xff\x9d\x23\x8c\x31\x43\x95\xf4\ +\x85\x1e\x3f\x3b\x35\xac\xfd\xcc\x43\xdc\xa4\x49\x3b\xfe\x9d\x1d\ +\xdb\x7b\x22\x90\x78\xf8\xb2\x28\xa7\xbe\x7d\xc1\x4b\x9d\x79\xdf\ +\x18\x15\xe5\x16\x99\x10\x56\xde\x69\xdc\x3f\x22\xfd\xec\xd4\xf0\ +\xad\x04\x03\x18\xa3\xa2\x7e\x76\x6a\x58\xde\x68\x2b\xb4\x36\xf8\ +\xbe\xc6\x18\x53\xdb\xef\xe7\xfa\xec\xed\x67\x63\x10\x42\x00\xf0\ 
+\xfb\xd5\x65\x2a\x15\x45\xc7\x6d\x0d\x00\xc4\xa2\xc1\xaa\x6f\x0d\ +\x3e\x6c\xab\xc2\x1c\x56\xa4\x77\x4b\xb0\xf2\x35\x15\x5f\x21\x85\ +\xe0\xc8\x6b\x5f\x92\x2d\x37\x33\x39\xf9\x03\x27\x8e\x1f\xa2\xf7\ +\xbe\x9d\x04\x1c\x0b\x37\xe4\xac\xff\xa6\x30\x87\xbd\xba\x00\x6a\ +\x06\x79\xe5\xf5\xaf\x89\xd9\x92\xc5\xcc\x0a\xd9\x7c\x19\xcf\xe9\ +\xe2\xe4\xa9\x2f\x78\x7c\xff\x01\x72\x85\x0a\x2b\x65\x1f\xa5\x4c\ +\xb5\xb2\x55\x16\x80\xbd\x31\xda\xda\x20\x1f\x7d\x3e\xcd\xc2\xfd\ +\x59\xa6\x93\x39\x92\xd1\x22\xea\x9b\x16\xce\x9d\x3f\xce\xe0\x83\ +\x03\x24\x82\x59\x3a\xdb\x7b\x88\xc7\x82\x68\x63\x58\xc9\xcc\x62\ +\x8c\x21\x18\xb0\x6a\xc3\x37\x06\x49\x16\xff\x24\x6b\xa5\x49\xbb\ +\x25\xbc\xa2\xa6\x21\xbb\x40\x7f\xdf\x00\x83\xbd\x01\x8e\x3c\xd5\ +\x45\xd7\x8e\x6b\x9c\x9c\x98\x25\x1a\xb6\xe8\xbe\x3d\xc2\xdd\x77\ +\x44\x48\xc4\x1c\x22\xe1\xeb\x58\x59\xaf\xcf\xd3\x33\x29\x2e\x34\ +\x2d\x91\x93\x3e\xbe\x34\x78\x01\xc5\xe2\x61\xc5\xae\x72\x8e\x70\ +\xc8\xc2\x0d\x5a\xbc\xf5\xee\x2f\x9c\xfa\x3e\x86\x69\x7a\x8e\xcf\ +\x26\xe6\xf9\x63\xa1\x44\xa1\xa4\xd0\xda\x6c\x0d\x2f\x15\x7c\xb4\ +\x67\x28\x59\x0a\xcf\xd6\x54\xe2\x06\x13\x87\x2b\x6f\x68\xa6\x27\ +\xaf\x31\x32\x36\xc7\xb2\x7f\x17\xef\x7d\x7c\x8c\x33\x67\xcf\x12\ +\x70\x24\x4a\x69\xd6\x6a\x46\xd6\xd3\x70\x72\xa9\x82\x67\x34\x45\ +\xad\x28\xdb\x1a\x15\x34\x98\xff\x46\xed\xef\x37\x0d\x99\xbf\x4a\ +\x3c\x30\x38\xc0\xc8\x4b\xaf\x92\x5a\x9c\xe2\xe0\x23\x6d\x74\xb4\ +\xba\x84\x5d\x0b\x29\x45\x7d\xb8\x94\x82\x96\xb6\x10\xf3\xc5\x12\ +\x2a\xef\x53\x11\x1a\x63\xad\x3f\x93\x19\x85\xf1\xb1\x77\x58\x5a\ +\xf8\x99\x97\x9f\xe9\xa6\x75\x47\x90\xc6\xb8\x43\xd8\xb5\xb6\xce\ +\xfc\xfa\xfd\x00\xfb\x3e\xf4\xc8\x05\x35\xba\x5e\xeb\x46\x21\xf9\ +\xcf\x0a\xa9\x8c\x87\xe3\x48\xdc\x90\xb5\x6e\x98\x6a\xaa\x65\xf2\ +\x52\x92\x43\x2f\x5e\xc2\x8c\x02\x1a\x10\xf5\x07\xac\xc3\x75\x70\ +\x83\x92\x80\xb3\xf9\xd0\x26\xf8\x8f\xb3\x29\xc6\x3e\xb8\x8c\x19\ +\x35\x75\x6b\x7b\x7e\x3c\xca\x45\x0c\x7e\x49\x31\xf4\x58\x3b\xf7\ 
+\xf6\x34\x90\x88\x39\x04\x1c\x59\x1f\xfe\xdb\xd5\x3c\x5f\x9d\x4b\ +\x32\xfd\x44\xb2\xba\xd7\xfa\xb6\x60\xcf\xde\x16\xdc\x90\x45\x4c\ +\x4a\x2a\x9e\x62\xfe\x4e\xc5\xc8\xc1\x4e\xda\x76\x86\xe8\xe9\x0a\ +\xe3\xd8\x92\x58\xd4\xc6\xb2\x44\x6d\x78\x2a\x53\xe1\xca\x7c\x99\ +\x63\x5d\xbf\x56\x9d\xbd\x9f\x44\x18\x7a\xba\x95\x27\x0f\xb4\xd3\ +\xdc\x18\xc0\xf3\x0d\x52\x40\xd8\xb5\xb0\xa4\x20\x14\xb2\x70\x6c\ +\x81\x63\xcb\xaa\x42\xd6\xfd\xb7\xf4\xec\xa3\x06\xa0\x50\x52\xd8\ +\x4e\x1b\x7e\x4a\xd3\x31\xf9\x29\xcf\xfe\xd4\x49\x7f\x5f\x13\xfb\ +\xfa\x9b\x71\x43\x92\x58\xd4\x21\x18\x90\xac\xde\xb0\x42\x50\x13\ +\x58\x33\xf3\x88\x6b\xa1\xfd\x65\x96\xf2\x79\xc6\x43\x7b\xd8\x75\ +\x38\xcc\x3d\xdd\xd1\xaa\xcf\x71\xe4\xff\x7f\x91\x56\x33\xaf\xea\ +\x37\xe7\xa1\x94\x21\x16\xb5\xd1\x06\x2c\x29\x36\xf5\x72\x9b\x96\ +\x95\xc0\xc4\xda\x9d\x78\x83\x43\x53\x22\x80\x65\x09\x1c\xfb\x86\ +\xc1\x00\xe7\x25\x70\x14\x48\x6f\x1e\x22\x51\xe3\x75\xd9\xb6\xa5\ +\x81\xa3\x32\xb1\xfb\xf4\x0c\x30\xb8\xb1\x82\x9b\xb0\x09\x60\x30\ +\xb1\xfb\xf4\xcc\xbf\xa0\xe9\x6e\xae\x5a\xdf\x4b\x81\x00\x00\x00\ +\x00\x49\x45\x4e\x44\xae\x42\x60\x82\ +" + +qt_resource_name = b"\ +\x00\x07\ +\x07\x3b\xe0\xb3\ +\x00\x70\ +\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\ +\x00\x0a\ +\x00\xe0\x4d\x94\ +\x00\x57\ +\x00\x61\x00\x6c\x00\x6c\x00\x48\x00\x65\x00\x69\x00\x67\x00\x68\x00\x74\ +\x00\x08\ +\x0a\x61\x5a\xa7\ +\x00\x69\ +\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\ +" + +qt_resource_struct_v1 = b"\ +\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\ +\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\ +\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\ +\x00\x00\x00\x2e\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\ +" + +qt_resource_struct_v2 = b"\ +\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\ +\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\ +\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\ 
+\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x00\x2e\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\ +\x00\x00\x01\x4f\xc1\xdc\x7e\xad\ +" + +qt_version = QtCore.qVersion().split('.') +if qt_version < ['5', '8', '0']: + rcc_version = 1 + qt_resource_struct = qt_resource_struct_v1 +else: + rcc_version = 2 + qt_resource_struct = qt_resource_struct_v2 + +def qInitResources(): + QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data) + +def qCleanupResources(): + QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data) + +qInitResources() diff --git a/WallHeight/resources_rc.py b/WallHeight/resources_rc.py index 7b87f41..77fd252 100644 --- a/WallHeight/resources_rc.py +++ b/WallHeight/resources_rc.py @@ -7,7 +7,7 @@ # # WARNING! All changes made in this file will be lost! -from PyQt4 import QtCore +from qgis.PyQt import QtCore qt_resource_data = "\ \x00\x00\x04\x0a\ diff --git a/WallHeight/wall_height.py b/WallHeight/wall_height.py index 654871d..23626b5 100644 --- a/WallHeight/wall_height.py +++ b/WallHeight/wall_height.py @@ -20,18 +20,24 @@ * * ***************************************************************************/ """ -from PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplication, QThread -from PyQt4.QtGui import QAction, QIcon, QFileDialog, QMessageBox +from __future__ import absolute_import +from future import standard_library +standard_library.install_aliases() +from builtins import str +from builtins import object +from qgis.PyQt.QtCore import QSettings, QTranslator, qVersion, QCoreApplication, QThread +from qgis.PyQt.QtWidgets import QAction, QFileDialog, QMessageBox +from qgis.PyQt.QtGui import QIcon from qgis.gui import * -from qgis.core import QgsMessageLog -from wall_height_dialog import WallHeightDialog +from qgis.core import QgsMessageLog, QgsMapLayerProxyModel, Qgis +from .wall_height_dialog import WallHeightDialog import os.path -import wallalgorithms as wa 
+from . import wallalgorithms as wa from ..Utilities.misc import * -from wallworker import Worker +from .wallworker import Worker import webbrowser -class WallHeight: +class WallHeight(object): """QGIS Plugin Implementation.""" def __init__(self, iface): @@ -164,7 +170,7 @@ def save_file_place_height(self): result = self.fileDialog.exec_() if result == 1: self.filePathH = self.fileDialog.selectedFiles() - self.filePathH[0] = self.filePathH[0] + '.tif' + self.filePathH[0] = self.filePathH[0] # + '.tif' self.dlg.textOutputHeight.setText(self.filePathH[0]) self.dlg.runButton.setEnabled(1) @@ -173,7 +179,7 @@ def save_file_place_aspect(self): result = self.fileDialog.exec_() if result == 1: self.filePathA = self.fileDialog.selectedFiles() - self.filePathA[0] = self.filePathA[0] + '.tif' + self.filePathA[0] = self.filePathA[0] # + '.tif' self.dlg.textOutputAspect.setText(self.filePathA[0]) def run(self): @@ -297,7 +303,7 @@ def workerFinished(self, ret): self.dlg.pushButton.setEnabled(True) else: # notify the user that something went wrong - self.iface.messageBar().pushMessage('Operations cancelled either by user or error. See the General tab in Log Meassages Panel (speech bubble, lower right) for more information.', level=QgsMessageBar.CRITICAL, duration=5) + self.iface.messageBar().pushMessage('Operations cancelled either by user or error. 
See the General tab in Log Meassages Panel (speech bubble, lower right) for more information.', level=Qgis.Critical, duration=5) self.dlg.runButton.setText('Run') self.dlg.runButton.clicked.disconnect() self.dlg.runButton.clicked.connect(self.start_progress) @@ -305,13 +311,14 @@ def workerFinished(self, ret): self.dlg.progressBar.setValue(0) def workerError(self, errorstring): - QgsMessageLog.logMessage(errorstring, level=QgsMessageLog.CRITICAL) + # QgsMessageLog.logMessage(errorstring, level=QgsMessageLog.CRITICAL) + QgsMessageLog.logMessage(errorstring, level=Qgis.Critical) def progress_update(self): self.steps += 1 self.dlg.progressBar.setValue(self.steps) def help(self): - url = 'http://umep-docs.readthedocs.io/en/latest/pre-processor/Urban%20Geometry%20Wall%20' \ - 'Height%20and%20Aspect.html' + # url = "file://" + self.plugin_dir + "/help/Index.html" + url = 'http://www.urban-climate.net/umep/UMEP_Manual#Urban_Geometry:_Wall_Height_and_Aspect' webbrowser.open_new_tab(url) \ No newline at end of file diff --git a/WallHeight/wall_height_dialog.py b/WallHeight/wall_height_dialog.py index 092d566..fa1aa5b 100644 --- a/WallHeight/wall_height_dialog.py +++ b/WallHeight/wall_height_dialog.py @@ -23,13 +23,14 @@ import os -from PyQt4 import QtGui, uic +from qgis.PyQt import uic +from qgis.PyQt.QtWidgets import QDialog FORM_CLASS, _ = uic.loadUiType(os.path.join( os.path.dirname(__file__), 'wall_height_dialog_base.ui')) -class WallHeightDialog(QtGui.QDialog, FORM_CLASS): +class WallHeightDialog(QDialog, FORM_CLASS): def __init__(self, parent=None): """Constructor.""" super(WallHeightDialog, self).__init__(parent) diff --git a/WallHeight/wallalgorithms.py b/WallHeight/wallalgorithms.py index bad24cc..a3abef3 100644 --- a/WallHeight/wallalgorithms.py +++ b/WallHeight/wallalgorithms.py @@ -1,3 +1,4 @@ +from builtins import range # -*- coding: utf-8 -*- __author__ = 'xlinfr' diff --git a/WallHeight/wallworker.py b/WallHeight/wallworker.py index 2532a38..fb5ef13 100644 
--- a/WallHeight/wallworker.py +++ b/WallHeight/wallworker.py @@ -1,9 +1,11 @@ -from PyQt4 import QtCore, QtGui +from __future__ import absolute_import +from builtins import range +from qgis.PyQt import QtCore import traceback import numpy as np import scipy.misc as sc import math -from wallalgorithms import get_ders +from .wallalgorithms import get_ders import linecache import sys @@ -73,11 +75,11 @@ def run(self): if h == 30: filtmatrixbuild[:, filtmatrix.shape[0] - 1] = 0 if index == 225: - n = filtmatrix.shape[0] - 1 #length(filtmatrix); + n = filtmatrix.shape[0] - 1 filtmatrix1[0, 0] = 1 filtmatrix1[n, n] = 1 if index == 135: - n = filtmatrix.shape[0] - 1 #length(filtmatrix); + n = filtmatrix.shape[0] - 1 filtmatrix1[0, n] = 1 filtmatrix1[n, 0] = 1 diff --git a/help/build/doctrees/environment.pickle b/help/build/doctrees/environment.pickle index c781ce9..4acfb91 100644 Binary files a/help/build/doctrees/environment.pickle and b/help/build/doctrees/environment.pickle differ diff --git a/help/build/doctrees/index.doctree b/help/build/doctrees/index.doctree index b9827e6..0886d48 100644 Binary files a/help/build/doctrees/index.doctree and b/help/build/doctrees/index.doctree differ diff --git a/help/build/html/.buildinfo b/help/build/html/.buildinfo index 47e7703..d0b0b48 100644 --- a/help/build/html/.buildinfo +++ b/help/build/html/.buildinfo @@ -1,4 +1,4 @@ # Sphinx build info version 1 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. -config: 59460e14654d05b27e17d8763b43d932 +config: e8e6f38ceabb88c6d9b0a0b6d938e2a4 tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/help/build/html/_static/basic.css b/help/build/html/_static/basic.css index 6df76b0..19ced10 100644 --- a/help/build/html/_static/basic.css +++ b/help/build/html/_static/basic.css @@ -4,7 +4,7 @@ * * Sphinx stylesheet -- basic theme. * - * :copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS. 
+ * :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. * */ @@ -82,9 +82,21 @@ div.sphinxsidebar input { } div.sphinxsidebar #searchbox input[type="text"] { - width: 170px; + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; } + img { border: 0; max-width: 100%; @@ -199,6 +211,11 @@ table.modindextable td { /* -- general body styles --------------------------------------------------- */ +div.body { + min-width: 450px; + max-width: 800px; +} + div.body p, div.body dd, div.body li, div.body blockquote { -moz-hyphens: auto; -ms-hyphens: auto; @@ -332,6 +349,11 @@ table.docutils { border-collapse: collapse; } +table.align-center { + margin-left: auto; + margin-right: auto; +} + table caption span.caption-number { font-style: italic; } @@ -445,10 +467,14 @@ dd { margin-left: 30px; } -dt:target, .highlighted { +dt:target, span.highlighted { background-color: #fbe54e; } +rect.highlighted { + fill: #fbe54e; +} + dl.glossary dt { font-weight: bold; font-size: 1.1em; diff --git a/help/build/html/_static/classic.css b/help/build/html/_static/classic.css index 22fa0bd..6cfbfb9 100644 --- a/help/build/html/_static/classic.css +++ b/help/build/html/_static/classic.css @@ -4,7 +4,7 @@ * * Sphinx stylesheet -- classic theme. * - * :copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS. + * :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. * */ diff --git a/help/build/html/_static/doctools.js b/help/build/html/_static/doctools.js index 5654977..0c15c00 100644 --- a/help/build/html/_static/doctools.js +++ b/help/build/html/_static/doctools.js @@ -4,7 +4,7 @@ * * Sphinx JavaScript utilities for all documentation. * - * :copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS. 
+ * :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. * */ @@ -45,7 +45,7 @@ jQuery.urlencode = encodeURIComponent; * it will always return arrays of strings for the value parts. */ jQuery.getQueryParameters = function(s) { - if (typeof s == 'undefined') + if (typeof s === 'undefined') s = document.location.search; var parts = s.substr(s.indexOf('?') + 1).split('&'); var result = {}; @@ -66,29 +66,53 @@ jQuery.getQueryParameters = function(s) { * span elements with the given class name. */ jQuery.fn.highlightText = function(text, className) { - function highlight(node) { - if (node.nodeType == 3) { + function highlight(node, addItems) { + if (node.nodeType === 3) { var val = node.nodeValue; var pos = val.toLowerCase().indexOf(text); if (pos >= 0 && !jQuery(node.parentNode).hasClass(className)) { - var span = document.createElement("span"); - span.className = className; + var span; + var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.className = className; + } span.appendChild(document.createTextNode(val.substr(pos, text.length))); node.parentNode.insertBefore(span, node.parentNode.insertBefore( document.createTextNode(val.substr(pos + text.length)), node.nextSibling)); node.nodeValue = val.substr(0, pos); + if (isInSVG) { + var bbox = span.getBBox(); + var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute('class', className); + var parentOfText = node.parentNode.parentNode; + addItems.push({ + "parent": node.parentNode, + "target": rect}); + } } } else if (!jQuery(node).is("button, select, textarea")) { jQuery.each(node.childNodes, function() { - 
highlight(this); + highlight(this, addItems); }); } } - return this.each(function() { - highlight(this); + var addItems = []; + var result = this.each(function() { + highlight(this, addItems); }); + for (var i = 0; i < addItems.length; ++i) { + jQuery(addItems[i].parent).before(addItems[i].target); + } + return result; }; /* @@ -131,21 +155,21 @@ var Documentation = { * i18n support */ TRANSLATIONS : {}, - PLURAL_EXPR : function(n) { return n == 1 ? 0 : 1; }, + PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; }, LOCALE : 'unknown', // gettext and ngettext don't access this so that the functions // can safely bound to a different name (_ = Documentation.gettext) gettext : function(string) { var translated = Documentation.TRANSLATIONS[string]; - if (typeof translated == 'undefined') + if (typeof translated === 'undefined') return string; - return (typeof translated == 'string') ? translated : translated[0]; + return (typeof translated === 'string') ? translated : translated[0]; }, ngettext : function(singular, plural, n) { var translated = Documentation.TRANSLATIONS[singular]; - if (typeof translated == 'undefined') + if (typeof translated === 'undefined') return (n == 1) ? 
singular : plural; return translated[Documentation.PLURALEXPR(n)]; }, @@ -180,7 +204,7 @@ var Documentation = { * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 */ fixFirefoxAnchorBug : function() { - if (document.location.hash) + if (document.location.hash && $.browser.mozilla) window.setTimeout(function() { document.location.href += ''; }, 10); @@ -216,7 +240,7 @@ var Documentation = { var src = $(this).attr('src'); var idnum = $(this).attr('id').substr(7); $('tr.cg-' + idnum).toggle(); - if (src.substr(-9) == 'minus.png') + if (src.substr(-9) === 'minus.png') $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); else $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); @@ -248,7 +272,7 @@ var Documentation = { var path = document.location.pathname; var parts = path.split(/\//); $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { - if (this == '..') + if (this === '..') parts.pop(); }); var url = parts.join('/'); diff --git a/help/build/html/_static/documentation_options.js b/help/build/html/_static/documentation_options.js new file mode 100644 index 0000000..fb47b84 --- /dev/null +++ b/help/build/html/_static/documentation_options.js @@ -0,0 +1,9 @@ +var DOCUMENTATION_OPTIONS = { + URL_ROOT: '', + VERSION: '0.1', + LANGUAGE: 'None', + COLLAPSE_INDEX: false, + FILE_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '.txt' +}; \ No newline at end of file diff --git a/help/build/html/_static/jquery-3.2.1.js b/help/build/html/_static/jquery-3.2.1.js new file mode 100644 index 0000000..d2d8ca4 --- /dev/null +++ b/help/build/html/_static/jquery-3.2.1.js @@ -0,0 +1,10253 @@ +/*! 
+ * jQuery JavaScript Library v3.2.1 + * https://jquery.com/ + * + * Includes Sizzle.js + * https://sizzlejs.com/ + * + * Copyright JS Foundation and other contributors + * Released under the MIT license + * https://jquery.org/license + * + * Date: 2017-03-20T18:59Z + */ +( function( global, factory ) { + + "use strict"; + + if ( typeof module === "object" && typeof module.exports === "object" ) { + + // For CommonJS and CommonJS-like environments where a proper `window` + // is present, execute the factory and get jQuery. + // For environments that do not have a `window` with a `document` + // (such as Node.js), expose a factory as module.exports. + // This accentuates the need for the creation of a real `window`. + // e.g. var jQuery = require("jquery")(window); + // See ticket #14549 for more info. + module.exports = global.document ? + factory( global, true ) : + function( w ) { + if ( !w.document ) { + throw new Error( "jQuery requires a window with a document" ); + } + return factory( w ); + }; + } else { + factory( global ); + } + +// Pass this if window is not defined yet +} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { + +// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 +// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode +// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common +// enough that all such attempts are guarded in a try block. 
+"use strict"; + +var arr = []; + +var document = window.document; + +var getProto = Object.getPrototypeOf; + +var slice = arr.slice; + +var concat = arr.concat; + +var push = arr.push; + +var indexOf = arr.indexOf; + +var class2type = {}; + +var toString = class2type.toString; + +var hasOwn = class2type.hasOwnProperty; + +var fnToString = hasOwn.toString; + +var ObjectFunctionString = fnToString.call( Object ); + +var support = {}; + + + + function DOMEval( code, doc ) { + doc = doc || document; + + var script = doc.createElement( "script" ); + + script.text = code; + doc.head.appendChild( script ).parentNode.removeChild( script ); + } +/* global Symbol */ +// Defining this global in .eslintrc.json would create a danger of using the global +// unguarded in another place, it seems safer to define global only for this module + + + +var + version = "3.2.1", + + // Define a local copy of jQuery + jQuery = function( selector, context ) { + + // The jQuery object is actually just the init constructor 'enhanced' + // Need init if jQuery is called (just allow error to be thrown if not included) + return new jQuery.fn.init( selector, context ); + }, + + // Support: Android <=4.0 only + // Make sure we trim BOM and NBSP + rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, + + // Matches dashed string for camelizing + rmsPrefix = /^-ms-/, + rdashAlpha = /-([a-z])/g, + + // Used by jQuery.camelCase as callback to replace() + fcamelCase = function( all, letter ) { + return letter.toUpperCase(); + }; + +jQuery.fn = jQuery.prototype = { + + // The current version of jQuery being used + jquery: version, + + constructor: jQuery, + + // The default length of a jQuery object is 0 + length: 0, + + toArray: function() { + return slice.call( this ); + }, + + // Get the Nth element in the matched element set OR + // Get the whole matched element set as a clean array + get: function( num ) { + + // Return all the elements in a clean array + if ( num == null ) { + return slice.call( this ); + 
} + + // Return just the one element from the set + return num < 0 ? this[ num + this.length ] : this[ num ]; + }, + + // Take an array of elements and push it onto the stack + // (returning the new matched element set) + pushStack: function( elems ) { + + // Build a new jQuery matched element set + var ret = jQuery.merge( this.constructor(), elems ); + + // Add the old object onto the stack (as a reference) + ret.prevObject = this; + + // Return the newly-formed element set + return ret; + }, + + // Execute a callback for every element in the matched set. + each: function( callback ) { + return jQuery.each( this, callback ); + }, + + map: function( callback ) { + return this.pushStack( jQuery.map( this, function( elem, i ) { + return callback.call( elem, i, elem ); + } ) ); + }, + + slice: function() { + return this.pushStack( slice.apply( this, arguments ) ); + }, + + first: function() { + return this.eq( 0 ); + }, + + last: function() { + return this.eq( -1 ); + }, + + eq: function( i ) { + var len = this.length, + j = +i + ( i < 0 ? len : 0 ); + return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); + }, + + end: function() { + return this.prevObject || this.constructor(); + }, + + // For internal use only. + // Behaves like an Array's method, not like a jQuery method. 
+ push: push, + sort: arr.sort, + splice: arr.splice +}; + +jQuery.extend = jQuery.fn.extend = function() { + var options, name, src, copy, copyIsArray, clone, + target = arguments[ 0 ] || {}, + i = 1, + length = arguments.length, + deep = false; + + // Handle a deep copy situation + if ( typeof target === "boolean" ) { + deep = target; + + // Skip the boolean and the target + target = arguments[ i ] || {}; + i++; + } + + // Handle case when target is a string or something (possible in deep copy) + if ( typeof target !== "object" && !jQuery.isFunction( target ) ) { + target = {}; + } + + // Extend jQuery itself if only one argument is passed + if ( i === length ) { + target = this; + i--; + } + + for ( ; i < length; i++ ) { + + // Only deal with non-null/undefined values + if ( ( options = arguments[ i ] ) != null ) { + + // Extend the base object + for ( name in options ) { + src = target[ name ]; + copy = options[ name ]; + + // Prevent never-ending loop + if ( target === copy ) { + continue; + } + + // Recurse if we're merging plain objects or arrays + if ( deep && copy && ( jQuery.isPlainObject( copy ) || + ( copyIsArray = Array.isArray( copy ) ) ) ) { + + if ( copyIsArray ) { + copyIsArray = false; + clone = src && Array.isArray( src ) ? src : []; + + } else { + clone = src && jQuery.isPlainObject( src ) ? 
src : {}; + } + + // Never move original objects, clone them + target[ name ] = jQuery.extend( deep, clone, copy ); + + // Don't bring in undefined values + } else if ( copy !== undefined ) { + target[ name ] = copy; + } + } + } + } + + // Return the modified object + return target; +}; + +jQuery.extend( { + + // Unique for each copy of jQuery on the page + expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), + + // Assume jQuery is ready without the ready module + isReady: true, + + error: function( msg ) { + throw new Error( msg ); + }, + + noop: function() {}, + + isFunction: function( obj ) { + return jQuery.type( obj ) === "function"; + }, + + isWindow: function( obj ) { + return obj != null && obj === obj.window; + }, + + isNumeric: function( obj ) { + + // As of jQuery 3.0, isNumeric is limited to + // strings and numbers (primitives or objects) + // that can be coerced to finite numbers (gh-2662) + var type = jQuery.type( obj ); + return ( type === "number" || type === "string" ) && + + // parseFloat NaNs numeric-cast false positives ("") + // ...but misinterprets leading-number strings, particularly hex literals ("0x...") + // subtraction forces infinities to NaN + !isNaN( obj - parseFloat( obj ) ); + }, + + isPlainObject: function( obj ) { + var proto, Ctor; + + // Detect obvious negatives + // Use toString instead of jQuery.type to catch host objects + if ( !obj || toString.call( obj ) !== "[object Object]" ) { + return false; + } + + proto = getProto( obj ); + + // Objects with no prototype (e.g., `Object.create( null )`) are plain + if ( !proto ) { + return true; + } + + // Objects with prototype are plain iff they were constructed by a global Object function + Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; + return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; + }, + + isEmptyObject: function( obj ) { + + /* eslint-disable no-unused-vars */ + // See 
https://github.com/eslint/eslint/issues/6125 + var name; + + for ( name in obj ) { + return false; + } + return true; + }, + + type: function( obj ) { + if ( obj == null ) { + return obj + ""; + } + + // Support: Android <=2.3 only (functionish RegExp) + return typeof obj === "object" || typeof obj === "function" ? + class2type[ toString.call( obj ) ] || "object" : + typeof obj; + }, + + // Evaluates a script in a global context + globalEval: function( code ) { + DOMEval( code ); + }, + + // Convert dashed to camelCase; used by the css and data modules + // Support: IE <=9 - 11, Edge 12 - 13 + // Microsoft forgot to hump their vendor prefix (#9572) + camelCase: function( string ) { + return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); + }, + + each: function( obj, callback ) { + var length, i = 0; + + if ( isArrayLike( obj ) ) { + length = obj.length; + for ( ; i < length; i++ ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } else { + for ( i in obj ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } + + return obj; + }, + + // Support: Android <=4.0 only + trim: function( text ) { + return text == null ? + "" : + ( text + "" ).replace( rtrim, "" ); + }, + + // results is for internal usage only + makeArray: function( arr, results ) { + var ret = results || []; + + if ( arr != null ) { + if ( isArrayLike( Object( arr ) ) ) { + jQuery.merge( ret, + typeof arr === "string" ? + [ arr ] : arr + ); + } else { + push.call( ret, arr ); + } + } + + return ret; + }, + + inArray: function( elem, arr, i ) { + return arr == null ? 
-1 : indexOf.call( arr, elem, i ); + }, + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + merge: function( first, second ) { + var len = +second.length, + j = 0, + i = first.length; + + for ( ; j < len; j++ ) { + first[ i++ ] = second[ j ]; + } + + first.length = i; + + return first; + }, + + grep: function( elems, callback, invert ) { + var callbackInverse, + matches = [], + i = 0, + length = elems.length, + callbackExpect = !invert; + + // Go through the array, only saving the items + // that pass the validator function + for ( ; i < length; i++ ) { + callbackInverse = !callback( elems[ i ], i ); + if ( callbackInverse !== callbackExpect ) { + matches.push( elems[ i ] ); + } + } + + return matches; + }, + + // arg is for internal usage only + map: function( elems, callback, arg ) { + var length, value, + i = 0, + ret = []; + + // Go through the array, translating each of the items to their new values + if ( isArrayLike( elems ) ) { + length = elems.length; + for ( ; i < length; i++ ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + + // Go through every key on the object, + } else { + for ( i in elems ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + } + + // Flatten any nested arrays + return concat.apply( [], ret ); + }, + + // A global GUID counter for objects + guid: 1, + + // Bind a function to a context, optionally partially applying any + // arguments. + proxy: function( fn, context ) { + var tmp, args, proxy; + + if ( typeof context === "string" ) { + tmp = fn[ context ]; + context = fn; + fn = tmp; + } + + // Quick check to determine if target is callable, in the spec + // this throws a TypeError, but we will just return undefined. 
+ if ( !jQuery.isFunction( fn ) ) { + return undefined; + } + + // Simulated bind + args = slice.call( arguments, 2 ); + proxy = function() { + return fn.apply( context || this, args.concat( slice.call( arguments ) ) ); + }; + + // Set the guid of unique handler to the same of original handler, so it can be removed + proxy.guid = fn.guid = fn.guid || jQuery.guid++; + + return proxy; + }, + + now: Date.now, + + // jQuery.support is not used in Core but other projects attach their + // properties to it so it needs to exist. + support: support +} ); + +if ( typeof Symbol === "function" ) { + jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; +} + +// Populate the class2type map +jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), +function( i, name ) { + class2type[ "[object " + name + "]" ] = name.toLowerCase(); +} ); + +function isArrayLike( obj ) { + + // Support: real iOS 8.2 only (not reproducible in simulator) + // `in` check used to prevent JIT error (gh-2145) + // hasOwn isn't used here due to false negatives + // regarding Nodelist length in IE + var length = !!obj && "length" in obj && obj.length, + type = jQuery.type( obj ); + + if ( type === "function" || jQuery.isWindow( obj ) ) { + return false; + } + + return type === "array" || length === 0 || + typeof length === "number" && length > 0 && ( length - 1 ) in obj; +} +var Sizzle = +/*! 
+ * Sizzle CSS Selector Engine v2.3.3 + * https://sizzlejs.com/ + * + * Copyright jQuery Foundation and other contributors + * Released under the MIT license + * http://jquery.org/license + * + * Date: 2016-08-08 + */ +(function( window ) { + +var i, + support, + Expr, + getText, + isXML, + tokenize, + compile, + select, + outermostContext, + sortInput, + hasDuplicate, + + // Local document vars + setDocument, + document, + docElem, + documentIsHTML, + rbuggyQSA, + rbuggyMatches, + matches, + contains, + + // Instance-specific data + expando = "sizzle" + 1 * new Date(), + preferredDoc = window.document, + dirruns = 0, + done = 0, + classCache = createCache(), + tokenCache = createCache(), + compilerCache = createCache(), + sortOrder = function( a, b ) { + if ( a === b ) { + hasDuplicate = true; + } + return 0; + }, + + // Instance methods + hasOwn = ({}).hasOwnProperty, + arr = [], + pop = arr.pop, + push_native = arr.push, + push = arr.push, + slice = arr.slice, + // Use a stripped-down indexOf as it's faster than native + // https://jsperf.com/thor-indexof-vs-for/5 + indexOf = function( list, elem ) { + var i = 0, + len = list.length; + for ( ; i < len; i++ ) { + if ( list[i] === elem ) { + return i; + } + } + return -1; + }, + + booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", + + // Regular expressions + + // http://www.w3.org/TR/css3-selectors/#whitespace + whitespace = "[\\x20\\t\\r\\n\\f]", + + // http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier + identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+", + + // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors + attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + + // Operator (capture 2) + "*([*^$|!~]?=)" + whitespace + + // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" + "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" 
+ identifier + "))|)" + whitespace + + "*\\]", + + pseudos = ":(" + identifier + ")(?:\\((" + + // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: + // 1. quoted (capture 3; capture 4 or capture 5) + "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + + // 2. simple (capture 6) + "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + + // 3. anything else (capture 2) + ".*" + + ")\\)|)", + + // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter + rwhitespace = new RegExp( whitespace + "+", "g" ), + rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), + + rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), + rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), + + rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*?)" + whitespace + "*\\]", "g" ), + + rpseudo = new RegExp( pseudos ), + ridentifier = new RegExp( "^" + identifier + "$" ), + + matchExpr = { + "ID": new RegExp( "^#(" + identifier + ")" ), + "CLASS": new RegExp( "^\\.(" + identifier + ")" ), + "TAG": new RegExp( "^(" + identifier + "|[*])" ), + "ATTR": new RegExp( "^" + attributes ), + "PSEUDO": new RegExp( "^" + pseudos ), + "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), + "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), + // For use in libraries implementing .is() + // We use this for POS matching in `select` + "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + + whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) + }, + + rinputs = /^(?:input|select|textarea|button)$/i, + rheader = /^h\d$/i, + + rnative = /^[^{]+\{\s*\[native \w/, + + // 
Easily-parseable/retrievable ID or TAG or CLASS selectors + rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, + + rsibling = /[+~]/, + + // CSS escapes + // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters + runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), + funescape = function( _, escaped, escapedWhitespace ) { + var high = "0x" + escaped - 0x10000; + // NaN means non-codepoint + // Support: Firefox<24 + // Workaround erroneous numeric interpretation of +"0x" + return high !== high || escapedWhitespace ? + escaped : + high < 0 ? + // BMP codepoint + String.fromCharCode( high + 0x10000 ) : + // Supplemental Plane codepoint (surrogate pair) + String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); + }, + + // CSS string/identifier serialization + // https://drafts.csswg.org/cssom/#common-serializing-idioms + rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, + fcssescape = function( ch, asCodePoint ) { + if ( asCodePoint ) { + + // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER + if ( ch === "\0" ) { + return "\uFFFD"; + } + + // Control characters and (dependent upon position) numbers get escaped as code points + return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; + } + + // Other potentially-special ASCII characters get backslash-escaped + return "\\" + ch; + }, + + // Used for iframes + // See setDocument() + // Removing the function wrapper causes a "Permission Denied" + // error in IE + unloadHandler = function() { + setDocument(); + }, + + disabledAncestor = addCombinator( + function( elem ) { + return elem.disabled === true && ("form" in elem || "label" in elem); + }, + { dir: "parentNode", next: "legend" } + ); + +// Optimize for push.apply( _, NodeList ) +try { + push.apply( + (arr = slice.call( preferredDoc.childNodes )), + preferredDoc.childNodes + ); + // Support: Android<4.0 + // Detect silently failing push.apply + arr[ 
preferredDoc.childNodes.length ].nodeType; +} catch ( e ) { + push = { apply: arr.length ? + + // Leverage slice if possible + function( target, els ) { + push_native.apply( target, slice.call(els) ); + } : + + // Support: IE<9 + // Otherwise append directly + function( target, els ) { + var j = target.length, + i = 0; + // Can't trust NodeList.length + while ( (target[j++] = els[i++]) ) {} + target.length = j - 1; + } + }; +} + +function Sizzle( selector, context, results, seed ) { + var m, i, elem, nid, match, groups, newSelector, + newContext = context && context.ownerDocument, + + // nodeType defaults to 9, since context defaults to document + nodeType = context ? context.nodeType : 9; + + results = results || []; + + // Return early from calls with invalid selector or context + if ( typeof selector !== "string" || !selector || + nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { + + return results; + } + + // Try to shortcut find operations (as opposed to filters) in HTML documents + if ( !seed ) { + + if ( ( context ? 
context.ownerDocument || context : preferredDoc ) !== document ) { + setDocument( context ); + } + context = context || document; + + if ( documentIsHTML ) { + + // If the selector is sufficiently simple, try using a "get*By*" DOM method + // (excepting DocumentFragment context, where the methods don't exist) + if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) { + + // ID selector + if ( (m = match[1]) ) { + + // Document context + if ( nodeType === 9 ) { + if ( (elem = context.getElementById( m )) ) { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( elem.id === m ) { + results.push( elem ); + return results; + } + } else { + return results; + } + + // Element context + } else { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( newContext && (elem = newContext.getElementById( m )) && + contains( context, elem ) && + elem.id === m ) { + + results.push( elem ); + return results; + } + } + + // Type selector + } else if ( match[2] ) { + push.apply( results, context.getElementsByTagName( selector ) ); + return results; + + // Class selector + } else if ( (m = match[3]) && support.getElementsByClassName && + context.getElementsByClassName ) { + + push.apply( results, context.getElementsByClassName( m ) ); + return results; + } + } + + // Take advantage of querySelectorAll + if ( support.qsa && + !compilerCache[ selector + " " ] && + (!rbuggyQSA || !rbuggyQSA.test( selector )) ) { + + if ( nodeType !== 1 ) { + newContext = context; + newSelector = selector; + + // qSA looks outside Element context, which is not what we want + // Thanks to Andrew Dupont for this workaround technique + // Support: IE <=8 + // Exclude object elements + } else if ( context.nodeName.toLowerCase() !== "object" ) { + + // Capture the context ID, setting it first if necessary + if ( (nid = context.getAttribute( "id" )) ) 
{ + nid = nid.replace( rcssescape, fcssescape ); + } else { + context.setAttribute( "id", (nid = expando) ); + } + + // Prefix every selector in the list + groups = tokenize( selector ); + i = groups.length; + while ( i-- ) { + groups[i] = "#" + nid + " " + toSelector( groups[i] ); + } + newSelector = groups.join( "," ); + + // Expand context for sibling selectors + newContext = rsibling.test( selector ) && testContext( context.parentNode ) || + context; + } + + if ( newSelector ) { + try { + push.apply( results, + newContext.querySelectorAll( newSelector ) + ); + return results; + } catch ( qsaError ) { + } finally { + if ( nid === expando ) { + context.removeAttribute( "id" ); + } + } + } + } + } + } + + // All others + return select( selector.replace( rtrim, "$1" ), context, results, seed ); +} + +/** + * Create key-value caches of limited size + * @returns {function(string, object)} Returns the Object data after storing it on itself with + * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) + * deleting the oldest entry + */ +function createCache() { + var keys = []; + + function cache( key, value ) { + // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) + if ( keys.push( key + " " ) > Expr.cacheLength ) { + // Only keep the most recent entries + delete cache[ keys.shift() ]; + } + return (cache[ key + " " ] = value); + } + return cache; +} + +/** + * Mark a function for special use by Sizzle + * @param {Function} fn The function to mark + */ +function markFunction( fn ) { + fn[ expando ] = true; + return fn; +} + +/** + * Support testing using an element + * @param {Function} fn Passed the created element and returns a boolean result + */ +function assert( fn ) { + var el = document.createElement("fieldset"); + + try { + return !!fn( el ); + } catch (e) { + return false; + } finally { + // Remove from its parent by default + if ( el.parentNode ) { + el.parentNode.removeChild( el ); 
+ } + // release memory in IE + el = null; + } +} + +/** + * Adds the same handler for all of the specified attrs + * @param {String} attrs Pipe-separated list of attributes + * @param {Function} handler The method that will be applied + */ +function addHandle( attrs, handler ) { + var arr = attrs.split("|"), + i = arr.length; + + while ( i-- ) { + Expr.attrHandle[ arr[i] ] = handler; + } +} + +/** + * Checks document order of two siblings + * @param {Element} a + * @param {Element} b + * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b + */ +function siblingCheck( a, b ) { + var cur = b && a, + diff = cur && a.nodeType === 1 && b.nodeType === 1 && + a.sourceIndex - b.sourceIndex; + + // Use IE sourceIndex if available on both nodes + if ( diff ) { + return diff; + } + + // Check if b follows a + if ( cur ) { + while ( (cur = cur.nextSibling) ) { + if ( cur === b ) { + return -1; + } + } + } + + return a ? 1 : -1; +} + +/** + * Returns a function to use in pseudos for input types + * @param {String} type + */ +function createInputPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for buttons + * @param {String} type + */ +function createButtonPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return (name === "input" || name === "button") && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for :enabled/:disabled + * @param {Boolean} disabled true for :disabled; false for :enabled + */ +function createDisabledPseudo( disabled ) { + + // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable + return function( elem ) { + + // Only certain elements can match :enabled or :disabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled + // 
https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled + if ( "form" in elem ) { + + // Check for inherited disabledness on relevant non-disabled elements: + // * listed form-associated elements in a disabled fieldset + // https://html.spec.whatwg.org/multipage/forms.html#category-listed + // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled + // * option elements in a disabled optgroup + // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled + // All such elements have a "form" property. + if ( elem.parentNode && elem.disabled === false ) { + + // Option elements defer to a parent optgroup if present + if ( "label" in elem ) { + if ( "label" in elem.parentNode ) { + return elem.parentNode.disabled === disabled; + } else { + return elem.disabled === disabled; + } + } + + // Support: IE 6 - 11 + // Use the isDisabled shortcut property to check for disabled fieldset ancestors + return elem.isDisabled === disabled || + + // Where there is no isDisabled, check manually + /* jshint -W018 */ + elem.isDisabled !== !disabled && + disabledAncestor( elem ) === disabled; + } + + return elem.disabled === disabled; + + // Try to winnow out elements that can't be disabled before trusting the disabled property. + // Some victims get caught in our net (label, legend, menu, track), but it shouldn't + // even exist on them, let alone have a boolean value. 
+ } else if ( "label" in elem ) { + return elem.disabled === disabled; + } + + // Remaining elements are neither :enabled nor :disabled + return false; + }; +} + +/** + * Returns a function to use in pseudos for positionals + * @param {Function} fn + */ +function createPositionalPseudo( fn ) { + return markFunction(function( argument ) { + argument = +argument; + return markFunction(function( seed, matches ) { + var j, + matchIndexes = fn( [], seed.length, argument ), + i = matchIndexes.length; + + // Match elements found at the specified indexes + while ( i-- ) { + if ( seed[ (j = matchIndexes[i]) ] ) { + seed[j] = !(matches[j] = seed[j]); + } + } + }); + }); +} + +/** + * Checks a node for validity as a Sizzle context + * @param {Element|Object=} context + * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value + */ +function testContext( context ) { + return context && typeof context.getElementsByTagName !== "undefined" && context; +} + +// Expose support vars for convenience +support = Sizzle.support = {}; + +/** + * Detects XML nodes + * @param {Element|Object} elem An element or a document + * @returns {Boolean} True iff elem is a non-HTML XML node + */ +isXML = Sizzle.isXML = function( elem ) { + // documentElement is verified for cases where it doesn't yet exist + // (such as loading iframes in IE - #4833) + var documentElement = elem && (elem.ownerDocument || elem).documentElement; + return documentElement ? documentElement.nodeName !== "HTML" : false; +}; + +/** + * Sets document-related variables once based on the current document + * @param {Element|Object} [doc] An element or document object to use to set the document + * @returns {Object} Returns the current document + */ +setDocument = Sizzle.setDocument = function( node ) { + var hasCompare, subWindow, + doc = node ? 
node.ownerDocument || node : preferredDoc; + + // Return early if doc is invalid or already selected + if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) { + return document; + } + + // Update global variables + document = doc; + docElem = document.documentElement; + documentIsHTML = !isXML( document ); + + // Support: IE 9-11, Edge + // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) + if ( preferredDoc !== document && + (subWindow = document.defaultView) && subWindow.top !== subWindow ) { + + // Support: IE 11, Edge + if ( subWindow.addEventListener ) { + subWindow.addEventListener( "unload", unloadHandler, false ); + + // Support: IE 9 - 10 only + } else if ( subWindow.attachEvent ) { + subWindow.attachEvent( "onunload", unloadHandler ); + } + } + + /* Attributes + ---------------------------------------------------------------------- */ + + // Support: IE<8 + // Verify that getAttribute really returns attributes and not properties + // (excepting IE8 booleans) + support.attributes = assert(function( el ) { + el.className = "i"; + return !el.getAttribute("className"); + }); + + /* getElement(s)By* + ---------------------------------------------------------------------- */ + + // Check if getElementsByTagName("*") returns only elements + support.getElementsByTagName = assert(function( el ) { + el.appendChild( document.createComment("") ); + return !el.getElementsByTagName("*").length; + }); + + // Support: IE<9 + support.getElementsByClassName = rnative.test( document.getElementsByClassName ); + + // Support: IE<10 + // Check if getElementById returns elements by name + // The broken getElementById methods don't pick up programmatically-set names, + // so use a roundabout getElementsByName test + support.getById = assert(function( el ) { + docElem.appendChild( el ).id = expando; + return !document.getElementsByName || !document.getElementsByName( expando ).length; + }); + + // ID filter and find + if ( 
support.getById ) { + Expr.filter["ID"] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + return elem.getAttribute("id") === attrId; + }; + }; + Expr.find["ID"] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var elem = context.getElementById( id ); + return elem ? [ elem ] : []; + } + }; + } else { + Expr.filter["ID"] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + var node = typeof elem.getAttributeNode !== "undefined" && + elem.getAttributeNode("id"); + return node && node.value === attrId; + }; + }; + + // Support: IE 6 - 7 only + // getElementById is not reliable as a find shortcut + Expr.find["ID"] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var node, i, elems, + elem = context.getElementById( id ); + + if ( elem ) { + + // Verify the id attribute + node = elem.getAttributeNode("id"); + if ( node && node.value === id ) { + return [ elem ]; + } + + // Fall back on getElementsByName + elems = context.getElementsByName( id ); + i = 0; + while ( (elem = elems[i++]) ) { + node = elem.getAttributeNode("id"); + if ( node && node.value === id ) { + return [ elem ]; + } + } + } + + return []; + } + }; + } + + // Tag + Expr.find["TAG"] = support.getElementsByTagName ? 
+ function( tag, context ) { + if ( typeof context.getElementsByTagName !== "undefined" ) { + return context.getElementsByTagName( tag ); + + // DocumentFragment nodes don't have gEBTN + } else if ( support.qsa ) { + return context.querySelectorAll( tag ); + } + } : + + function( tag, context ) { + var elem, + tmp = [], + i = 0, + // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too + results = context.getElementsByTagName( tag ); + + // Filter out possible comments + if ( tag === "*" ) { + while ( (elem = results[i++]) ) { + if ( elem.nodeType === 1 ) { + tmp.push( elem ); + } + } + + return tmp; + } + return results; + }; + + // Class + Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) { + if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { + return context.getElementsByClassName( className ); + } + }; + + /* QSA/matchesSelector + ---------------------------------------------------------------------- */ + + // QSA and matchesSelector support + + // matchesSelector(:active) reports false when true (IE9/Opera 11.5) + rbuggyMatches = []; + + // qSa(:focus) reports false when true (Chrome 21) + // We allow this because of a bug in IE8/9 that throws an error + // whenever `document.activeElement` is accessed on an iframe + // So, we allow :focus to pass through QSA all the time to avoid the IE error + // See https://bugs.jquery.com/ticket/13378 + rbuggyQSA = []; + + if ( (support.qsa = rnative.test( document.querySelectorAll )) ) { + // Build QSA regex + // Regex strategy adopted from Diego Perini + assert(function( el ) { + // Select is set to empty string on purpose + // This is to test IE's treatment of not explicitly + // setting a boolean content attribute, + // since its presence should be enough + // https://bugs.jquery.com/ticket/12359 + docElem.appendChild( el ).innerHTML = "" + + ""; + + // Support: IE8, Opera 11-12.16 + // Nothing should be selected when empty 
strings follow ^= or $= or *= + // The test attribute must be unknown in Opera but "safe" for WinRT + // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section + if ( el.querySelectorAll("[msallowcapture^='']").length ) { + rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); + } + + // Support: IE8 + // Boolean attributes and "value" are not treated correctly + if ( !el.querySelectorAll("[selected]").length ) { + rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); + } + + // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+ + if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) { + rbuggyQSA.push("~="); + } + + // Webkit/Opera - :checked should return selected option elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + // IE8 throws error here and will not see later tests + if ( !el.querySelectorAll(":checked").length ) { + rbuggyQSA.push(":checked"); + } + + // Support: Safari 8+, iOS 8+ + // https://bugs.webkit.org/show_bug.cgi?id=136851 + // In-page `selector#id sibling-combinator selector` fails + if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) { + rbuggyQSA.push(".#.+[+~]"); + } + }); + + assert(function( el ) { + el.innerHTML = "" + + ""; + + // Support: Windows 8 Native Apps + // The type and name attributes are restricted during .innerHTML assignment + var input = document.createElement("input"); + input.setAttribute( "type", "hidden" ); + el.appendChild( input ).setAttribute( "name", "D" ); + + // Support: IE8 + // Enforce case-sensitivity of name attribute + if ( el.querySelectorAll("[name=d]").length ) { + rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); + } + + // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) + // IE8 throws error here and will not see later tests + if ( el.querySelectorAll(":enabled").length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Support: IE9-11+ + // 
IE's :disabled selector does not pick up the children of disabled fieldsets + docElem.appendChild( el ).disabled = true; + if ( el.querySelectorAll(":disabled").length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Opera 10-11 does not throw on post-comma invalid pseudos + el.querySelectorAll("*,:x"); + rbuggyQSA.push(",.*:"); + }); + } + + if ( (support.matchesSelector = rnative.test( (matches = docElem.matches || + docElem.webkitMatchesSelector || + docElem.mozMatchesSelector || + docElem.oMatchesSelector || + docElem.msMatchesSelector) )) ) { + + assert(function( el ) { + // Check to see if it's possible to do matchesSelector + // on a disconnected node (IE 9) + support.disconnectedMatch = matches.call( el, "*" ); + + // This should fail with an exception + // Gecko does not error, returns false instead + matches.call( el, "[s!='']:x" ); + rbuggyMatches.push( "!=", pseudos ); + }); + } + + rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join("|") ); + rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join("|") ); + + /* Contains + ---------------------------------------------------------------------- */ + hasCompare = rnative.test( docElem.compareDocumentPosition ); + + // Element contains another + // Purposefully self-exclusive + // As in, an element does not contain itself + contains = hasCompare || rnative.test( docElem.contains ) ? + function( a, b ) { + var adown = a.nodeType === 9 ? a.documentElement : a, + bup = b && b.parentNode; + return a === bup || !!( bup && bup.nodeType === 1 && ( + adown.contains ? + adown.contains( bup ) : + a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 + )); + } : + function( a, b ) { + if ( b ) { + while ( (b = b.parentNode) ) { + if ( b === a ) { + return true; + } + } + } + return false; + }; + + /* Sorting + ---------------------------------------------------------------------- */ + + // Document order sorting + sortOrder = hasCompare ? 
+ function( a, b ) { + + // Flag for duplicate removal + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + // Sort on method existence if only one input has compareDocumentPosition + var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; + if ( compare ) { + return compare; + } + + // Calculate position if both inputs belong to the same document + compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ? + a.compareDocumentPosition( b ) : + + // Otherwise we know they are disconnected + 1; + + // Disconnected nodes + if ( compare & 1 || + (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) { + + // Choose the first element that is related to our preferred document + if ( a === document || a.ownerDocument === preferredDoc && contains(preferredDoc, a) ) { + return -1; + } + if ( b === document || b.ownerDocument === preferredDoc && contains(preferredDoc, b) ) { + return 1; + } + + // Maintain original order + return sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + } + + return compare & 4 ? -1 : 1; + } : + function( a, b ) { + // Exit early if the nodes are identical + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + var cur, + i = 0, + aup = a.parentNode, + bup = b.parentNode, + ap = [ a ], + bp = [ b ]; + + // Parentless nodes are either documents or disconnected + if ( !aup || !bup ) { + return a === document ? -1 : + b === document ? 1 : + aup ? -1 : + bup ? 1 : + sortInput ? 
+ ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + + // If the nodes are siblings, we can do a quick check + } else if ( aup === bup ) { + return siblingCheck( a, b ); + } + + // Otherwise we need full lists of their ancestors for comparison + cur = a; + while ( (cur = cur.parentNode) ) { + ap.unshift( cur ); + } + cur = b; + while ( (cur = cur.parentNode) ) { + bp.unshift( cur ); + } + + // Walk down the tree looking for a discrepancy + while ( ap[i] === bp[i] ) { + i++; + } + + return i ? + // Do a sibling check if the nodes have a common ancestor + siblingCheck( ap[i], bp[i] ) : + + // Otherwise nodes in our document sort first + ap[i] === preferredDoc ? -1 : + bp[i] === preferredDoc ? 1 : + 0; + }; + + return document; +}; + +Sizzle.matches = function( expr, elements ) { + return Sizzle( expr, null, null, elements ); +}; + +Sizzle.matchesSelector = function( elem, expr ) { + // Set document vars if needed + if ( ( elem.ownerDocument || elem ) !== document ) { + setDocument( elem ); + } + + // Make sure that attribute selectors are quoted + expr = expr.replace( rattributeQuotes, "='$1']" ); + + if ( support.matchesSelector && documentIsHTML && + !compilerCache[ expr + " " ] && + ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && + ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { + + try { + var ret = matches.call( elem, expr ); + + // IE 9's matchesSelector returns false on disconnected nodes + if ( ret || support.disconnectedMatch || + // As well, disconnected nodes are said to be in a document + // fragment in IE 9 + elem.document && elem.document.nodeType !== 11 ) { + return ret; + } + } catch (e) {} + } + + return Sizzle( expr, document, null, [ elem ] ).length > 0; +}; + +Sizzle.contains = function( context, elem ) { + // Set document vars if needed + if ( ( context.ownerDocument || context ) !== document ) { + setDocument( context ); + } + return contains( context, elem ); +}; + +Sizzle.attr = function( elem, name ) { + // Set document vars if 
needed + if ( ( elem.ownerDocument || elem ) !== document ) { + setDocument( elem ); + } + + var fn = Expr.attrHandle[ name.toLowerCase() ], + // Don't get fooled by Object.prototype properties (jQuery #13807) + val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? + fn( elem, name, !documentIsHTML ) : + undefined; + + return val !== undefined ? + val : + support.attributes || !documentIsHTML ? + elem.getAttribute( name ) : + (val = elem.getAttributeNode(name)) && val.specified ? + val.value : + null; +}; + +Sizzle.escape = function( sel ) { + return (sel + "").replace( rcssescape, fcssescape ); +}; + +Sizzle.error = function( msg ) { + throw new Error( "Syntax error, unrecognized expression: " + msg ); +}; + +/** + * Document sorting and removing duplicates + * @param {ArrayLike} results + */ +Sizzle.uniqueSort = function( results ) { + var elem, + duplicates = [], + j = 0, + i = 0; + + // Unless we *know* we can detect duplicates, assume their presence + hasDuplicate = !support.detectDuplicates; + sortInput = !support.sortStable && results.slice( 0 ); + results.sort( sortOrder ); + + if ( hasDuplicate ) { + while ( (elem = results[i++]) ) { + if ( elem === results[ i ] ) { + j = duplicates.push( i ); + } + } + while ( j-- ) { + results.splice( duplicates[ j ], 1 ); + } + } + + // Clear input after sorting to release objects + // See https://github.com/jquery/sizzle/pull/225 + sortInput = null; + + return results; +}; + +/** + * Utility function for retrieving the text value of an array of DOM nodes + * @param {Array|Element} elem + */ +getText = Sizzle.getText = function( elem ) { + var node, + ret = "", + i = 0, + nodeType = elem.nodeType; + + if ( !nodeType ) { + // If no nodeType, this is expected to be an array + while ( (node = elem[i++]) ) { + // Do not traverse comment nodes + ret += getText( node ); + } + } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { + // Use textContent for elements + // innerText usage removed for 
consistency of new lines (jQuery #11153) + if ( typeof elem.textContent === "string" ) { + return elem.textContent; + } else { + // Traverse its children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + ret += getText( elem ); + } + } + } else if ( nodeType === 3 || nodeType === 4 ) { + return elem.nodeValue; + } + // Do not include comment or processing instruction nodes + + return ret; +}; + +Expr = Sizzle.selectors = { + + // Can be adjusted by the user + cacheLength: 50, + + createPseudo: markFunction, + + match: matchExpr, + + attrHandle: {}, + + find: {}, + + relative: { + ">": { dir: "parentNode", first: true }, + " ": { dir: "parentNode" }, + "+": { dir: "previousSibling", first: true }, + "~": { dir: "previousSibling" } + }, + + preFilter: { + "ATTR": function( match ) { + match[1] = match[1].replace( runescape, funescape ); + + // Move the given value to match[3] whether quoted or unquoted + match[3] = ( match[3] || match[4] || match[5] || "" ).replace( runescape, funescape ); + + if ( match[2] === "~=" ) { + match[3] = " " + match[3] + " "; + } + + return match.slice( 0, 4 ); + }, + + "CHILD": function( match ) { + /* matches from matchExpr["CHILD"] + 1 type (only|nth|...) + 2 what (child|of-type) + 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) + 4 xn-component of xn+y argument ([+-]?\d*n|) + 5 sign of xn-component + 6 x of xn-component + 7 sign of y-component + 8 y of y-component + */ + match[1] = match[1].toLowerCase(); + + if ( match[1].slice( 0, 3 ) === "nth" ) { + // nth-* requires argument + if ( !match[3] ) { + Sizzle.error( match[0] ); + } + + // numeric x and y parameters for Expr.filter.CHILD + // remember that false/true cast respectively to 0/1 + match[4] = +( match[4] ? 
match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) ); + match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" ); + + // other types prohibit arguments + } else if ( match[3] ) { + Sizzle.error( match[0] ); + } + + return match; + }, + + "PSEUDO": function( match ) { + var excess, + unquoted = !match[6] && match[2]; + + if ( matchExpr["CHILD"].test( match[0] ) ) { + return null; + } + + // Accept quoted arguments as-is + if ( match[3] ) { + match[2] = match[4] || match[5] || ""; + + // Strip excess characters from unquoted arguments + } else if ( unquoted && rpseudo.test( unquoted ) && + // Get excess from tokenize (recursively) + (excess = tokenize( unquoted, true )) && + // advance to the next closing parenthesis + (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { + + // excess is a negative index + match[0] = match[0].slice( 0, excess ); + match[2] = unquoted.slice( 0, excess ); + } + + // Return only captures needed by the pseudo filter method (type and argument) + return match.slice( 0, 3 ); + } + }, + + filter: { + + "TAG": function( nodeNameSelector ) { + var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); + return nodeNameSelector === "*" ? 
+ function() { return true; } : + function( elem ) { + return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; + }; + }, + + "CLASS": function( className ) { + var pattern = classCache[ className + " " ]; + + return pattern || + (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && + classCache( className, function( elem ) { + return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== "undefined" && elem.getAttribute("class") || "" ); + }); + }, + + "ATTR": function( name, operator, check ) { + return function( elem ) { + var result = Sizzle.attr( elem, name ); + + if ( result == null ) { + return operator === "!="; + } + if ( !operator ) { + return true; + } + + result += ""; + + return operator === "=" ? result === check : + operator === "!=" ? result !== check : + operator === "^=" ? check && result.indexOf( check ) === 0 : + operator === "*=" ? check && result.indexOf( check ) > -1 : + operator === "$=" ? check && result.slice( -check.length ) === check : + operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : + operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : + false; + }; + }, + + "CHILD": function( type, what, argument, first, last ) { + var simple = type.slice( 0, 3 ) !== "nth", + forward = type.slice( -4 ) !== "last", + ofType = what === "of-type"; + + return first === 1 && last === 0 ? + + // Shortcut for :nth-*(n) + function( elem ) { + return !!elem.parentNode; + } : + + function( elem, context, xml ) { + var cache, uniqueCache, outerCache, node, nodeIndex, start, + dir = simple !== forward ? 
"nextSibling" : "previousSibling", + parent = elem.parentNode, + name = ofType && elem.nodeName.toLowerCase(), + useCache = !xml && !ofType, + diff = false; + + if ( parent ) { + + // :(first|last|only)-(child|of-type) + if ( simple ) { + while ( dir ) { + node = elem; + while ( (node = node[ dir ]) ) { + if ( ofType ? + node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) { + + return false; + } + } + // Reverse direction for :only-* (if we haven't yet done so) + start = dir = type === "only" && !start && "nextSibling"; + } + return true; + } + + start = [ forward ? parent.firstChild : parent.lastChild ]; + + // non-xml :nth-child(...) stores cache data on `parent` + if ( forward && useCache ) { + + // Seek `elem` from a previously-cached index + + // ...in a gzip-friendly way + node = parent; + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex && cache[ 2 ]; + node = nodeIndex && parent.childNodes[ nodeIndex ]; + + while ( (node = ++nodeIndex && node && node[ dir ] || + + // Fallback to seeking `elem` from the start + (diff = nodeIndex = 0) || start.pop()) ) { + + // When found, cache indexes on `parent` and break + if ( node.nodeType === 1 && ++diff && node === elem ) { + uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; + break; + } + } + + } else { + // Use previously-cached element index if available + if ( useCache ) { + // ...in a gzip-friendly way + node = elem; + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && 
cache[ 1 ]; + diff = nodeIndex; + } + + // xml :nth-child(...) + // or :nth-last-child(...) or :nth(-last)?-of-type(...) + if ( diff === false ) { + // Use the same loop as above to seek `elem` from the start + while ( (node = ++nodeIndex && node && node[ dir ] || + (diff = nodeIndex = 0) || start.pop()) ) { + + if ( ( ofType ? + node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) && + ++diff ) { + + // Cache the index of each encountered element + if ( useCache ) { + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + uniqueCache[ type ] = [ dirruns, diff ]; + } + + if ( node === elem ) { + break; + } + } + } + } + } + + // Incorporate the offset, then check against cycle size + diff -= last; + return diff === first || ( diff % first === 0 && diff / first >= 0 ); + } + }; + }, + + "PSEUDO": function( pseudo, argument ) { + // pseudo-class names are case-insensitive + // http://www.w3.org/TR/selectors/#pseudo-classes + // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters + // Remember that setFilters inherits from pseudos + var args, + fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || + Sizzle.error( "unsupported pseudo: " + pseudo ); + + // The user may use createPseudo to indicate that + // arguments are needed to create the filter function + // just as Sizzle does + if ( fn[ expando ] ) { + return fn( argument ); + } + + // But maintain support for old signatures + if ( fn.length > 1 ) { + args = [ pseudo, pseudo, "", argument ]; + return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
+ markFunction(function( seed, matches ) { + var idx, + matched = fn( seed, argument ), + i = matched.length; + while ( i-- ) { + idx = indexOf( seed, matched[i] ); + seed[ idx ] = !( matches[ idx ] = matched[i] ); + } + }) : + function( elem ) { + return fn( elem, 0, args ); + }; + } + + return fn; + } + }, + + pseudos: { + // Potentially complex pseudos + "not": markFunction(function( selector ) { + // Trim the selector passed to compile + // to avoid treating leading and trailing + // spaces as combinators + var input = [], + results = [], + matcher = compile( selector.replace( rtrim, "$1" ) ); + + return matcher[ expando ] ? + markFunction(function( seed, matches, context, xml ) { + var elem, + unmatched = matcher( seed, null, xml, [] ), + i = seed.length; + + // Match elements unmatched by `matcher` + while ( i-- ) { + if ( (elem = unmatched[i]) ) { + seed[i] = !(matches[i] = elem); + } + } + }) : + function( elem, context, xml ) { + input[0] = elem; + matcher( input, null, xml, results ); + // Don't keep the element (issue #299) + input[0] = null; + return !results.pop(); + }; + }), + + "has": markFunction(function( selector ) { + return function( elem ) { + return Sizzle( selector, elem ).length > 0; + }; + }), + + "contains": markFunction(function( text ) { + text = text.replace( runescape, funescape ); + return function( elem ) { + return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1; + }; + }), + + // "Whether an element is represented by a :lang() selector + // is based solely on the element's language value + // being equal to the identifier C, + // or beginning with the identifier C immediately followed by "-". + // The matching of C against the element's language value is performed case-insensitively. + // The identifier C does not have to be a valid language name." 
+ // http://www.w3.org/TR/selectors/#lang-pseudo + "lang": markFunction( function( lang ) { + // lang value must be a valid identifier + if ( !ridentifier.test(lang || "") ) { + Sizzle.error( "unsupported lang: " + lang ); + } + lang = lang.replace( runescape, funescape ).toLowerCase(); + return function( elem ) { + var elemLang; + do { + if ( (elemLang = documentIsHTML ? + elem.lang : + elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) { + + elemLang = elemLang.toLowerCase(); + return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; + } + } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); + return false; + }; + }), + + // Miscellaneous + "target": function( elem ) { + var hash = window.location && window.location.hash; + return hash && hash.slice( 1 ) === elem.id; + }, + + "root": function( elem ) { + return elem === docElem; + }, + + "focus": function( elem ) { + return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); + }, + + // Boolean properties + "enabled": createDisabledPseudo( false ), + "disabled": createDisabledPseudo( true ), + + "checked": function( elem ) { + // In CSS3, :checked should return both checked and selected elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + var nodeName = elem.nodeName.toLowerCase(); + return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); + }, + + "selected": function( elem ) { + // Accessing this property makes selected-by-default + // options in Safari work properly + if ( elem.parentNode ) { + elem.parentNode.selectedIndex; + } + + return elem.selected === true; + }, + + // Contents + "empty": function( elem ) { + // http://www.w3.org/TR/selectors/#empty-pseudo + // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), + // but not by others (comment: 8; processing instruction: 7; etc.) 
+ // nodeType < 6 works because attributes (2) do not appear as children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + if ( elem.nodeType < 6 ) { + return false; + } + } + return true; + }, + + "parent": function( elem ) { + return !Expr.pseudos["empty"]( elem ); + }, + + // Element/input types + "header": function( elem ) { + return rheader.test( elem.nodeName ); + }, + + "input": function( elem ) { + return rinputs.test( elem.nodeName ); + }, + + "button": function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === "button" || name === "button"; + }, + + "text": function( elem ) { + var attr; + return elem.nodeName.toLowerCase() === "input" && + elem.type === "text" && + + // Support: IE<8 + // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" + ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" ); + }, + + // Position-in-collection + "first": createPositionalPseudo(function() { + return [ 0 ]; + }), + + "last": createPositionalPseudo(function( matchIndexes, length ) { + return [ length - 1 ]; + }), + + "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { + return [ argument < 0 ? argument + length : argument ]; + }), + + "even": createPositionalPseudo(function( matchIndexes, length ) { + var i = 0; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "odd": createPositionalPseudo(function( matchIndexes, length ) { + var i = 1; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? argument + length : argument; + for ( ; --i >= 0; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? 
argument + length : argument; + for ( ; ++i < length; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }) + } +}; + +Expr.pseudos["nth"] = Expr.pseudos["eq"]; + +// Add button/input type pseudos +for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { + Expr.pseudos[ i ] = createInputPseudo( i ); +} +for ( i in { submit: true, reset: true } ) { + Expr.pseudos[ i ] = createButtonPseudo( i ); +} + +// Easy API for creating new setFilters +function setFilters() {} +setFilters.prototype = Expr.filters = Expr.pseudos; +Expr.setFilters = new setFilters(); + +tokenize = Sizzle.tokenize = function( selector, parseOnly ) { + var matched, match, tokens, type, + soFar, groups, preFilters, + cached = tokenCache[ selector + " " ]; + + if ( cached ) { + return parseOnly ? 0 : cached.slice( 0 ); + } + + soFar = selector; + groups = []; + preFilters = Expr.preFilter; + + while ( soFar ) { + + // Comma and first run + if ( !matched || (match = rcomma.exec( soFar )) ) { + if ( match ) { + // Don't consume trailing commas as valid + soFar = soFar.slice( match[0].length ) || soFar; + } + groups.push( (tokens = []) ); + } + + matched = false; + + // Combinators + if ( (match = rcombinators.exec( soFar )) ) { + matched = match.shift(); + tokens.push({ + value: matched, + // Cast descendant combinators to space + type: match[0].replace( rtrim, " " ) + }); + soFar = soFar.slice( matched.length ); + } + + // Filters + for ( type in Expr.filter ) { + if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || + (match = preFilters[ type ]( match ))) ) { + matched = match.shift(); + tokens.push({ + value: matched, + type: type, + matches: match + }); + soFar = soFar.slice( matched.length ); + } + } + + if ( !matched ) { + break; + } + } + + // Return the length of the invalid excess + // if we're just parsing + // Otherwise, throw an error or return tokens + return parseOnly ? + soFar.length : + soFar ? 
+ Sizzle.error( selector ) : + // Cache the tokens + tokenCache( selector, groups ).slice( 0 ); +}; + +function toSelector( tokens ) { + var i = 0, + len = tokens.length, + selector = ""; + for ( ; i < len; i++ ) { + selector += tokens[i].value; + } + return selector; +} + +function addCombinator( matcher, combinator, base ) { + var dir = combinator.dir, + skip = combinator.next, + key = skip || dir, + checkNonElements = base && key === "parentNode", + doneName = done++; + + return combinator.first ? + // Check against closest ancestor/preceding element + function( elem, context, xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + return matcher( elem, context, xml ); + } + } + return false; + } : + + // Check against all ancestor/preceding elements + function( elem, context, xml ) { + var oldCache, uniqueCache, outerCache, + newCache = [ dirruns, doneName ]; + + // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching + if ( xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + if ( matcher( elem, context, xml ) ) { + return true; + } + } + } + } else { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + outerCache = elem[ expando ] || (elem[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ elem.uniqueID ] || (outerCache[ elem.uniqueID ] = {}); + + if ( skip && skip === elem.nodeName.toLowerCase() ) { + elem = elem[ dir ] || elem; + } else if ( (oldCache = uniqueCache[ key ]) && + oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { + + // Assign to newCache so results back-propagate to previous elements + return (newCache[ 2 ] = oldCache[ 2 ]); + } else { + // Reuse newcache so results back-propagate to previous elements + uniqueCache[ key ] = newCache; + + // A match means we're done; a fail means we have to keep checking + 
if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) { + return true; + } + } + } + } + } + return false; + }; +} + +function elementMatcher( matchers ) { + return matchers.length > 1 ? + function( elem, context, xml ) { + var i = matchers.length; + while ( i-- ) { + if ( !matchers[i]( elem, context, xml ) ) { + return false; + } + } + return true; + } : + matchers[0]; +} + +function multipleContexts( selector, contexts, results ) { + var i = 0, + len = contexts.length; + for ( ; i < len; i++ ) { + Sizzle( selector, contexts[i], results ); + } + return results; +} + +function condense( unmatched, map, filter, context, xml ) { + var elem, + newUnmatched = [], + i = 0, + len = unmatched.length, + mapped = map != null; + + for ( ; i < len; i++ ) { + if ( (elem = unmatched[i]) ) { + if ( !filter || filter( elem, context, xml ) ) { + newUnmatched.push( elem ); + if ( mapped ) { + map.push( i ); + } + } + } + } + + return newUnmatched; +} + +function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { + if ( postFilter && !postFilter[ expando ] ) { + postFilter = setMatcher( postFilter ); + } + if ( postFinder && !postFinder[ expando ] ) { + postFinder = setMatcher( postFinder, postSelector ); + } + return markFunction(function( seed, results, context, xml ) { + var temp, i, elem, + preMap = [], + postMap = [], + preexisting = results.length, + + // Get initial elements from seed or context + elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), + + // Prefilter to get matcher input, preserving a map for seed-results synchronization + matcherIn = preFilter && ( seed || !selector ) ? + condense( elems, preMap, preFilter, context, xml ) : + elems, + + matcherOut = matcher ? + // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, + postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
+ + // ...intermediate processing is necessary + [] : + + // ...otherwise use results directly + results : + matcherIn; + + // Find primary matches + if ( matcher ) { + matcher( matcherIn, matcherOut, context, xml ); + } + + // Apply postFilter + if ( postFilter ) { + temp = condense( matcherOut, postMap ); + postFilter( temp, [], context, xml ); + + // Un-match failing elements by moving them back to matcherIn + i = temp.length; + while ( i-- ) { + if ( (elem = temp[i]) ) { + matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); + } + } + } + + if ( seed ) { + if ( postFinder || preFilter ) { + if ( postFinder ) { + // Get the final matcherOut by condensing this intermediate into postFinder contexts + temp = []; + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) ) { + // Restore matcherIn since elem is not yet a final match + temp.push( (matcherIn[i] = elem) ); + } + } + postFinder( null, (matcherOut = []), temp, xml ); + } + + // Move matched elements from seed to results to keep them synchronized + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) && + (temp = postFinder ? indexOf( seed, elem ) : preMap[i]) > -1 ) { + + seed[temp] = !(results[temp] = elem); + } + } + } + + // Add elements to results, through postFinder if defined + } else { + matcherOut = condense( + matcherOut === results ? + matcherOut.splice( preexisting, matcherOut.length ) : + matcherOut + ); + if ( postFinder ) { + postFinder( null, results, matcherOut, xml ); + } else { + push.apply( results, matcherOut ); + } + } + }); +} + +function matcherFromTokens( tokens ) { + var checkContext, matcher, j, + len = tokens.length, + leadingRelative = Expr.relative[ tokens[0].type ], + implicitRelative = leadingRelative || Expr.relative[" "], + i = leadingRelative ? 
1 : 0, + + // The foundational matcher ensures that elements are reachable from top-level context(s) + matchContext = addCombinator( function( elem ) { + return elem === checkContext; + }, implicitRelative, true ), + matchAnyContext = addCombinator( function( elem ) { + return indexOf( checkContext, elem ) > -1; + }, implicitRelative, true ), + matchers = [ function( elem, context, xml ) { + var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( + (checkContext = context).nodeType ? + matchContext( elem, context, xml ) : + matchAnyContext( elem, context, xml ) ); + // Avoid hanging onto element (issue #299) + checkContext = null; + return ret; + } ]; + + for ( ; i < len; i++ ) { + if ( (matcher = Expr.relative[ tokens[i].type ]) ) { + matchers = [ addCombinator(elementMatcher( matchers ), matcher) ]; + } else { + matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); + + // Return special upon seeing a positional matcher + if ( matcher[ expando ] ) { + // Find the next relative operator (if any) for proper handling + j = ++i; + for ( ; j < len; j++ ) { + if ( Expr.relative[ tokens[j].type ] ) { + break; + } + } + return setMatcher( + i > 1 && elementMatcher( matchers ), + i > 1 && toSelector( + // If the preceding token was a descendant combinator, insert an implicit any-element `*` + tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? 
"*" : "" }) + ).replace( rtrim, "$1" ), + matcher, + i < j && matcherFromTokens( tokens.slice( i, j ) ), + j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), + j < len && toSelector( tokens ) + ); + } + matchers.push( matcher ); + } + } + + return elementMatcher( matchers ); +} + +function matcherFromGroupMatchers( elementMatchers, setMatchers ) { + var bySet = setMatchers.length > 0, + byElement = elementMatchers.length > 0, + superMatcher = function( seed, context, xml, results, outermost ) { + var elem, j, matcher, + matchedCount = 0, + i = "0", + unmatched = seed && [], + setMatched = [], + contextBackup = outermostContext, + // We must always have either seed elements or outermost context + elems = seed || byElement && Expr.find["TAG"]( "*", outermost ), + // Use integer dirruns iff this is the outermost matcher + dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1), + len = elems.length; + + if ( outermost ) { + outermostContext = context === document || context || outermost; + } + + // Add elements passing elementMatchers directly to results + // Support: IE<9, Safari + // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id + for ( ; i !== len && (elem = elems[i]) != null; i++ ) { + if ( byElement && elem ) { + j = 0; + if ( !context && elem.ownerDocument !== document ) { + setDocument( elem ); + xml = !documentIsHTML; + } + while ( (matcher = elementMatchers[j++]) ) { + if ( matcher( elem, context || document, xml) ) { + results.push( elem ); + break; + } + } + if ( outermost ) { + dirruns = dirrunsUnique; + } + } + + // Track unmatched elements for set filters + if ( bySet ) { + // They will have gone through all possible matchers + if ( (elem = !matcher && elem) ) { + matchedCount--; + } + + // Lengthen the array for every element, matched or not + if ( seed ) { + unmatched.push( elem ); + } + } + } + + // `i` is now the count of elements visited above, and adding it to `matchedCount` + // 
makes the latter nonnegative. + matchedCount += i; + + // Apply set filters to unmatched elements + // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` + // equals `i`), unless we didn't visit _any_ elements in the above loop because we have + // no element matchers and no seed. + // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that + // case, which will result in a "00" `matchedCount` that differs from `i` but is also + // numerically zero. + if ( bySet && i !== matchedCount ) { + j = 0; + while ( (matcher = setMatchers[j++]) ) { + matcher( unmatched, setMatched, context, xml ); + } + + if ( seed ) { + // Reintegrate element matches to eliminate the need for sorting + if ( matchedCount > 0 ) { + while ( i-- ) { + if ( !(unmatched[i] || setMatched[i]) ) { + setMatched[i] = pop.call( results ); + } + } + } + + // Discard index placeholder values to get only actual matches + setMatched = condense( setMatched ); + } + + // Add matches to results + push.apply( results, setMatched ); + + // Seedless set matches succeeding multiple successful matchers stipulate sorting + if ( outermost && !seed && setMatched.length > 0 && + ( matchedCount + setMatchers.length ) > 1 ) { + + Sizzle.uniqueSort( results ); + } + } + + // Override manipulation of globals by nested matchers + if ( outermost ) { + dirruns = dirrunsUnique; + outermostContext = contextBackup; + } + + return unmatched; + }; + + return bySet ? 
+ markFunction( superMatcher ) : + superMatcher; +} + +compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { + var i, + setMatchers = [], + elementMatchers = [], + cached = compilerCache[ selector + " " ]; + + if ( !cached ) { + // Generate a function of recursive functions that can be used to check each element + if ( !match ) { + match = tokenize( selector ); + } + i = match.length; + while ( i-- ) { + cached = matcherFromTokens( match[i] ); + if ( cached[ expando ] ) { + setMatchers.push( cached ); + } else { + elementMatchers.push( cached ); + } + } + + // Cache the compiled function + cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); + + // Save selector and tokenization + cached.selector = selector; + } + return cached; +}; + +/** + * A low-level selection function that works with Sizzle's compiled + * selector functions + * @param {String|Function} selector A selector or a pre-compiled + * selector function built with Sizzle.compile + * @param {Element} context + * @param {Array} [results] + * @param {Array} [seed] A set of elements to match against + */ +select = Sizzle.select = function( selector, context, results, seed ) { + var i, tokens, token, type, find, + compiled = typeof selector === "function" && selector, + match = !seed && tokenize( (selector = compiled.selector || selector) ); + + results = results || []; + + // Try to minimize operations if there is only one selector in the list and no seed + // (the latter of which guarantees us context) + if ( match.length === 1 ) { + + // Reduce context if the leading compound selector is an ID + tokens = match[0] = match[0].slice( 0 ); + if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && + context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[1].type ] ) { + + context = ( Expr.find["ID"]( token.matches[0].replace(runescape, funescape), context ) || [] )[0]; + if ( !context ) { + return results; + + // Precompiled 
matchers will still verify ancestry, so step up a level + } else if ( compiled ) { + context = context.parentNode; + } + + selector = selector.slice( tokens.shift().value.length ); + } + + // Fetch a seed set for right-to-left matching + i = matchExpr["needsContext"].test( selector ) ? 0 : tokens.length; + while ( i-- ) { + token = tokens[i]; + + // Abort if we hit a combinator + if ( Expr.relative[ (type = token.type) ] ) { + break; + } + if ( (find = Expr.find[ type ]) ) { + // Search, expanding context for leading sibling combinators + if ( (seed = find( + token.matches[0].replace( runescape, funescape ), + rsibling.test( tokens[0].type ) && testContext( context.parentNode ) || context + )) ) { + + // If seed is empty or no tokens remain, we can return early + tokens.splice( i, 1 ); + selector = seed.length && toSelector( tokens ); + if ( !selector ) { + push.apply( results, seed ); + return results; + } + + break; + } + } + } + } + + // Compile and execute a filtering function if one is not provided + // Provide `match` to avoid retokenization if we modified the selector above + ( compiled || compile( selector, match ) )( + seed, + context, + !documentIsHTML, + results, + !context || rsibling.test( selector ) && testContext( context.parentNode ) || context + ); + return results; +}; + +// One-time assignments + +// Sort stability +support.sortStable = expando.split("").sort( sortOrder ).join("") === expando; + +// Support: Chrome 14-35+ +// Always assume duplicates if they aren't passed to the comparison function +support.detectDuplicates = !!hasDuplicate; + +// Initialize against the default document +setDocument(); + +// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27) +// Detached nodes confoundingly follow *each other* +support.sortDetached = assert(function( el ) { + // Should return 1, but returns 4 (following) + return el.compareDocumentPosition( document.createElement("fieldset") ) & 1; +}); + +// Support: IE<8 +// Prevent 
attribute/property "interpolation" +// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx +if ( !assert(function( el ) { + el.innerHTML = ""; + return el.firstChild.getAttribute("href") === "#" ; +}) ) { + addHandle( "type|href|height|width", function( elem, name, isXML ) { + if ( !isXML ) { + return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 ); + } + }); +} + +// Support: IE<9 +// Use defaultValue in place of getAttribute("value") +if ( !support.attributes || !assert(function( el ) { + el.innerHTML = ""; + el.firstChild.setAttribute( "value", "" ); + return el.firstChild.getAttribute( "value" ) === ""; +}) ) { + addHandle( "value", function( elem, name, isXML ) { + if ( !isXML && elem.nodeName.toLowerCase() === "input" ) { + return elem.defaultValue; + } + }); +} + +// Support: IE<9 +// Use getAttributeNode to fetch booleans when getAttribute lies +if ( !assert(function( el ) { + return el.getAttribute("disabled") == null; +}) ) { + addHandle( booleans, function( elem, name, isXML ) { + var val; + if ( !isXML ) { + return elem[ name ] === true ? name.toLowerCase() : + (val = elem.getAttributeNode( name )) && val.specified ? 
+ val.value : + null; + } + }); +} + +return Sizzle; + +})( window ); + + + +jQuery.find = Sizzle; +jQuery.expr = Sizzle.selectors; + +// Deprecated +jQuery.expr[ ":" ] = jQuery.expr.pseudos; +jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; +jQuery.text = Sizzle.getText; +jQuery.isXMLDoc = Sizzle.isXML; +jQuery.contains = Sizzle.contains; +jQuery.escapeSelector = Sizzle.escape; + + + + +var dir = function( elem, dir, until ) { + var matched = [], + truncate = until !== undefined; + + while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { + if ( elem.nodeType === 1 ) { + if ( truncate && jQuery( elem ).is( until ) ) { + break; + } + matched.push( elem ); + } + } + return matched; +}; + + +var siblings = function( n, elem ) { + var matched = []; + + for ( ; n; n = n.nextSibling ) { + if ( n.nodeType === 1 && n !== elem ) { + matched.push( n ); + } + } + + return matched; +}; + + +var rneedsContext = jQuery.expr.match.needsContext; + + + +function nodeName( elem, name ) { + + return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); + +}; +var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); + + + +var risSimple = /^.[^:#\[\.,]*$/; + +// Implement the identical functionality for filter and not +function winnow( elements, qualifier, not ) { + if ( jQuery.isFunction( qualifier ) ) { + return jQuery.grep( elements, function( elem, i ) { + return !!qualifier.call( elem, i, elem ) !== not; + } ); + } + + // Single element + if ( qualifier.nodeType ) { + return jQuery.grep( elements, function( elem ) { + return ( elem === qualifier ) !== not; + } ); + } + + // Arraylike of elements (jQuery, arguments, Array) + if ( typeof qualifier !== "string" ) { + return jQuery.grep( elements, function( elem ) { + return ( indexOf.call( qualifier, elem ) > -1 ) !== not; + } ); + } + + // Simple selector that can be filtered directly, removing non-Elements + if ( risSimple.test( qualifier ) ) { + return jQuery.filter( 
qualifier, elements, not ); + } + + // Complex selector, compare the two sets, removing non-Elements + qualifier = jQuery.filter( qualifier, elements ); + return jQuery.grep( elements, function( elem ) { + return ( indexOf.call( qualifier, elem ) > -1 ) !== not && elem.nodeType === 1; + } ); +} + +jQuery.filter = function( expr, elems, not ) { + var elem = elems[ 0 ]; + + if ( not ) { + expr = ":not(" + expr + ")"; + } + + if ( elems.length === 1 && elem.nodeType === 1 ) { + return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; + } + + return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { + return elem.nodeType === 1; + } ) ); +}; + +jQuery.fn.extend( { + find: function( selector ) { + var i, ret, + len = this.length, + self = this; + + if ( typeof selector !== "string" ) { + return this.pushStack( jQuery( selector ).filter( function() { + for ( i = 0; i < len; i++ ) { + if ( jQuery.contains( self[ i ], this ) ) { + return true; + } + } + } ) ); + } + + ret = this.pushStack( [] ); + + for ( i = 0; i < len; i++ ) { + jQuery.find( selector, self[ i ], ret ); + } + + return len > 1 ? jQuery.uniqueSort( ret ) : ret; + }, + filter: function( selector ) { + return this.pushStack( winnow( this, selector || [], false ) ); + }, + not: function( selector ) { + return this.pushStack( winnow( this, selector || [], true ) ); + }, + is: function( selector ) { + return !!winnow( + this, + + // If this is a positional/relative selector, check membership in the returned set + // so $("p:first").is("p:last") won't return true for a doc with two "p". + typeof selector === "string" && rneedsContext.test( selector ) ? 
+ jQuery( selector ) : + selector || [], + false + ).length; + } +} ); + + +// Initialize a jQuery object + + +// A central reference to the root jQuery(document) +var rootjQuery, + + // A simple way to check for HTML strings + // Prioritize #id over to avoid XSS via location.hash (#9521) + // Strict HTML recognition (#11290: must start with <) + // Shortcut simple #id case for speed + rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/, + + init = jQuery.fn.init = function( selector, context, root ) { + var match, elem; + + // HANDLE: $(""), $(null), $(undefined), $(false) + if ( !selector ) { + return this; + } + + // Method init() accepts an alternate rootjQuery + // so migrate can support jQuery.sub (gh-2101) + root = root || rootjQuery; + + // Handle HTML strings + if ( typeof selector === "string" ) { + if ( selector[ 0 ] === "<" && + selector[ selector.length - 1 ] === ">" && + selector.length >= 3 ) { + + // Assume that strings that start and end with <> are HTML and skip the regex check + match = [ null, selector, null ]; + + } else { + match = rquickExpr.exec( selector ); + } + + // Match html or make sure no context is specified for #id + if ( match && ( match[ 1 ] || !context ) ) { + + // HANDLE: $(html) -> $(array) + if ( match[ 1 ] ) { + context = context instanceof jQuery ? context[ 0 ] : context; + + // Option to run scripts is true for back-compat + // Intentionally let the error be thrown if parseHTML is not present + jQuery.merge( this, jQuery.parseHTML( + match[ 1 ], + context && context.nodeType ? 
context.ownerDocument || context : document, + true + ) ); + + // HANDLE: $(html, props) + if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) { + for ( match in context ) { + + // Properties of context are called as methods if possible + if ( jQuery.isFunction( this[ match ] ) ) { + this[ match ]( context[ match ] ); + + // ...and otherwise set as attributes + } else { + this.attr( match, context[ match ] ); + } + } + } + + return this; + + // HANDLE: $(#id) + } else { + elem = document.getElementById( match[ 2 ] ); + + if ( elem ) { + + // Inject the element directly into the jQuery object + this[ 0 ] = elem; + this.length = 1; + } + return this; + } + + // HANDLE: $(expr, $(...)) + } else if ( !context || context.jquery ) { + return ( context || root ).find( selector ); + + // HANDLE: $(expr, context) + // (which is just equivalent to: $(context).find(expr) + } else { + return this.constructor( context ).find( selector ); + } + + // HANDLE: $(DOMElement) + } else if ( selector.nodeType ) { + this[ 0 ] = selector; + this.length = 1; + return this; + + // HANDLE: $(function) + // Shortcut for document ready + } else if ( jQuery.isFunction( selector ) ) { + return root.ready !== undefined ? 
+ root.ready( selector ) : + + // Execute immediately if ready is not present + selector( jQuery ); + } + + return jQuery.makeArray( selector, this ); + }; + +// Give the init function the jQuery prototype for later instantiation +init.prototype = jQuery.fn; + +// Initialize central reference +rootjQuery = jQuery( document ); + + +var rparentsprev = /^(?:parents|prev(?:Until|All))/, + + // Methods guaranteed to produce a unique set when starting from a unique set + guaranteedUnique = { + children: true, + contents: true, + next: true, + prev: true + }; + +jQuery.fn.extend( { + has: function( target ) { + var targets = jQuery( target, this ), + l = targets.length; + + return this.filter( function() { + var i = 0; + for ( ; i < l; i++ ) { + if ( jQuery.contains( this, targets[ i ] ) ) { + return true; + } + } + } ); + }, + + closest: function( selectors, context ) { + var cur, + i = 0, + l = this.length, + matched = [], + targets = typeof selectors !== "string" && jQuery( selectors ); + + // Positional selectors never match, since there's no _selection_ context + if ( !rneedsContext.test( selectors ) ) { + for ( ; i < l; i++ ) { + for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { + + // Always skip document fragments + if ( cur.nodeType < 11 && ( targets ? + targets.index( cur ) > -1 : + + // Don't pass non-elements to Sizzle + cur.nodeType === 1 && + jQuery.find.matchesSelector( cur, selectors ) ) ) { + + matched.push( cur ); + break; + } + } + } + } + + return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); + }, + + // Determine the position of an element within the set + index: function( elem ) { + + // No argument, return index in parent + if ( !elem ) { + return ( this[ 0 ] && this[ 0 ].parentNode ) ? 
this.first().prevAll().length : -1; + } + + // Index in selector + if ( typeof elem === "string" ) { + return indexOf.call( jQuery( elem ), this[ 0 ] ); + } + + // Locate the position of the desired element + return indexOf.call( this, + + // If it receives a jQuery object, the first element is used + elem.jquery ? elem[ 0 ] : elem + ); + }, + + add: function( selector, context ) { + return this.pushStack( + jQuery.uniqueSort( + jQuery.merge( this.get(), jQuery( selector, context ) ) + ) + ); + }, + + addBack: function( selector ) { + return this.add( selector == null ? + this.prevObject : this.prevObject.filter( selector ) + ); + } +} ); + +function sibling( cur, dir ) { + while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} + return cur; +} + +jQuery.each( { + parent: function( elem ) { + var parent = elem.parentNode; + return parent && parent.nodeType !== 11 ? parent : null; + }, + parents: function( elem ) { + return dir( elem, "parentNode" ); + }, + parentsUntil: function( elem, i, until ) { + return dir( elem, "parentNode", until ); + }, + next: function( elem ) { + return sibling( elem, "nextSibling" ); + }, + prev: function( elem ) { + return sibling( elem, "previousSibling" ); + }, + nextAll: function( elem ) { + return dir( elem, "nextSibling" ); + }, + prevAll: function( elem ) { + return dir( elem, "previousSibling" ); + }, + nextUntil: function( elem, i, until ) { + return dir( elem, "nextSibling", until ); + }, + prevUntil: function( elem, i, until ) { + return dir( elem, "previousSibling", until ); + }, + siblings: function( elem ) { + return siblings( ( elem.parentNode || {} ).firstChild, elem ); + }, + children: function( elem ) { + return siblings( elem.firstChild ); + }, + contents: function( elem ) { + if ( nodeName( elem, "iframe" ) ) { + return elem.contentDocument; + } + + // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only + // Treat the template element as a regular one in browsers that + // don't support it. 
+ if ( nodeName( elem, "template" ) ) { + elem = elem.content || elem; + } + + return jQuery.merge( [], elem.childNodes ); + } +}, function( name, fn ) { + jQuery.fn[ name ] = function( until, selector ) { + var matched = jQuery.map( this, fn, until ); + + if ( name.slice( -5 ) !== "Until" ) { + selector = until; + } + + if ( selector && typeof selector === "string" ) { + matched = jQuery.filter( selector, matched ); + } + + if ( this.length > 1 ) { + + // Remove duplicates + if ( !guaranteedUnique[ name ] ) { + jQuery.uniqueSort( matched ); + } + + // Reverse order for parents* and prev-derivatives + if ( rparentsprev.test( name ) ) { + matched.reverse(); + } + } + + return this.pushStack( matched ); + }; +} ); +var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g ); + + + +// Convert String-formatted options into Object-formatted ones +function createOptions( options ) { + var object = {}; + jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) { + object[ flag ] = true; + } ); + return object; +} + +/* + * Create a callback list using the following parameters: + * + * options: an optional list of space-separated options that will change how + * the callback list behaves or a more traditional option object + * + * By default a callback list will act like an event callback list and can be + * "fired" multiple times. + * + * Possible options: + * + * once: will ensure the callback list can only be fired once (like a Deferred) + * + * memory: will keep track of previous values and will call any callback added + * after the list has been fired right away with the latest "memorized" + * values (like a Deferred) + * + * unique: will ensure a callback can only be added once (no duplicate in the list) + * + * stopOnFalse: interrupt callings when a callback returns false + * + */ +jQuery.Callbacks = function( options ) { + + // Convert options from String-formatted to Object-formatted if needed + // (we check in cache first) + options = typeof options === "string" ? 
+ createOptions( options ) : + jQuery.extend( {}, options ); + + var // Flag to know if list is currently firing + firing, + + // Last fire value for non-forgettable lists + memory, + + // Flag to know if list was already fired + fired, + + // Flag to prevent firing + locked, + + // Actual callback list + list = [], + + // Queue of execution data for repeatable lists + queue = [], + + // Index of currently firing callback (modified by add/remove as needed) + firingIndex = -1, + + // Fire callbacks + fire = function() { + + // Enforce single-firing + locked = locked || options.once; + + // Execute callbacks for all pending executions, + // respecting firingIndex overrides and runtime changes + fired = firing = true; + for ( ; queue.length; firingIndex = -1 ) { + memory = queue.shift(); + while ( ++firingIndex < list.length ) { + + // Run callback and check for early termination + if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && + options.stopOnFalse ) { + + // Jump to end and forget the data so .add doesn't re-fire + firingIndex = list.length; + memory = false; + } + } + } + + // Forget the data if we're done with it + if ( !options.memory ) { + memory = false; + } + + firing = false; + + // Clean up if we're done firing for good + if ( locked ) { + + // Keep an empty list if we have data for future add calls + if ( memory ) { + list = []; + + // Otherwise, this object is spent + } else { + list = ""; + } + } + }, + + // Actual Callbacks object + self = { + + // Add a callback or a collection of callbacks to the list + add: function() { + if ( list ) { + + // If we have memory from a past run, we should fire after adding + if ( memory && !firing ) { + firingIndex = list.length - 1; + queue.push( memory ); + } + + ( function add( args ) { + jQuery.each( args, function( _, arg ) { + if ( jQuery.isFunction( arg ) ) { + if ( !options.unique || !self.has( arg ) ) { + list.push( arg ); + } + } else if ( arg && arg.length && jQuery.type( arg ) !== 
"string" ) { + + // Inspect recursively + add( arg ); + } + } ); + } )( arguments ); + + if ( memory && !firing ) { + fire(); + } + } + return this; + }, + + // Remove a callback from the list + remove: function() { + jQuery.each( arguments, function( _, arg ) { + var index; + while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { + list.splice( index, 1 ); + + // Handle firing indexes + if ( index <= firingIndex ) { + firingIndex--; + } + } + } ); + return this; + }, + + // Check if a given callback is in the list. + // If no argument is given, return whether or not list has callbacks attached. + has: function( fn ) { + return fn ? + jQuery.inArray( fn, list ) > -1 : + list.length > 0; + }, + + // Remove all callbacks from the list + empty: function() { + if ( list ) { + list = []; + } + return this; + }, + + // Disable .fire and .add + // Abort any current/pending executions + // Clear all callbacks and values + disable: function() { + locked = queue = []; + list = memory = ""; + return this; + }, + disabled: function() { + return !list; + }, + + // Disable .fire + // Also disable .add unless we have memory (since it would have no effect) + // Abort any pending executions + lock: function() { + locked = queue = []; + if ( !memory && !firing ) { + list = memory = ""; + } + return this; + }, + locked: function() { + return !!locked; + }, + + // Call all callbacks with the given context and arguments + fireWith: function( context, args ) { + if ( !locked ) { + args = args || []; + args = [ context, args.slice ? 
args.slice() : args ]; + queue.push( args ); + if ( !firing ) { + fire(); + } + } + return this; + }, + + // Call all the callbacks with the given arguments + fire: function() { + self.fireWith( this, arguments ); + return this; + }, + + // To know if the callbacks have already been called at least once + fired: function() { + return !!fired; + } + }; + + return self; +}; + + +function Identity( v ) { + return v; +} +function Thrower( ex ) { + throw ex; +} + +function adoptValue( value, resolve, reject, noValue ) { + var method; + + try { + + // Check for promise aspect first to privilege synchronous behavior + if ( value && jQuery.isFunction( ( method = value.promise ) ) ) { + method.call( value ).done( resolve ).fail( reject ); + + // Other thenables + } else if ( value && jQuery.isFunction( ( method = value.then ) ) ) { + method.call( value, resolve, reject ); + + // Other non-thenables + } else { + + // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: + // * false: [ value ].slice( 0 ) => resolve( value ) + // * true: [ value ].slice( 1 ) => resolve() + resolve.apply( undefined, [ value ].slice( noValue ) ); + } + + // For Promises/A+, convert exceptions into rejections + // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in + // Deferred#then to conditionally suppress rejection. + } catch ( value ) { + + // Support: Android 4.0 only + // Strict mode functions invoked without .call/.apply get global-object context + reject.apply( undefined, [ value ] ); + } +} + +jQuery.extend( { + + Deferred: function( func ) { + var tuples = [ + + // action, add listener, callbacks, + // ... 
.then handlers, argument index, [final state] + [ "notify", "progress", jQuery.Callbacks( "memory" ), + jQuery.Callbacks( "memory" ), 2 ], + [ "resolve", "done", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 0, "resolved" ], + [ "reject", "fail", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 1, "rejected" ] + ], + state = "pending", + promise = { + state: function() { + return state; + }, + always: function() { + deferred.done( arguments ).fail( arguments ); + return this; + }, + "catch": function( fn ) { + return promise.then( null, fn ); + }, + + // Keep pipe for back-compat + pipe: function( /* fnDone, fnFail, fnProgress */ ) { + var fns = arguments; + + return jQuery.Deferred( function( newDefer ) { + jQuery.each( tuples, function( i, tuple ) { + + // Map tuples (progress, done, fail) to arguments (done, fail, progress) + var fn = jQuery.isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; + + // deferred.progress(function() { bind to newDefer or newDefer.notify }) + // deferred.done(function() { bind to newDefer or newDefer.resolve }) + // deferred.fail(function() { bind to newDefer or newDefer.reject }) + deferred[ tuple[ 1 ] ]( function() { + var returned = fn && fn.apply( this, arguments ); + if ( returned && jQuery.isFunction( returned.promise ) ) { + returned.promise() + .progress( newDefer.notify ) + .done( newDefer.resolve ) + .fail( newDefer.reject ); + } else { + newDefer[ tuple[ 0 ] + "With" ]( + this, + fn ? 
[ returned ] : arguments + ); + } + } ); + } ); + fns = null; + } ).promise(); + }, + then: function( onFulfilled, onRejected, onProgress ) { + var maxDepth = 0; + function resolve( depth, deferred, handler, special ) { + return function() { + var that = this, + args = arguments, + mightThrow = function() { + var returned, then; + + // Support: Promises/A+ section 2.3.3.3.3 + // https://promisesaplus.com/#point-59 + // Ignore double-resolution attempts + if ( depth < maxDepth ) { + return; + } + + returned = handler.apply( that, args ); + + // Support: Promises/A+ section 2.3.1 + // https://promisesaplus.com/#point-48 + if ( returned === deferred.promise() ) { + throw new TypeError( "Thenable self-resolution" ); + } + + // Support: Promises/A+ sections 2.3.3.1, 3.5 + // https://promisesaplus.com/#point-54 + // https://promisesaplus.com/#point-75 + // Retrieve `then` only once + then = returned && + + // Support: Promises/A+ section 2.3.4 + // https://promisesaplus.com/#point-64 + // Only check objects and functions for thenability + ( typeof returned === "object" || + typeof returned === "function" ) && + returned.then; + + // Handle a returned thenable + if ( jQuery.isFunction( then ) ) { + + // Special processors (notify) just wait for resolution + if ( special ) { + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ) + ); + + // Normal processors (resolve) also hook into progress + } else { + + // ...and disregard older resolution values + maxDepth++; + + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ), + resolve( maxDepth, deferred, Identity, + deferred.notifyWith ) + ); + } + + // Handle all other returned values + } else { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Identity ) { + that = undefined; + args = [ returned ]; + } + + // Process the 
value(s) + // Default process is resolve + ( special || deferred.resolveWith )( that, args ); + } + }, + + // Only normal processors (resolve) catch and reject exceptions + process = special ? + mightThrow : + function() { + try { + mightThrow(); + } catch ( e ) { + + if ( jQuery.Deferred.exceptionHook ) { + jQuery.Deferred.exceptionHook( e, + process.stackTrace ); + } + + // Support: Promises/A+ section 2.3.3.3.4.1 + // https://promisesaplus.com/#point-61 + // Ignore post-resolution exceptions + if ( depth + 1 >= maxDepth ) { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Thrower ) { + that = undefined; + args = [ e ]; + } + + deferred.rejectWith( that, args ); + } + } + }; + + // Support: Promises/A+ section 2.3.3.3.1 + // https://promisesaplus.com/#point-57 + // Re-resolve promises immediately to dodge false rejection from + // subsequent errors + if ( depth ) { + process(); + } else { + + // Call an optional hook to record the stack, in case of exception + // since it's otherwise lost when execution goes async + if ( jQuery.Deferred.getStackHook ) { + process.stackTrace = jQuery.Deferred.getStackHook(); + } + window.setTimeout( process ); + } + }; + } + + return jQuery.Deferred( function( newDefer ) { + + // progress_handlers.add( ... ) + tuples[ 0 ][ 3 ].add( + resolve( + 0, + newDefer, + jQuery.isFunction( onProgress ) ? + onProgress : + Identity, + newDefer.notifyWith + ) + ); + + // fulfilled_handlers.add( ... ) + tuples[ 1 ][ 3 ].add( + resolve( + 0, + newDefer, + jQuery.isFunction( onFulfilled ) ? + onFulfilled : + Identity + ) + ); + + // rejected_handlers.add( ... ) + tuples[ 2 ][ 3 ].add( + resolve( + 0, + newDefer, + jQuery.isFunction( onRejected ) ? + onRejected : + Thrower + ) + ); + } ).promise(); + }, + + // Get a promise for this deferred + // If obj is provided, the promise aspect is added to the object + promise: function( obj ) { + return obj != null ? 
jQuery.extend( obj, promise ) : promise; + } + }, + deferred = {}; + + // Add list-specific methods + jQuery.each( tuples, function( i, tuple ) { + var list = tuple[ 2 ], + stateString = tuple[ 5 ]; + + // promise.progress = list.add + // promise.done = list.add + // promise.fail = list.add + promise[ tuple[ 1 ] ] = list.add; + + // Handle state + if ( stateString ) { + list.add( + function() { + + // state = "resolved" (i.e., fulfilled) + // state = "rejected" + state = stateString; + }, + + // rejected_callbacks.disable + // fulfilled_callbacks.disable + tuples[ 3 - i ][ 2 ].disable, + + // progress_callbacks.lock + tuples[ 0 ][ 2 ].lock + ); + } + + // progress_handlers.fire + // fulfilled_handlers.fire + // rejected_handlers.fire + list.add( tuple[ 3 ].fire ); + + // deferred.notify = function() { deferred.notifyWith(...) } + // deferred.resolve = function() { deferred.resolveWith(...) } + // deferred.reject = function() { deferred.rejectWith(...) } + deferred[ tuple[ 0 ] ] = function() { + deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); + return this; + }; + + // deferred.notifyWith = list.fireWith + // deferred.resolveWith = list.fireWith + // deferred.rejectWith = list.fireWith + deferred[ tuple[ 0 ] + "With" ] = list.fireWith; + } ); + + // Make the deferred a promise + promise.promise( deferred ); + + // Call given func if any + if ( func ) { + func.call( deferred, deferred ); + } + + // All done! 
+ return deferred; + }, + + // Deferred helper + when: function( singleValue ) { + var + + // count of uncompleted subordinates + remaining = arguments.length, + + // count of unprocessed arguments + i = remaining, + + // subordinate fulfillment data + resolveContexts = Array( i ), + resolveValues = slice.call( arguments ), + + // the master Deferred + master = jQuery.Deferred(), + + // subordinate callback factory + updateFunc = function( i ) { + return function( value ) { + resolveContexts[ i ] = this; + resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; + if ( !( --remaining ) ) { + master.resolveWith( resolveContexts, resolveValues ); + } + }; + }; + + // Single- and empty arguments are adopted like Promise.resolve + if ( remaining <= 1 ) { + adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject, + !remaining ); + + // Use .then() to unwrap secondary thenables (cf. gh-3000) + if ( master.state() === "pending" || + jQuery.isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { + + return master.then(); + } + } + + // Multiple arguments are aggregated like Promise.all array elements + while ( i-- ) { + adoptValue( resolveValues[ i ], updateFunc( i ), master.reject ); + } + + return master.promise(); + } +} ); + + +// These usually indicate a programmer mistake during development, +// warn about them ASAP rather than swallowing them by default. 
+var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; + +jQuery.Deferred.exceptionHook = function( error, stack ) { + + // Support: IE 8 - 9 only + // Console exists when dev tools are open, which can happen at any time + if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { + window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); + } +}; + + + + +jQuery.readyException = function( error ) { + window.setTimeout( function() { + throw error; + } ); +}; + + + + +// The deferred used on DOM ready +var readyList = jQuery.Deferred(); + +jQuery.fn.ready = function( fn ) { + + readyList + .then( fn ) + + // Wrap jQuery.readyException in a function so that the lookup + // happens at the time of error handling instead of callback + // registration. + .catch( function( error ) { + jQuery.readyException( error ); + } ); + + return this; +}; + +jQuery.extend( { + + // Is the DOM ready to be used? Set to true once it occurs. + isReady: false, + + // A counter to track how many items to wait for before + // the ready event fires. See #6781 + readyWait: 1, + + // Handle when the DOM is ready + ready: function( wait ) { + + // Abort if there are pending holds or we're already ready + if ( wait === true ? 
--jQuery.readyWait : jQuery.isReady ) { + return; + } + + // Remember that the DOM is ready + jQuery.isReady = true; + + // If a normal DOM Ready event fired, decrement, and wait if need be + if ( wait !== true && --jQuery.readyWait > 0 ) { + return; + } + + // If there are functions bound, to execute + readyList.resolveWith( document, [ jQuery ] ); + } +} ); + +jQuery.ready.then = readyList.then; + +// The ready event handler and self cleanup method +function completed() { + document.removeEventListener( "DOMContentLoaded", completed ); + window.removeEventListener( "load", completed ); + jQuery.ready(); +} + +// Catch cases where $(document).ready() is called +// after the browser event has already occurred. +// Support: IE <=9 - 10 only +// Older IE sometimes signals "interactive" too soon +if ( document.readyState === "complete" || + ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { + + // Handle it asynchronously to allow scripts the opportunity to delay ready + window.setTimeout( jQuery.ready ); + +} else { + + // Use the handy event callback + document.addEventListener( "DOMContentLoaded", completed ); + + // A fallback to window.onload, that will always work + window.addEventListener( "load", completed ); +} + + + + +// Multifunctional method to get and set values of a collection +// The value/s can optionally be executed if it's a function +var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { + var i = 0, + len = elems.length, + bulk = key == null; + + // Sets many values + if ( jQuery.type( key ) === "object" ) { + chainable = true; + for ( i in key ) { + access( elems, fn, i, key[ i ], true, emptyGet, raw ); + } + + // Sets one value + } else if ( value !== undefined ) { + chainable = true; + + if ( !jQuery.isFunction( value ) ) { + raw = true; + } + + if ( bulk ) { + + // Bulk operations run against the entire set + if ( raw ) { + fn.call( elems, value ); + fn = null; + + // ...except when executing 
function values + } else { + bulk = fn; + fn = function( elem, key, value ) { + return bulk.call( jQuery( elem ), value ); + }; + } + } + + if ( fn ) { + for ( ; i < len; i++ ) { + fn( + elems[ i ], key, raw ? + value : + value.call( elems[ i ], i, fn( elems[ i ], key ) ) + ); + } + } + } + + if ( chainable ) { + return elems; + } + + // Gets + if ( bulk ) { + return fn.call( elems ); + } + + return len ? fn( elems[ 0 ], key ) : emptyGet; +}; +var acceptData = function( owner ) { + + // Accepts only: + // - Node + // - Node.ELEMENT_NODE + // - Node.DOCUMENT_NODE + // - Object + // - Any + return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); +}; + + + + +function Data() { + this.expando = jQuery.expando + Data.uid++; +} + +Data.uid = 1; + +Data.prototype = { + + cache: function( owner ) { + + // Check if the owner object already has a cache + var value = owner[ this.expando ]; + + // If not, create one + if ( !value ) { + value = {}; + + // We can accept data for non-element nodes in modern browsers, + // but we should not, see #8335. + // Always return an empty object. 
+ if ( acceptData( owner ) ) { + + // If it is a node unlikely to be stringify-ed or looped over + // use plain assignment + if ( owner.nodeType ) { + owner[ this.expando ] = value; + + // Otherwise secure it in a non-enumerable property + // configurable must be true to allow the property to be + // deleted when data is removed + } else { + Object.defineProperty( owner, this.expando, { + value: value, + configurable: true + } ); + } + } + } + + return value; + }, + set: function( owner, data, value ) { + var prop, + cache = this.cache( owner ); + + // Handle: [ owner, key, value ] args + // Always use camelCase key (gh-2257) + if ( typeof data === "string" ) { + cache[ jQuery.camelCase( data ) ] = value; + + // Handle: [ owner, { properties } ] args + } else { + + // Copy the properties one-by-one to the cache object + for ( prop in data ) { + cache[ jQuery.camelCase( prop ) ] = data[ prop ]; + } + } + return cache; + }, + get: function( owner, key ) { + return key === undefined ? + this.cache( owner ) : + + // Always use camelCase key (gh-2257) + owner[ this.expando ] && owner[ this.expando ][ jQuery.camelCase( key ) ]; + }, + access: function( owner, key, value ) { + + // In cases where either: + // + // 1. No key was specified + // 2. A string key was specified, but no value provided + // + // Take the "read" path and allow the get method to determine + // which value to return, respectively either: + // + // 1. The entire cache object + // 2. The data stored at the key + // + if ( key === undefined || + ( ( key && typeof key === "string" ) && value === undefined ) ) { + + return this.get( owner, key ); + } + + // When the key is not a string, or both a key and value + // are specified, set or extend (existing objects) with either: + // + // 1. An object of properties + // 2. 
A key and value + // + this.set( owner, key, value ); + + // Since the "set" path can have two possible entry points + // return the expected data based on which path was taken[*] + return value !== undefined ? value : key; + }, + remove: function( owner, key ) { + var i, + cache = owner[ this.expando ]; + + if ( cache === undefined ) { + return; + } + + if ( key !== undefined ) { + + // Support array or space separated string of keys + if ( Array.isArray( key ) ) { + + // If key is an array of keys... + // We always set camelCase keys, so remove that. + key = key.map( jQuery.camelCase ); + } else { + key = jQuery.camelCase( key ); + + // If a key with the spaces exists, use it. + // Otherwise, create an array by matching non-whitespace + key = key in cache ? + [ key ] : + ( key.match( rnothtmlwhite ) || [] ); + } + + i = key.length; + + while ( i-- ) { + delete cache[ key[ i ] ]; + } + } + + // Remove the expando if there's no more data + if ( key === undefined || jQuery.isEmptyObject( cache ) ) { + + // Support: Chrome <=35 - 45 + // Webkit & Blink performance suffers when deleting properties + // from DOM nodes, so set to undefined instead + // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) + if ( owner.nodeType ) { + owner[ this.expando ] = undefined; + } else { + delete owner[ this.expando ]; + } + } + }, + hasData: function( owner ) { + var cache = owner[ this.expando ]; + return cache !== undefined && !jQuery.isEmptyObject( cache ); + } +}; +var dataPriv = new Data(); + +var dataUser = new Data(); + + + +// Implementation Summary +// +// 1. Enforce API surface and semantic compatibility with 1.9.x branch +// 2. Improve the module's maintainability by reducing the storage +// paths to a single mechanism. +// 3. Use the same single mechanism to support "private" and "user" data. +// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) +// 5. Avoid exposing implementation details on user objects (eg. 
expando properties) +// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 + +var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, + rmultiDash = /[A-Z]/g; + +function getData( data ) { + if ( data === "true" ) { + return true; + } + + if ( data === "false" ) { + return false; + } + + if ( data === "null" ) { + return null; + } + + // Only convert to a number if it doesn't change the string + if ( data === +data + "" ) { + return +data; + } + + if ( rbrace.test( data ) ) { + return JSON.parse( data ); + } + + return data; +} + +function dataAttr( elem, key, data ) { + var name; + + // If nothing was found internally, try to fetch any + // data from the HTML5 data-* attribute + if ( data === undefined && elem.nodeType === 1 ) { + name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); + data = elem.getAttribute( name ); + + if ( typeof data === "string" ) { + try { + data = getData( data ); + } catch ( e ) {} + + // Make sure we set the data so it isn't changed later + dataUser.set( elem, key, data ); + } else { + data = undefined; + } + } + return data; +} + +jQuery.extend( { + hasData: function( elem ) { + return dataUser.hasData( elem ) || dataPriv.hasData( elem ); + }, + + data: function( elem, name, data ) { + return dataUser.access( elem, name, data ); + }, + + removeData: function( elem, name ) { + dataUser.remove( elem, name ); + }, + + // TODO: Now that all calls to _data and _removeData have been replaced + // with direct calls to dataPriv methods, these can be deprecated. 
+ _data: function( elem, name, data ) { + return dataPriv.access( elem, name, data ); + }, + + _removeData: function( elem, name ) { + dataPriv.remove( elem, name ); + } +} ); + +jQuery.fn.extend( { + data: function( key, value ) { + var i, name, data, + elem = this[ 0 ], + attrs = elem && elem.attributes; + + // Gets all values + if ( key === undefined ) { + if ( this.length ) { + data = dataUser.get( elem ); + + if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { + i = attrs.length; + while ( i-- ) { + + // Support: IE 11 only + // The attrs elements can be null (#14894) + if ( attrs[ i ] ) { + name = attrs[ i ].name; + if ( name.indexOf( "data-" ) === 0 ) { + name = jQuery.camelCase( name.slice( 5 ) ); + dataAttr( elem, name, data[ name ] ); + } + } + } + dataPriv.set( elem, "hasDataAttrs", true ); + } + } + + return data; + } + + // Sets multiple values + if ( typeof key === "object" ) { + return this.each( function() { + dataUser.set( this, key ); + } ); + } + + return access( this, function( value ) { + var data; + + // The calling jQuery object (element matches) is not empty + // (and therefore has an element appears at this[ 0 ]) and the + // `value` parameter was not undefined. An empty jQuery object + // will result in `undefined` for elem = this[ 0 ] which will + // throw an exception if an attempt to read a data cache is made. + if ( elem && value === undefined ) { + + // Attempt to get data from the cache + // The key will always be camelCased in Data + data = dataUser.get( elem, key ); + if ( data !== undefined ) { + return data; + } + + // Attempt to "discover" the data in + // HTML5 custom data-* attrs + data = dataAttr( elem, key ); + if ( data !== undefined ) { + return data; + } + + // We tried really hard, but the data doesn't exist. + return; + } + + // Set the data... 
+ this.each( function() { + + // We always store the camelCased key + dataUser.set( this, key, value ); + } ); + }, null, value, arguments.length > 1, null, true ); + }, + + removeData: function( key ) { + return this.each( function() { + dataUser.remove( this, key ); + } ); + } +} ); + + +jQuery.extend( { + queue: function( elem, type, data ) { + var queue; + + if ( elem ) { + type = ( type || "fx" ) + "queue"; + queue = dataPriv.get( elem, type ); + + // Speed up dequeue by getting out quickly if this is just a lookup + if ( data ) { + if ( !queue || Array.isArray( data ) ) { + queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); + } else { + queue.push( data ); + } + } + return queue || []; + } + }, + + dequeue: function( elem, type ) { + type = type || "fx"; + + var queue = jQuery.queue( elem, type ), + startLength = queue.length, + fn = queue.shift(), + hooks = jQuery._queueHooks( elem, type ), + next = function() { + jQuery.dequeue( elem, type ); + }; + + // If the fx queue is dequeued, always remove the progress sentinel + if ( fn === "inprogress" ) { + fn = queue.shift(); + startLength--; + } + + if ( fn ) { + + // Add a progress sentinel to prevent the fx queue from being + // automatically dequeued + if ( type === "fx" ) { + queue.unshift( "inprogress" ); + } + + // Clear up the last queue stop function + delete hooks.stop; + fn.call( elem, next, hooks ); + } + + if ( !startLength && hooks ) { + hooks.empty.fire(); + } + }, + + // Not public - generate a queueHooks object, or return the current one + _queueHooks: function( elem, type ) { + var key = type + "queueHooks"; + return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { + empty: jQuery.Callbacks( "once memory" ).add( function() { + dataPriv.remove( elem, [ type + "queue", key ] ); + } ) + } ); + } +} ); + +jQuery.fn.extend( { + queue: function( type, data ) { + var setter = 2; + + if ( typeof type !== "string" ) { + data = type; + type = "fx"; + setter--; + } + + if ( 
arguments.length < setter ) { + return jQuery.queue( this[ 0 ], type ); + } + + return data === undefined ? + this : + this.each( function() { + var queue = jQuery.queue( this, type, data ); + + // Ensure a hooks for this queue + jQuery._queueHooks( this, type ); + + if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { + jQuery.dequeue( this, type ); + } + } ); + }, + dequeue: function( type ) { + return this.each( function() { + jQuery.dequeue( this, type ); + } ); + }, + clearQueue: function( type ) { + return this.queue( type || "fx", [] ); + }, + + // Get a promise resolved when queues of a certain type + // are emptied (fx is the type by default) + promise: function( type, obj ) { + var tmp, + count = 1, + defer = jQuery.Deferred(), + elements = this, + i = this.length, + resolve = function() { + if ( !( --count ) ) { + defer.resolveWith( elements, [ elements ] ); + } + }; + + if ( typeof type !== "string" ) { + obj = type; + type = undefined; + } + type = type || "fx"; + + while ( i-- ) { + tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); + if ( tmp && tmp.empty ) { + count++; + tmp.empty.add( resolve ); + } + } + resolve(); + return defer.promise( obj ); + } +} ); +var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; + +var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); + + +var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; + +var isHiddenWithinTree = function( elem, el ) { + + // isHiddenWithinTree might be called from jQuery#filter function; + // in that case, element will be second argument + elem = el || elem; + + // Inline style trumps all + return elem.style.display === "none" || + elem.style.display === "" && + + // Otherwise, check computed style + // Support: Firefox <=43 - 45 + // Disconnected elements can have computed display: none, so first confirm that elem is + // in the document. 
+ jQuery.contains( elem.ownerDocument, elem ) && + + jQuery.css( elem, "display" ) === "none"; + }; + +var swap = function( elem, options, callback, args ) { + var ret, name, + old = {}; + + // Remember the old values, and insert the new ones + for ( name in options ) { + old[ name ] = elem.style[ name ]; + elem.style[ name ] = options[ name ]; + } + + ret = callback.apply( elem, args || [] ); + + // Revert the old values + for ( name in options ) { + elem.style[ name ] = old[ name ]; + } + + return ret; +}; + + + + +function adjustCSS( elem, prop, valueParts, tween ) { + var adjusted, + scale = 1, + maxIterations = 20, + currentValue = tween ? + function() { + return tween.cur(); + } : + function() { + return jQuery.css( elem, prop, "" ); + }, + initial = currentValue(), + unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? "" : "px" ), + + // Starting value computation is required for potential unit mismatches + initialInUnit = ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && + rcssNum.exec( jQuery.css( elem, prop ) ); + + if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { + + // Trust units reported by jQuery.css + unit = unit || initialInUnit[ 3 ]; + + // Make sure we update the tween properties later on + valueParts = valueParts || []; + + // Iteratively approximate from a nonzero starting point + initialInUnit = +initial || 1; + + do { + + // If previous iteration zeroed out, double until we get *something*. + // Use string for doubling so we don't accidentally see scale as unchanged below + scale = scale || ".5"; + + // Adjust and apply + initialInUnit = initialInUnit / scale; + jQuery.style( elem, prop, initialInUnit + unit ); + + // Update scale, tolerating zero or NaN from tween.cur() + // Break the loop if scale is unchanged or perfect, or if we've just had enough. 
+ } while ( + scale !== ( scale = currentValue() / initial ) && scale !== 1 && --maxIterations + ); + } + + if ( valueParts ) { + initialInUnit = +initialInUnit || +initial || 0; + + // Apply relative offset (+=/-=) if specified + adjusted = valueParts[ 1 ] ? + initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : + +valueParts[ 2 ]; + if ( tween ) { + tween.unit = unit; + tween.start = initialInUnit; + tween.end = adjusted; + } + } + return adjusted; +} + + +var defaultDisplayMap = {}; + +function getDefaultDisplay( elem ) { + var temp, + doc = elem.ownerDocument, + nodeName = elem.nodeName, + display = defaultDisplayMap[ nodeName ]; + + if ( display ) { + return display; + } + + temp = doc.body.appendChild( doc.createElement( nodeName ) ); + display = jQuery.css( temp, "display" ); + + temp.parentNode.removeChild( temp ); + + if ( display === "none" ) { + display = "block"; + } + defaultDisplayMap[ nodeName ] = display; + + return display; +} + +function showHide( elements, show ) { + var display, elem, + values = [], + index = 0, + length = elements.length; + + // Determine new display value for elements that need to change + for ( ; index < length; index++ ) { + elem = elements[ index ]; + if ( !elem.style ) { + continue; + } + + display = elem.style.display; + if ( show ) { + + // Since we force visibility upon cascade-hidden elements, an immediate (and slow) + // check is required in this first loop unless we have a nonempty display value (either + // inline or about-to-be-restored) + if ( display === "none" ) { + values[ index ] = dataPriv.get( elem, "display" ) || null; + if ( !values[ index ] ) { + elem.style.display = ""; + } + } + if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { + values[ index ] = getDefaultDisplay( elem ); + } + } else { + if ( display !== "none" ) { + values[ index ] = "none"; + + // Remember what we're overwriting + dataPriv.set( elem, "display", display ); + } + } + } + + // Set the display of the elements in a 
second loop to avoid constant reflow + for ( index = 0; index < length; index++ ) { + if ( values[ index ] != null ) { + elements[ index ].style.display = values[ index ]; + } + } + + return elements; +} + +jQuery.fn.extend( { + show: function() { + return showHide( this, true ); + }, + hide: function() { + return showHide( this ); + }, + toggle: function( state ) { + if ( typeof state === "boolean" ) { + return state ? this.show() : this.hide(); + } + + return this.each( function() { + if ( isHiddenWithinTree( this ) ) { + jQuery( this ).show(); + } else { + jQuery( this ).hide(); + } + } ); + } +} ); +var rcheckableType = ( /^(?:checkbox|radio)$/i ); + +var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]+)/i ); + +var rscriptType = ( /^$|\/(?:java|ecma)script/i ); + + + +// We have to close these tags to support XHTML (#13200) +var wrapMap = { + + // Support: IE <=9 only + option: [ 1, "" ], + + // XHTML parsers do not magically insert elements in the + // same way that tag soup parsers do. So we cannot shorten + // this by omitting or other required elements. + thead: [ 1, "", "
" ], + col: [ 2, "", "
" ], + tr: [ 2, "", "
" ], + td: [ 3, "", "
" ], + + _default: [ 0, "", "" ] +}; + +// Support: IE <=9 only +wrapMap.optgroup = wrapMap.option; + +wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; +wrapMap.th = wrapMap.td; + + +function getAll( context, tag ) { + + // Support: IE <=9 - 11 only + // Use typeof to avoid zero-argument method invocation on host objects (#15151) + var ret; + + if ( typeof context.getElementsByTagName !== "undefined" ) { + ret = context.getElementsByTagName( tag || "*" ); + + } else if ( typeof context.querySelectorAll !== "undefined" ) { + ret = context.querySelectorAll( tag || "*" ); + + } else { + ret = []; + } + + if ( tag === undefined || tag && nodeName( context, tag ) ) { + return jQuery.merge( [ context ], ret ); + } + + return ret; +} + + +// Mark scripts as having already been evaluated +function setGlobalEval( elems, refElements ) { + var i = 0, + l = elems.length; + + for ( ; i < l; i++ ) { + dataPriv.set( + elems[ i ], + "globalEval", + !refElements || dataPriv.get( refElements[ i ], "globalEval" ) + ); + } +} + + +var rhtml = /<|&#?\w+;/; + +function buildFragment( elems, context, scripts, selection, ignored ) { + var elem, tmp, tag, wrap, contains, j, + fragment = context.createDocumentFragment(), + nodes = [], + i = 0, + l = elems.length; + + for ( ; i < l; i++ ) { + elem = elems[ i ]; + + if ( elem || elem === 0 ) { + + // Add nodes directly + if ( jQuery.type( elem ) === "object" ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( nodes, elem.nodeType ? 
[ elem ] : elem ); + + // Convert non-html into a text node + } else if ( !rhtml.test( elem ) ) { + nodes.push( context.createTextNode( elem ) ); + + // Convert html into DOM nodes + } else { + tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); + + // Deserialize a standard representation + tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); + wrap = wrapMap[ tag ] || wrapMap._default; + tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; + + // Descend through wrappers to the right content + j = wrap[ 0 ]; + while ( j-- ) { + tmp = tmp.lastChild; + } + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( nodes, tmp.childNodes ); + + // Remember the top-level container + tmp = fragment.firstChild; + + // Ensure the created nodes are orphaned (#12392) + tmp.textContent = ""; + } + } + } + + // Remove wrapper from fragment + fragment.textContent = ""; + + i = 0; + while ( ( elem = nodes[ i++ ] ) ) { + + // Skip elements already in the context collection (trac-4087) + if ( selection && jQuery.inArray( elem, selection ) > -1 ) { + if ( ignored ) { + ignored.push( elem ); + } + continue; + } + + contains = jQuery.contains( elem.ownerDocument, elem ); + + // Append to fragment + tmp = getAll( fragment.appendChild( elem ), "script" ); + + // Preserve script evaluation history + if ( contains ) { + setGlobalEval( tmp ); + } + + // Capture executables + if ( scripts ) { + j = 0; + while ( ( elem = tmp[ j++ ] ) ) { + if ( rscriptType.test( elem.type || "" ) ) { + scripts.push( elem ); + } + } + } + } + + return fragment; +} + + +( function() { + var fragment = document.createDocumentFragment(), + div = fragment.appendChild( document.createElement( "div" ) ), + input = document.createElement( "input" ); + + // Support: Android 4.0 - 4.3 only + // Check state lost if the name is set (#11217) + // Support: Windows Web Apps (WWA) + // `name` and `type` must use 
.setAttribute for WWA (#14901) + input.setAttribute( "type", "radio" ); + input.setAttribute( "checked", "checked" ); + input.setAttribute( "name", "t" ); + + div.appendChild( input ); + + // Support: Android <=4.1 only + // Older WebKit doesn't clone checked state correctly in fragments + support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; + + // Support: IE <=11 only + // Make sure textarea (and checkbox) defaultValue is properly cloned + div.innerHTML = ""; + support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; +} )(); +var documentElement = document.documentElement; + + + +var + rkeyEvent = /^key/, + rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/, + rtypenamespace = /^([^.]*)(?:\.(.+)|)/; + +function returnTrue() { + return true; +} + +function returnFalse() { + return false; +} + +// Support: IE <=9 only +// See #13393 for more info +function safeActiveElement() { + try { + return document.activeElement; + } catch ( err ) { } +} + +function on( elem, types, selector, data, fn, one ) { + var origFn, type; + + // Types can be a map of types/handlers + if ( typeof types === "object" ) { + + // ( types-Object, selector, data ) + if ( typeof selector !== "string" ) { + + // ( types-Object, data ) + data = data || selector; + selector = undefined; + } + for ( type in types ) { + on( elem, type, selector, data, types[ type ], one ); + } + return elem; + } + + if ( data == null && fn == null ) { + + // ( types, fn ) + fn = selector; + data = selector = undefined; + } else if ( fn == null ) { + if ( typeof selector === "string" ) { + + // ( types, selector, fn ) + fn = data; + data = undefined; + } else { + + // ( types, data, fn ) + fn = data; + data = selector; + selector = undefined; + } + } + if ( fn === false ) { + fn = returnFalse; + } else if ( !fn ) { + return elem; + } + + if ( one === 1 ) { + origFn = fn; + fn = function( event ) { + + // Can use an empty set, since event contains the info + 
jQuery().off( event ); + return origFn.apply( this, arguments ); + }; + + // Use same guid so caller can remove using origFn + fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); + } + return elem.each( function() { + jQuery.event.add( this, types, fn, data, selector ); + } ); +} + +/* + * Helper functions for managing events -- not part of the public interface. + * Props to Dean Edwards' addEvent library for many of the ideas. + */ +jQuery.event = { + + global: {}, + + add: function( elem, types, handler, data, selector ) { + + var handleObjIn, eventHandle, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.get( elem ); + + // Don't attach events to noData or text/comment nodes (but allow plain objects) + if ( !elemData ) { + return; + } + + // Caller can pass in an object of custom data in lieu of the handler + if ( handler.handler ) { + handleObjIn = handler; + handler = handleObjIn.handler; + selector = handleObjIn.selector; + } + + // Ensure that invalid selectors throw exceptions at attach time + // Evaluate against documentElement in case elem is a non-element node (e.g., document) + if ( selector ) { + jQuery.find.matchesSelector( documentElement, selector ); + } + + // Make sure that the handler has a unique ID, used to find/remove it later + if ( !handler.guid ) { + handler.guid = jQuery.guid++; + } + + // Init the element's event structure and main handler, if this is the first + if ( !( events = elemData.events ) ) { + events = elemData.events = {}; + } + if ( !( eventHandle = elemData.handle ) ) { + eventHandle = elemData.handle = function( e ) { + + // Discard the second event of a jQuery.event.trigger() and + // when an event is called after a page has unloaded + return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? 
+ jQuery.event.dispatch.apply( elem, arguments ) : undefined; + }; + } + + // Handle multiple events separated by a space + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // There *must* be a type, no attaching namespace-only handlers + if ( !type ) { + continue; + } + + // If event changes its type, use the special event handlers for the changed type + special = jQuery.event.special[ type ] || {}; + + // If selector defined, determine special event api type, otherwise given type + type = ( selector ? special.delegateType : special.bindType ) || type; + + // Update special based on newly reset type + special = jQuery.event.special[ type ] || {}; + + // handleObj is passed to all event handlers + handleObj = jQuery.extend( { + type: type, + origType: origType, + data: data, + handler: handler, + guid: handler.guid, + selector: selector, + needsContext: selector && jQuery.expr.match.needsContext.test( selector ), + namespace: namespaces.join( "." 
) + }, handleObjIn ); + + // Init the event handler queue if we're the first + if ( !( handlers = events[ type ] ) ) { + handlers = events[ type ] = []; + handlers.delegateCount = 0; + + // Only use addEventListener if the special events handler returns false + if ( !special.setup || + special.setup.call( elem, data, namespaces, eventHandle ) === false ) { + + if ( elem.addEventListener ) { + elem.addEventListener( type, eventHandle ); + } + } + } + + if ( special.add ) { + special.add.call( elem, handleObj ); + + if ( !handleObj.handler.guid ) { + handleObj.handler.guid = handler.guid; + } + } + + // Add to the element's handler list, delegates in front + if ( selector ) { + handlers.splice( handlers.delegateCount++, 0, handleObj ); + } else { + handlers.push( handleObj ); + } + + // Keep track of which events have ever been used, for event optimization + jQuery.event.global[ type ] = true; + } + + }, + + // Detach an event or set of events from an element + remove: function( elem, types, handler, selector, mappedTypes ) { + + var j, origCount, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); + + if ( !elemData || !( events = elemData.events ) ) { + return; + } + + // Once for each type.namespace in types; type may be omitted + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // Unbind all events (on this namespace, if provided) for the element + if ( !type ) { + for ( type in events ) { + jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); + } + continue; + } + + special = jQuery.event.special[ type ] || {}; + type = ( selector ? 
special.delegateType : special.bindType ) || type; + handlers = events[ type ] || []; + tmp = tmp[ 2 ] && + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); + + // Remove matching events + origCount = j = handlers.length; + while ( j-- ) { + handleObj = handlers[ j ]; + + if ( ( mappedTypes || origType === handleObj.origType ) && + ( !handler || handler.guid === handleObj.guid ) && + ( !tmp || tmp.test( handleObj.namespace ) ) && + ( !selector || selector === handleObj.selector || + selector === "**" && handleObj.selector ) ) { + handlers.splice( j, 1 ); + + if ( handleObj.selector ) { + handlers.delegateCount--; + } + if ( special.remove ) { + special.remove.call( elem, handleObj ); + } + } + } + + // Remove generic event handler if we removed something and no more handlers exist + // (avoids potential for endless recursion during removal of special event handlers) + if ( origCount && !handlers.length ) { + if ( !special.teardown || + special.teardown.call( elem, namespaces, elemData.handle ) === false ) { + + jQuery.removeEvent( elem, type, elemData.handle ); + } + + delete events[ type ]; + } + } + + // Remove data and the expando if it's no longer used + if ( jQuery.isEmptyObject( events ) ) { + dataPriv.remove( elem, "handle events" ); + } + }, + + dispatch: function( nativeEvent ) { + + // Make a writable jQuery.Event from the native event object + var event = jQuery.event.fix( nativeEvent ); + + var i, j, ret, matched, handleObj, handlerQueue, + args = new Array( arguments.length ), + handlers = ( dataPriv.get( this, "events" ) || {} )[ event.type ] || [], + special = jQuery.event.special[ event.type ] || {}; + + // Use the fix-ed jQuery.Event rather than the (read-only) native event + args[ 0 ] = event; + + for ( i = 1; i < arguments.length; i++ ) { + args[ i ] = arguments[ i ]; + } + + event.delegateTarget = this; + + // Call the preDispatch hook for the mapped type, and let it bail if desired + if ( special.preDispatch && 
special.preDispatch.call( this, event ) === false ) { + return; + } + + // Determine handlers + handlerQueue = jQuery.event.handlers.call( this, event, handlers ); + + // Run delegates first; they may want to stop propagation beneath us + i = 0; + while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { + event.currentTarget = matched.elem; + + j = 0; + while ( ( handleObj = matched.handlers[ j++ ] ) && + !event.isImmediatePropagationStopped() ) { + + // Triggered event must either 1) have no namespace, or 2) have namespace(s) + // a subset or equal to those in the bound event (both can have no namespace). + if ( !event.rnamespace || event.rnamespace.test( handleObj.namespace ) ) { + + event.handleObj = handleObj; + event.data = handleObj.data; + + ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || + handleObj.handler ).apply( matched.elem, args ); + + if ( ret !== undefined ) { + if ( ( event.result = ret ) === false ) { + event.preventDefault(); + event.stopPropagation(); + } + } + } + } + } + + // Call the postDispatch hook for the mapped type + if ( special.postDispatch ) { + special.postDispatch.call( this, event ); + } + + return event.result; + }, + + handlers: function( event, handlers ) { + var i, handleObj, sel, matchedHandlers, matchedSelectors, + handlerQueue = [], + delegateCount = handlers.delegateCount, + cur = event.target; + + // Find delegate handlers + if ( delegateCount && + + // Support: IE <=9 + // Black-hole SVG instance trees (trac-13180) + cur.nodeType && + + // Support: Firefox <=42 + // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) + // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click + // Support: IE 11 only + // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) + !( event.type === "click" && event.button >= 1 ) ) { + + for ( ; cur !== this; cur = cur.parentNode || this ) { + + // Don't check non-elements (#13208) + 
// Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) + if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { + matchedHandlers = []; + matchedSelectors = {}; + for ( i = 0; i < delegateCount; i++ ) { + handleObj = handlers[ i ]; + + // Don't conflict with Object.prototype properties (#13203) + sel = handleObj.selector + " "; + + if ( matchedSelectors[ sel ] === undefined ) { + matchedSelectors[ sel ] = handleObj.needsContext ? + jQuery( sel, this ).index( cur ) > -1 : + jQuery.find( sel, this, null, [ cur ] ).length; + } + if ( matchedSelectors[ sel ] ) { + matchedHandlers.push( handleObj ); + } + } + if ( matchedHandlers.length ) { + handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); + } + } + } + } + + // Add the remaining (directly-bound) handlers + cur = this; + if ( delegateCount < handlers.length ) { + handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); + } + + return handlerQueue; + }, + + addProp: function( name, hook ) { + Object.defineProperty( jQuery.Event.prototype, name, { + enumerable: true, + configurable: true, + + get: jQuery.isFunction( hook ) ? + function() { + if ( this.originalEvent ) { + return hook( this.originalEvent ); + } + } : + function() { + if ( this.originalEvent ) { + return this.originalEvent[ name ]; + } + }, + + set: function( value ) { + Object.defineProperty( this, name, { + enumerable: true, + configurable: true, + writable: true, + value: value + } ); + } + } ); + }, + + fix: function( originalEvent ) { + return originalEvent[ jQuery.expando ] ? 
+ originalEvent : + new jQuery.Event( originalEvent ); + }, + + special: { + load: { + + // Prevent triggered image.load events from bubbling to window.load + noBubble: true + }, + focus: { + + // Fire native event if possible so blur/focus sequence is correct + trigger: function() { + if ( this !== safeActiveElement() && this.focus ) { + this.focus(); + return false; + } + }, + delegateType: "focusin" + }, + blur: { + trigger: function() { + if ( this === safeActiveElement() && this.blur ) { + this.blur(); + return false; + } + }, + delegateType: "focusout" + }, + click: { + + // For checkbox, fire native event so checked state will be right + trigger: function() { + if ( this.type === "checkbox" && this.click && nodeName( this, "input" ) ) { + this.click(); + return false; + } + }, + + // For cross-browser consistency, don't fire native .click() on links + _default: function( event ) { + return nodeName( event.target, "a" ); + } + }, + + beforeunload: { + postDispatch: function( event ) { + + // Support: Firefox 20+ + // Firefox doesn't alert if the returnValue field is not set. + if ( event.result !== undefined && event.originalEvent ) { + event.originalEvent.returnValue = event.result; + } + } + } + } +}; + +jQuery.removeEvent = function( elem, type, handle ) { + + // This "if" is needed for plain objects + if ( elem.removeEventListener ) { + elem.removeEventListener( type, handle ); + } +}; + +jQuery.Event = function( src, props ) { + + // Allow instantiation without the 'new' keyword + if ( !( this instanceof jQuery.Event ) ) { + return new jQuery.Event( src, props ); + } + + // Event object + if ( src && src.type ) { + this.originalEvent = src; + this.type = src.type; + + // Events bubbling up the document may have been marked as prevented + // by a handler lower down the tree; reflect the correct value. + this.isDefaultPrevented = src.defaultPrevented || + src.defaultPrevented === undefined && + + // Support: Android <=2.3 only + src.returnValue === false ? 
+ returnTrue : + returnFalse; + + // Create target properties + // Support: Safari <=6 - 7 only + // Target should not be a text node (#504, #13143) + this.target = ( src.target && src.target.nodeType === 3 ) ? + src.target.parentNode : + src.target; + + this.currentTarget = src.currentTarget; + this.relatedTarget = src.relatedTarget; + + // Event type + } else { + this.type = src; + } + + // Put explicitly provided properties onto the event object + if ( props ) { + jQuery.extend( this, props ); + } + + // Create a timestamp if incoming event doesn't have one + this.timeStamp = src && src.timeStamp || jQuery.now(); + + // Mark it as fixed + this[ jQuery.expando ] = true; +}; + +// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding +// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html +jQuery.Event.prototype = { + constructor: jQuery.Event, + isDefaultPrevented: returnFalse, + isPropagationStopped: returnFalse, + isImmediatePropagationStopped: returnFalse, + isSimulated: false, + + preventDefault: function() { + var e = this.originalEvent; + + this.isDefaultPrevented = returnTrue; + + if ( e && !this.isSimulated ) { + e.preventDefault(); + } + }, + stopPropagation: function() { + var e = this.originalEvent; + + this.isPropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopPropagation(); + } + }, + stopImmediatePropagation: function() { + var e = this.originalEvent; + + this.isImmediatePropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopImmediatePropagation(); + } + + this.stopPropagation(); + } +}; + +// Includes all common event props including KeyEvent and MouseEvent specific props +jQuery.each( { + altKey: true, + bubbles: true, + cancelable: true, + changedTouches: true, + ctrlKey: true, + detail: true, + eventPhase: true, + metaKey: true, + pageX: true, + pageY: true, + shiftKey: true, + view: true, + "char": true, + charCode: true, + key: true, + 
keyCode: true, + button: true, + buttons: true, + clientX: true, + clientY: true, + offsetX: true, + offsetY: true, + pointerId: true, + pointerType: true, + screenX: true, + screenY: true, + targetTouches: true, + toElement: true, + touches: true, + + which: function( event ) { + var button = event.button; + + // Add which for key events + if ( event.which == null && rkeyEvent.test( event.type ) ) { + return event.charCode != null ? event.charCode : event.keyCode; + } + + // Add which for click: 1 === left; 2 === middle; 3 === right + if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) { + if ( button & 1 ) { + return 1; + } + + if ( button & 2 ) { + return 3; + } + + if ( button & 4 ) { + return 2; + } + + return 0; + } + + return event.which; + } +}, jQuery.event.addProp ); + +// Create mouseenter/leave events using mouseover/out and event-time checks +// so that event delegation works in jQuery. +// Do the same for pointerenter/pointerleave and pointerover/pointerout +// +// Support: Safari 7 only +// Safari sends mouseenter too often; see: +// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 +// for the description of the bug (it existed in older Chrome versions as well). +jQuery.each( { + mouseenter: "mouseover", + mouseleave: "mouseout", + pointerenter: "pointerover", + pointerleave: "pointerout" +}, function( orig, fix ) { + jQuery.event.special[ orig ] = { + delegateType: fix, + bindType: fix, + + handle: function( event ) { + var ret, + target = this, + related = event.relatedTarget, + handleObj = event.handleObj; + + // For mouseenter/leave call the handler if related is outside the target. 
+ // NB: No relatedTarget if the mouse left/entered the browser window + if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) { + event.type = handleObj.origType; + ret = handleObj.handler.apply( this, arguments ); + event.type = fix; + } + return ret; + } + }; +} ); + +jQuery.fn.extend( { + + on: function( types, selector, data, fn ) { + return on( this, types, selector, data, fn ); + }, + one: function( types, selector, data, fn ) { + return on( this, types, selector, data, fn, 1 ); + }, + off: function( types, selector, fn ) { + var handleObj, type; + if ( types && types.preventDefault && types.handleObj ) { + + // ( event ) dispatched jQuery.Event + handleObj = types.handleObj; + jQuery( types.delegateTarget ).off( + handleObj.namespace ? + handleObj.origType + "." + handleObj.namespace : + handleObj.origType, + handleObj.selector, + handleObj.handler + ); + return this; + } + if ( typeof types === "object" ) { + + // ( types-object [, selector] ) + for ( type in types ) { + this.off( type, selector, types[ type ] ); + } + return this; + } + if ( selector === false || typeof selector === "function" ) { + + // ( types [, fn] ) + fn = selector; + selector = undefined; + } + if ( fn === false ) { + fn = returnFalse; + } + return this.each( function() { + jQuery.event.remove( this, types, fn, selector ); + } ); + } +} ); + + +var + + /* eslint-disable max-len */ + + // See https://github.com/eslint/eslint/issues/3229 + rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi, + + /* eslint-enable */ + + // Support: IE <=10 - 11, Edge 12 - 13 + // In IE/Edge using regex groups here causes severe slowdowns. + // See https://connect.microsoft.com/IE/feedback/details/1736512/ + rnoInnerhtml = /\s*$/g; + +// Prefer a tbody over its parent table for containing new rows +function manipulationTarget( elem, content ) { + if ( nodeName( elem, "table" ) && + nodeName( content.nodeType !== 11 ? 
content : content.firstChild, "tr" ) ) { + + return jQuery( ">tbody", elem )[ 0 ] || elem; + } + + return elem; +} + +// Replace/restore the type attribute of script elements for safe DOM manipulation +function disableScript( elem ) { + elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type; + return elem; +} +function restoreScript( elem ) { + var match = rscriptTypeMasked.exec( elem.type ); + + if ( match ) { + elem.type = match[ 1 ]; + } else { + elem.removeAttribute( "type" ); + } + + return elem; +} + +function cloneCopyEvent( src, dest ) { + var i, l, type, pdataOld, pdataCur, udataOld, udataCur, events; + + if ( dest.nodeType !== 1 ) { + return; + } + + // 1. Copy private data: events, handlers, etc. + if ( dataPriv.hasData( src ) ) { + pdataOld = dataPriv.access( src ); + pdataCur = dataPriv.set( dest, pdataOld ); + events = pdataOld.events; + + if ( events ) { + delete pdataCur.handle; + pdataCur.events = {}; + + for ( type in events ) { + for ( i = 0, l = events[ type ].length; i < l; i++ ) { + jQuery.event.add( dest, type, events[ type ][ i ] ); + } + } + } + } + + // 2. Copy user data + if ( dataUser.hasData( src ) ) { + udataOld = dataUser.access( src ); + udataCur = jQuery.extend( {}, udataOld ); + + dataUser.set( dest, udataCur ); + } +} + +// Fix IE bugs, see support tests +function fixInput( src, dest ) { + var nodeName = dest.nodeName.toLowerCase(); + + // Fails to persist the checked state of a cloned checkbox or radio button. 
+ if ( nodeName === "input" && rcheckableType.test( src.type ) ) { + dest.checked = src.checked; + + // Fails to return the selected option to the default selected state when cloning options + } else if ( nodeName === "input" || nodeName === "textarea" ) { + dest.defaultValue = src.defaultValue; + } +} + +function domManip( collection, args, callback, ignored ) { + + // Flatten any nested arrays + args = concat.apply( [], args ); + + var fragment, first, scripts, hasScripts, node, doc, + i = 0, + l = collection.length, + iNoClone = l - 1, + value = args[ 0 ], + isFunction = jQuery.isFunction( value ); + + // We can't cloneNode fragments that contain checked, in WebKit + if ( isFunction || + ( l > 1 && typeof value === "string" && + !support.checkClone && rchecked.test( value ) ) ) { + return collection.each( function( index ) { + var self = collection.eq( index ); + if ( isFunction ) { + args[ 0 ] = value.call( this, index, self.html() ); + } + domManip( self, args, callback, ignored ); + } ); + } + + if ( l ) { + fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); + first = fragment.firstChild; + + if ( fragment.childNodes.length === 1 ) { + fragment = first; + } + + // Require either new content or an interest in ignored elements to invoke the callback + if ( first || ignored ) { + scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); + hasScripts = scripts.length; + + // Use the original fragment for the last item + // instead of the first because it can end up + // being emptied incorrectly in certain situations (#8070). 
+ for ( ; i < l; i++ ) { + node = fragment; + + if ( i !== iNoClone ) { + node = jQuery.clone( node, true, true ); + + // Keep references to cloned scripts for later restoration + if ( hasScripts ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( scripts, getAll( node, "script" ) ); + } + } + + callback.call( collection[ i ], node, i ); + } + + if ( hasScripts ) { + doc = scripts[ scripts.length - 1 ].ownerDocument; + + // Reenable scripts + jQuery.map( scripts, restoreScript ); + + // Evaluate executable scripts on first document insertion + for ( i = 0; i < hasScripts; i++ ) { + node = scripts[ i ]; + if ( rscriptType.test( node.type || "" ) && + !dataPriv.access( node, "globalEval" ) && + jQuery.contains( doc, node ) ) { + + if ( node.src ) { + + // Optional AJAX dependency, but won't run scripts if not present + if ( jQuery._evalUrl ) { + jQuery._evalUrl( node.src ); + } + } else { + DOMEval( node.textContent.replace( rcleanScript, "" ), doc ); + } + } + } + } + } + } + + return collection; +} + +function remove( elem, selector, keepData ) { + var node, + nodes = selector ? 
jQuery.filter( selector, elem ) : elem, + i = 0; + + for ( ; ( node = nodes[ i ] ) != null; i++ ) { + if ( !keepData && node.nodeType === 1 ) { + jQuery.cleanData( getAll( node ) ); + } + + if ( node.parentNode ) { + if ( keepData && jQuery.contains( node.ownerDocument, node ) ) { + setGlobalEval( getAll( node, "script" ) ); + } + node.parentNode.removeChild( node ); + } + } + + return elem; +} + +jQuery.extend( { + htmlPrefilter: function( html ) { + return html.replace( rxhtmlTag, "<$1>" ); + }, + + clone: function( elem, dataAndEvents, deepDataAndEvents ) { + var i, l, srcElements, destElements, + clone = elem.cloneNode( true ), + inPage = jQuery.contains( elem.ownerDocument, elem ); + + // Fix IE cloning issues + if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && + !jQuery.isXMLDoc( elem ) ) { + + // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 + destElements = getAll( clone ); + srcElements = getAll( elem ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + fixInput( srcElements[ i ], destElements[ i ] ); + } + } + + // Copy the events from the original to the clone + if ( dataAndEvents ) { + if ( deepDataAndEvents ) { + srcElements = srcElements || getAll( elem ); + destElements = destElements || getAll( clone ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + cloneCopyEvent( srcElements[ i ], destElements[ i ] ); + } + } else { + cloneCopyEvent( elem, clone ); + } + } + + // Preserve script evaluation history + destElements = getAll( clone, "script" ); + if ( destElements.length > 0 ) { + setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); + } + + // Return the cloned set + return clone; + }, + + cleanData: function( elems ) { + var data, elem, type, + special = jQuery.event.special, + i = 0; + + for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { + if ( acceptData( elem ) ) { + if ( ( data = elem[ dataPriv.expando ] ) ) { + if ( data.events ) { + for ( 
type in data.events ) { + if ( special[ type ] ) { + jQuery.event.remove( elem, type ); + + // This is a shortcut to avoid jQuery.event.remove's overhead + } else { + jQuery.removeEvent( elem, type, data.handle ); + } + } + } + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataPriv.expando ] = undefined; + } + if ( elem[ dataUser.expando ] ) { + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataUser.expando ] = undefined; + } + } + } + } +} ); + +jQuery.fn.extend( { + detach: function( selector ) { + return remove( this, selector, true ); + }, + + remove: function( selector ) { + return remove( this, selector ); + }, + + text: function( value ) { + return access( this, function( value ) { + return value === undefined ? + jQuery.text( this ) : + this.empty().each( function() { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + this.textContent = value; + } + } ); + }, null, value, arguments.length ); + }, + + append: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.appendChild( elem ); + } + } ); + }, + + prepend: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.insertBefore( elem, target.firstChild ); + } + } ); + }, + + before: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this ); + } + } ); + }, + + after: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this.nextSibling ); + } + } ); + }, + + empty: function() { + var elem, + i = 0; + + for ( ; ( 
elem = this[ i ] ) != null; i++ ) { + if ( elem.nodeType === 1 ) { + + // Prevent memory leaks + jQuery.cleanData( getAll( elem, false ) ); + + // Remove any remaining nodes + elem.textContent = ""; + } + } + + return this; + }, + + clone: function( dataAndEvents, deepDataAndEvents ) { + dataAndEvents = dataAndEvents == null ? false : dataAndEvents; + deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; + + return this.map( function() { + return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); + } ); + }, + + html: function( value ) { + return access( this, function( value ) { + var elem = this[ 0 ] || {}, + i = 0, + l = this.length; + + if ( value === undefined && elem.nodeType === 1 ) { + return elem.innerHTML; + } + + // See if we can take a shortcut and just use innerHTML + if ( typeof value === "string" && !rnoInnerhtml.test( value ) && + !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { + + value = jQuery.htmlPrefilter( value ); + + try { + for ( ; i < l; i++ ) { + elem = this[ i ] || {}; + + // Remove element nodes and prevent memory leaks + if ( elem.nodeType === 1 ) { + jQuery.cleanData( getAll( elem, false ) ); + elem.innerHTML = value; + } + } + + elem = 0; + + // If using innerHTML throws an exception, use the fallback method + } catch ( e ) {} + } + + if ( elem ) { + this.empty().append( value ); + } + }, null, value, arguments.length ); + }, + + replaceWith: function() { + var ignored = []; + + // Make the changes, replacing each non-ignored context element with the new content + return domManip( this, arguments, function( elem ) { + var parent = this.parentNode; + + if ( jQuery.inArray( this, ignored ) < 0 ) { + jQuery.cleanData( getAll( this ) ); + if ( parent ) { + parent.replaceChild( elem, this ); + } + } + + // Force callback invocation + }, ignored ); + } +} ); + +jQuery.each( { + appendTo: "append", + prependTo: "prepend", + insertBefore: "before", + insertAfter: "after", + 
replaceAll: "replaceWith" +}, function( name, original ) { + jQuery.fn[ name ] = function( selector ) { + var elems, + ret = [], + insert = jQuery( selector ), + last = insert.length - 1, + i = 0; + + for ( ; i <= last; i++ ) { + elems = i === last ? this : this.clone( true ); + jQuery( insert[ i ] )[ original ]( elems ); + + // Support: Android <=4.0 only, PhantomJS 1 only + // .get() because push.apply(_, arraylike) throws on ancient WebKit + push.apply( ret, elems.get() ); + } + + return this.pushStack( ret ); + }; +} ); +var rmargin = ( /^margin/ ); + +var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); + +var getStyles = function( elem ) { + + // Support: IE <=11 only, Firefox <=30 (#15098, #14150) + // IE throws on elements created in popups + // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" + var view = elem.ownerDocument.defaultView; + + if ( !view || !view.opener ) { + view = window; + } + + return view.getComputedStyle( elem ); + }; + + + +( function() { + + // Executing both pixelPosition & boxSizingReliable tests require only one layout + // so they're executed at the same time to save the second computation. 
+ function computeStyleTests() { + + // This is a singleton, we need to execute it only once + if ( !div ) { + return; + } + + div.style.cssText = + "box-sizing:border-box;" + + "position:relative;display:block;" + + "margin:auto;border:1px;padding:1px;" + + "top:1%;width:50%"; + div.innerHTML = ""; + documentElement.appendChild( container ); + + var divStyle = window.getComputedStyle( div ); + pixelPositionVal = divStyle.top !== "1%"; + + // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 + reliableMarginLeftVal = divStyle.marginLeft === "2px"; + boxSizingReliableVal = divStyle.width === "4px"; + + // Support: Android 4.0 - 4.3 only + // Some styles come back with percentage values, even though they shouldn't + div.style.marginRight = "50%"; + pixelMarginRightVal = divStyle.marginRight === "4px"; + + documentElement.removeChild( container ); + + // Nullify the div so it wouldn't be stored in the memory and + // it will also be a sign that checks already performed + div = null; + } + + var pixelPositionVal, boxSizingReliableVal, pixelMarginRightVal, reliableMarginLeftVal, + container = document.createElement( "div" ), + div = document.createElement( "div" ); + + // Finish early in limited (non-browser) environments + if ( !div.style ) { + return; + } + + // Support: IE <=9 - 11 only + // Style of cloned element affects source element cloned (#8908) + div.style.backgroundClip = "content-box"; + div.cloneNode( true ).style.backgroundClip = ""; + support.clearCloneStyle = div.style.backgroundClip === "content-box"; + + container.style.cssText = "border:0;width:8px;height:0;top:0;left:-9999px;" + + "padding:0;margin-top:1px;position:absolute"; + container.appendChild( div ); + + jQuery.extend( support, { + pixelPosition: function() { + computeStyleTests(); + return pixelPositionVal; + }, + boxSizingReliable: function() { + computeStyleTests(); + return boxSizingReliableVal; + }, + pixelMarginRight: function() { + computeStyleTests(); + return pixelMarginRightVal; + 
}, + reliableMarginLeft: function() { + computeStyleTests(); + return reliableMarginLeftVal; + } + } ); +} )(); + + +function curCSS( elem, name, computed ) { + var width, minWidth, maxWidth, ret, + + // Support: Firefox 51+ + // Retrieving style before computed somehow + // fixes an issue with getting wrong values + // on detached elements + style = elem.style; + + computed = computed || getStyles( elem ); + + // getPropertyValue is needed for: + // .css('filter') (IE 9 only, #12537) + // .css('--customProperty) (#3144) + if ( computed ) { + ret = computed.getPropertyValue( name ) || computed[ name ]; + + if ( ret === "" && !jQuery.contains( elem.ownerDocument, elem ) ) { + ret = jQuery.style( elem, name ); + } + + // A tribute to the "awesome hack by Dean Edwards" + // Android Browser returns percentage for some values, + // but width seems to be reliably pixels. + // This is against the CSSOM draft spec: + // https://drafts.csswg.org/cssom/#resolved-values + if ( !support.pixelMarginRight() && rnumnonpx.test( ret ) && rmargin.test( name ) ) { + + // Remember the original values + width = style.width; + minWidth = style.minWidth; + maxWidth = style.maxWidth; + + // Put in the new values to get a computed value out + style.minWidth = style.maxWidth = style.width = ret; + ret = computed.width; + + // Revert the changed values + style.width = width; + style.minWidth = minWidth; + style.maxWidth = maxWidth; + } + } + + return ret !== undefined ? + + // Support: IE <=9 - 11 only + // IE returns zIndex value as an integer. + ret + "" : + ret; +} + + +function addGetHookIf( conditionFn, hookFn ) { + + // Define the hook, we'll check on the first run if it's really needed. + return { + get: function() { + if ( conditionFn() ) { + + // Hook not needed (or it's not possible to use it due + // to missing dependency), remove it. + delete this.get; + return; + } + + // Hook needed; redefine it so that the support test is not executed again. 
+ return ( this.get = hookFn ).apply( this, arguments ); + } + }; +} + + +var + + // Swappable if display is none or starts with table + // except "table", "table-cell", or "table-caption" + // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display + rdisplayswap = /^(none|table(?!-c[ea]).+)/, + rcustomProp = /^--/, + cssShow = { position: "absolute", visibility: "hidden", display: "block" }, + cssNormalTransform = { + letterSpacing: "0", + fontWeight: "400" + }, + + cssPrefixes = [ "Webkit", "Moz", "ms" ], + emptyStyle = document.createElement( "div" ).style; + +// Return a css property mapped to a potentially vendor prefixed property +function vendorPropName( name ) { + + // Shortcut for names that are not vendor prefixed + if ( name in emptyStyle ) { + return name; + } + + // Check for vendor prefixed names + var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), + i = cssPrefixes.length; + + while ( i-- ) { + name = cssPrefixes[ i ] + capName; + if ( name in emptyStyle ) { + return name; + } + } +} + +// Return a property mapped along what jQuery.cssProps suggests or to +// a vendor prefixed property. +function finalPropName( name ) { + var ret = jQuery.cssProps[ name ]; + if ( !ret ) { + ret = jQuery.cssProps[ name ] = vendorPropName( name ) || name; + } + return ret; +} + +function setPositiveNumber( elem, value, subtract ) { + + // Any relative (+/-) values have already been + // normalized at this point + var matches = rcssNum.exec( value ); + return matches ? + + // Guard against undefined "subtract", e.g., when used as in cssHooks + Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : + value; +} + +function augmentWidthOrHeight( elem, name, extra, isBorderBox, styles ) { + var i, + val = 0; + + // If we already have the right measurement, avoid augmentation + if ( extra === ( isBorderBox ? 
"border" : "content" ) ) { + i = 4; + + // Otherwise initialize for horizontal or vertical properties + } else { + i = name === "width" ? 1 : 0; + } + + for ( ; i < 4; i += 2 ) { + + // Both box models exclude margin, so add it if we want it + if ( extra === "margin" ) { + val += jQuery.css( elem, extra + cssExpand[ i ], true, styles ); + } + + if ( isBorderBox ) { + + // border-box includes padding, so remove it if we want content + if ( extra === "content" ) { + val -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + } + + // At this point, extra isn't border nor margin, so remove border + if ( extra !== "margin" ) { + val -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + } else { + + // At this point, extra isn't content, so add padding + val += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + + // At this point, extra isn't content nor padding, so add border + if ( extra !== "padding" ) { + val += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + } + } + + return val; +} + +function getWidthOrHeight( elem, name, extra ) { + + // Start with computed style + var valueIsBorderBox, + styles = getStyles( elem ), + val = curCSS( elem, name, styles ), + isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; + + // Computed unit is not pixels. Stop here and return. 
+ if ( rnumnonpx.test( val ) ) { + return val; + } + + // Check for style in case a browser which returns unreliable values + // for getComputedStyle silently falls back to the reliable elem.style + valueIsBorderBox = isBorderBox && + ( support.boxSizingReliable() || val === elem.style[ name ] ); + + // Fall back to offsetWidth/Height when value is "auto" + // This happens for inline elements with no explicit setting (gh-3571) + if ( val === "auto" ) { + val = elem[ "offset" + name[ 0 ].toUpperCase() + name.slice( 1 ) ]; + } + + // Normalize "", auto, and prepare for extra + val = parseFloat( val ) || 0; + + // Use the active box-sizing model to add/subtract irrelevant styles + return ( val + + augmentWidthOrHeight( + elem, + name, + extra || ( isBorderBox ? "border" : "content" ), + valueIsBorderBox, + styles + ) + ) + "px"; +} + +jQuery.extend( { + + // Add in style property hooks for overriding the default + // behavior of getting and setting a style property + cssHooks: { + opacity: { + get: function( elem, computed ) { + if ( computed ) { + + // We should always get a number back from opacity + var ret = curCSS( elem, "opacity" ); + return ret === "" ? 
"1" : ret; + } + } + } + }, + + // Don't automatically add "px" to these possibly-unitless properties + cssNumber: { + "animationIterationCount": true, + "columnCount": true, + "fillOpacity": true, + "flexGrow": true, + "flexShrink": true, + "fontWeight": true, + "lineHeight": true, + "opacity": true, + "order": true, + "orphans": true, + "widows": true, + "zIndex": true, + "zoom": true + }, + + // Add in properties whose names you wish to fix before + // setting or getting the value + cssProps: { + "float": "cssFloat" + }, + + // Get and set the style property on a DOM Node + style: function( elem, name, value, extra ) { + + // Don't set styles on text and comment nodes + if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { + return; + } + + // Make sure that we're working with the right name + var ret, type, hooks, + origName = jQuery.camelCase( name ), + isCustomProp = rcustomProp.test( name ), + style = elem.style; + + // Make sure that we're working with the right name. We don't + // want to query the value if it is a CSS custom property + // since they are user-defined. + if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Gets hook for the prefixed version, then unprefixed version + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // Check if we're setting a value + if ( value !== undefined ) { + type = typeof value; + + // Convert "+=" or "-=" to relative numbers (#7345) + if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { + value = adjustCSS( elem, name, ret ); + + // Fixes bug #9237 + type = "number"; + } + + // Make sure that null and NaN values aren't set (#7116) + if ( value == null || value !== value ) { + return; + } + + // If a number was passed in, add the unit (except for certain CSS properties) + if ( type === "number" ) { + value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? 
"" : "px" ); + } + + // background-* props affect original clone's values + if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { + style[ name ] = "inherit"; + } + + // If a hook was provided, use that value, otherwise just set the specified value + if ( !hooks || !( "set" in hooks ) || + ( value = hooks.set( elem, value, extra ) ) !== undefined ) { + + if ( isCustomProp ) { + style.setProperty( name, value ); + } else { + style[ name ] = value; + } + } + + } else { + + // If a hook was provided get the non-computed value from there + if ( hooks && "get" in hooks && + ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { + + return ret; + } + + // Otherwise just get the value from the style object + return style[ name ]; + } + }, + + css: function( elem, name, extra, styles ) { + var val, num, hooks, + origName = jQuery.camelCase( name ), + isCustomProp = rcustomProp.test( name ); + + // Make sure that we're working with the right name. We don't + // want to modify the value if it is a CSS custom property + // since they are user-defined. + if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Try prefixed name followed by the unprefixed name + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // If a hook was provided get the computed value from there + if ( hooks && "get" in hooks ) { + val = hooks.get( elem, true, extra ); + } + + // Otherwise, if a way to get the computed value exists, use that + if ( val === undefined ) { + val = curCSS( elem, name, styles ); + } + + // Convert "normal" to computed value + if ( val === "normal" && name in cssNormalTransform ) { + val = cssNormalTransform[ name ]; + } + + // Make numeric if forced or a qualifier was provided and val looks numeric + if ( extra === "" || extra ) { + num = parseFloat( val ); + return extra === true || isFinite( num ) ? 
num || 0 : val; + } + + return val; + } +} ); + +jQuery.each( [ "height", "width" ], function( i, name ) { + jQuery.cssHooks[ name ] = { + get: function( elem, computed, extra ) { + if ( computed ) { + + // Certain elements can have dimension info if we invisibly show them + // but it must have a current display style that would benefit + return rdisplayswap.test( jQuery.css( elem, "display" ) ) && + + // Support: Safari 8+ + // Table columns in Safari have non-zero offsetWidth & zero + // getBoundingClientRect().width unless display is changed. + // Support: IE <=11 only + // Running getBoundingClientRect on a disconnected node + // in IE throws an error. + ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? + swap( elem, cssShow, function() { + return getWidthOrHeight( elem, name, extra ); + } ) : + getWidthOrHeight( elem, name, extra ); + } + }, + + set: function( elem, value, extra ) { + var matches, + styles = extra && getStyles( elem ), + subtract = extra && augmentWidthOrHeight( + elem, + name, + extra, + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + styles + ); + + // Convert to pixels if value adjustment is needed + if ( subtract && ( matches = rcssNum.exec( value ) ) && + ( matches[ 3 ] || "px" ) !== "px" ) { + + elem.style[ name ] = value; + value = jQuery.css( elem, name ); + } + + return setPositiveNumber( elem, value, subtract ); + } + }; +} ); + +jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, + function( elem, computed ) { + if ( computed ) { + return ( parseFloat( curCSS( elem, "marginLeft" ) ) || + elem.getBoundingClientRect().left - + swap( elem, { marginLeft: 0 }, function() { + return elem.getBoundingClientRect().left; + } ) + ) + "px"; + } + } +); + +// These hooks are used by animate to expand properties +jQuery.each( { + margin: "", + padding: "", + border: "Width" +}, function( prefix, suffix ) { + jQuery.cssHooks[ prefix + suffix ] = { + expand: function( value ) { + var i 
= 0, + expanded = {}, + + // Assumes a single number if not a string + parts = typeof value === "string" ? value.split( " " ) : [ value ]; + + for ( ; i < 4; i++ ) { + expanded[ prefix + cssExpand[ i ] + suffix ] = + parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; + } + + return expanded; + } + }; + + if ( !rmargin.test( prefix ) ) { + jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; + } +} ); + +jQuery.fn.extend( { + css: function( name, value ) { + return access( this, function( elem, name, value ) { + var styles, len, + map = {}, + i = 0; + + if ( Array.isArray( name ) ) { + styles = getStyles( elem ); + len = name.length; + + for ( ; i < len; i++ ) { + map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); + } + + return map; + } + + return value !== undefined ? + jQuery.style( elem, name, value ) : + jQuery.css( elem, name ); + }, name, value, arguments.length > 1 ); + } +} ); + + +function Tween( elem, options, prop, end, easing ) { + return new Tween.prototype.init( elem, options, prop, end, easing ); +} +jQuery.Tween = Tween; + +Tween.prototype = { + constructor: Tween, + init: function( elem, options, prop, end, easing, unit ) { + this.elem = elem; + this.prop = prop; + this.easing = easing || jQuery.easing._default; + this.options = options; + this.start = this.now = this.cur(); + this.end = end; + this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); + }, + cur: function() { + var hooks = Tween.propHooks[ this.prop ]; + + return hooks && hooks.get ? 
+ hooks.get( this ) : + Tween.propHooks._default.get( this ); + }, + run: function( percent ) { + var eased, + hooks = Tween.propHooks[ this.prop ]; + + if ( this.options.duration ) { + this.pos = eased = jQuery.easing[ this.easing ]( + percent, this.options.duration * percent, 0, 1, this.options.duration + ); + } else { + this.pos = eased = percent; + } + this.now = ( this.end - this.start ) * eased + this.start; + + if ( this.options.step ) { + this.options.step.call( this.elem, this.now, this ); + } + + if ( hooks && hooks.set ) { + hooks.set( this ); + } else { + Tween.propHooks._default.set( this ); + } + return this; + } +}; + +Tween.prototype.init.prototype = Tween.prototype; + +Tween.propHooks = { + _default: { + get: function( tween ) { + var result; + + // Use a property on the element directly when it is not a DOM element, + // or when there is no matching style property that exists. + if ( tween.elem.nodeType !== 1 || + tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { + return tween.elem[ tween.prop ]; + } + + // Passing an empty string as a 3rd parameter to .css will automatically + // attempt a parseFloat and fallback to a string if the parse fails. + // Simple values such as "10px" are parsed to Float; + // complex values such as "rotate(1rad)" are returned as-is. + result = jQuery.css( tween.elem, tween.prop, "" ); + + // Empty strings, null, undefined and "auto" are converted to 0. + return !result || result === "auto" ? 0 : result; + }, + set: function( tween ) { + + // Use step hook for back compat. + // Use cssHook if its there. + // Use .style if available and use plain properties where available. 
+ if ( jQuery.fx.step[ tween.prop ] ) { + jQuery.fx.step[ tween.prop ]( tween ); + } else if ( tween.elem.nodeType === 1 && + ( tween.elem.style[ jQuery.cssProps[ tween.prop ] ] != null || + jQuery.cssHooks[ tween.prop ] ) ) { + jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); + } else { + tween.elem[ tween.prop ] = tween.now; + } + } + } +}; + +// Support: IE <=9 only +// Panic based approach to setting things on disconnected nodes +Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { + set: function( tween ) { + if ( tween.elem.nodeType && tween.elem.parentNode ) { + tween.elem[ tween.prop ] = tween.now; + } + } +}; + +jQuery.easing = { + linear: function( p ) { + return p; + }, + swing: function( p ) { + return 0.5 - Math.cos( p * Math.PI ) / 2; + }, + _default: "swing" +}; + +jQuery.fx = Tween.prototype.init; + +// Back compat <1.8 extension point +jQuery.fx.step = {}; + + + + +var + fxNow, inProgress, + rfxtypes = /^(?:toggle|show|hide)$/, + rrun = /queueHooks$/; + +function schedule() { + if ( inProgress ) { + if ( document.hidden === false && window.requestAnimationFrame ) { + window.requestAnimationFrame( schedule ); + } else { + window.setTimeout( schedule, jQuery.fx.interval ); + } + + jQuery.fx.tick(); + } +} + +// Animations created synchronously will run synchronously +function createFxNow() { + window.setTimeout( function() { + fxNow = undefined; + } ); + return ( fxNow = jQuery.now() ); +} + +// Generate parameters to create a standard animation +function genFx( type, includeWidth ) { + var which, + i = 0, + attrs = { height: type }; + + // If we include width, step value is 1 to do all cssExpand values, + // otherwise step value is 2 to skip over Left and Right + includeWidth = includeWidth ? 
1 : 0; + for ( ; i < 4; i += 2 - includeWidth ) { + which = cssExpand[ i ]; + attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; + } + + if ( includeWidth ) { + attrs.opacity = attrs.width = type; + } + + return attrs; +} + +function createTween( value, prop, animation ) { + var tween, + collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), + index = 0, + length = collection.length; + for ( ; index < length; index++ ) { + if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { + + // We're done with this property + return tween; + } + } +} + +function defaultPrefilter( elem, props, opts ) { + var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, + isBox = "width" in props || "height" in props, + anim = this, + orig = {}, + style = elem.style, + hidden = elem.nodeType && isHiddenWithinTree( elem ), + dataShow = dataPriv.get( elem, "fxshow" ); + + // Queue-skipping animations hijack the fx hooks + if ( !opts.queue ) { + hooks = jQuery._queueHooks( elem, "fx" ); + if ( hooks.unqueued == null ) { + hooks.unqueued = 0; + oldfire = hooks.empty.fire; + hooks.empty.fire = function() { + if ( !hooks.unqueued ) { + oldfire(); + } + }; + } + hooks.unqueued++; + + anim.always( function() { + + // Ensure the complete handler is called before this completes + anim.always( function() { + hooks.unqueued--; + if ( !jQuery.queue( elem, "fx" ).length ) { + hooks.empty.fire(); + } + } ); + } ); + } + + // Detect show/hide animations + for ( prop in props ) { + value = props[ prop ]; + if ( rfxtypes.test( value ) ) { + delete props[ prop ]; + toggle = toggle || value === "toggle"; + if ( value === ( hidden ? 
"hide" : "show" ) ) { + + // Pretend to be hidden if this is a "show" and + // there is still data from a stopped show/hide + if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { + hidden = true; + + // Ignore all other no-op show/hide data + } else { + continue; + } + } + orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); + } + } + + // Bail out if this is a no-op like .hide().hide() + propTween = !jQuery.isEmptyObject( props ); + if ( !propTween && jQuery.isEmptyObject( orig ) ) { + return; + } + + // Restrict "overflow" and "display" styles during box animations + if ( isBox && elem.nodeType === 1 ) { + + // Support: IE <=9 - 11, Edge 12 - 13 + // Record all 3 overflow attributes because IE does not infer the shorthand + // from identically-valued overflowX and overflowY + opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; + + // Identify a display type, preferring old show/hide data over the CSS cascade + restoreDisplay = dataShow && dataShow.display; + if ( restoreDisplay == null ) { + restoreDisplay = dataPriv.get( elem, "display" ); + } + display = jQuery.css( elem, "display" ); + if ( display === "none" ) { + if ( restoreDisplay ) { + display = restoreDisplay; + } else { + + // Get nonempty value(s) by temporarily forcing visibility + showHide( [ elem ], true ); + restoreDisplay = elem.style.display || restoreDisplay; + display = jQuery.css( elem, "display" ); + showHide( [ elem ] ); + } + } + + // Animate inline elements as inline-block + if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { + if ( jQuery.css( elem, "float" ) === "none" ) { + + // Restore the original display value at the end of pure show/hide animations + if ( !propTween ) { + anim.done( function() { + style.display = restoreDisplay; + } ); + if ( restoreDisplay == null ) { + display = style.display; + restoreDisplay = display === "none" ? 
"" : display; + } + } + style.display = "inline-block"; + } + } + } + + if ( opts.overflow ) { + style.overflow = "hidden"; + anim.always( function() { + style.overflow = opts.overflow[ 0 ]; + style.overflowX = opts.overflow[ 1 ]; + style.overflowY = opts.overflow[ 2 ]; + } ); + } + + // Implement show/hide animations + propTween = false; + for ( prop in orig ) { + + // General show/hide setup for this element animation + if ( !propTween ) { + if ( dataShow ) { + if ( "hidden" in dataShow ) { + hidden = dataShow.hidden; + } + } else { + dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); + } + + // Store hidden/visible for toggle so `.stop().toggle()` "reverses" + if ( toggle ) { + dataShow.hidden = !hidden; + } + + // Show elements before animating them + if ( hidden ) { + showHide( [ elem ], true ); + } + + /* eslint-disable no-loop-func */ + + anim.done( function() { + + /* eslint-enable no-loop-func */ + + // The final step of a "hide" animation is actually hiding the element + if ( !hidden ) { + showHide( [ elem ] ); + } + dataPriv.remove( elem, "fxshow" ); + for ( prop in orig ) { + jQuery.style( elem, prop, orig[ prop ] ); + } + } ); + } + + // Per-property setup + propTween = createTween( hidden ? 
dataShow[ prop ] : 0, prop, anim ); + if ( !( prop in dataShow ) ) { + dataShow[ prop ] = propTween.start; + if ( hidden ) { + propTween.end = propTween.start; + propTween.start = 0; + } + } + } +} + +function propFilter( props, specialEasing ) { + var index, name, easing, value, hooks; + + // camelCase, specialEasing and expand cssHook pass + for ( index in props ) { + name = jQuery.camelCase( index ); + easing = specialEasing[ name ]; + value = props[ index ]; + if ( Array.isArray( value ) ) { + easing = value[ 1 ]; + value = props[ index ] = value[ 0 ]; + } + + if ( index !== name ) { + props[ name ] = value; + delete props[ index ]; + } + + hooks = jQuery.cssHooks[ name ]; + if ( hooks && "expand" in hooks ) { + value = hooks.expand( value ); + delete props[ name ]; + + // Not quite $.extend, this won't overwrite existing keys. + // Reusing 'index' because we have the correct "name" + for ( index in value ) { + if ( !( index in props ) ) { + props[ index ] = value[ index ]; + specialEasing[ index ] = easing; + } + } + } else { + specialEasing[ name ] = easing; + } + } +} + +function Animation( elem, properties, options ) { + var result, + stopped, + index = 0, + length = Animation.prefilters.length, + deferred = jQuery.Deferred().always( function() { + + // Don't match elem in the :animated selector + delete tick.elem; + } ), + tick = function() { + if ( stopped ) { + return false; + } + var currentTime = fxNow || createFxNow(), + remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), + + // Support: Android 2.3 only + // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) + temp = remaining / animation.duration || 0, + percent = 1 - temp, + index = 0, + length = animation.tweens.length; + + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( percent ); + } + + deferred.notifyWith( elem, [ animation, percent, remaining ] ); + + // If there's more to do, yield + if ( percent < 1 && length ) { + return 
remaining; + } + + // If this was an empty animation, synthesize a final progress notification + if ( !length ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + } + + // Resolve the animation and report its conclusion + deferred.resolveWith( elem, [ animation ] ); + return false; + }, + animation = deferred.promise( { + elem: elem, + props: jQuery.extend( {}, properties ), + opts: jQuery.extend( true, { + specialEasing: {}, + easing: jQuery.easing._default + }, options ), + originalProperties: properties, + originalOptions: options, + startTime: fxNow || createFxNow(), + duration: options.duration, + tweens: [], + createTween: function( prop, end ) { + var tween = jQuery.Tween( elem, animation.opts, prop, end, + animation.opts.specialEasing[ prop ] || animation.opts.easing ); + animation.tweens.push( tween ); + return tween; + }, + stop: function( gotoEnd ) { + var index = 0, + + // If we are going to the end, we want to run all the tweens + // otherwise we skip this part + length = gotoEnd ? 
animation.tweens.length : 0; + if ( stopped ) { + return this; + } + stopped = true; + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( 1 ); + } + + // Resolve when we played the last frame; otherwise, reject + if ( gotoEnd ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + deferred.resolveWith( elem, [ animation, gotoEnd ] ); + } else { + deferred.rejectWith( elem, [ animation, gotoEnd ] ); + } + return this; + } + } ), + props = animation.props; + + propFilter( props, animation.opts.specialEasing ); + + for ( ; index < length; index++ ) { + result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); + if ( result ) { + if ( jQuery.isFunction( result.stop ) ) { + jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = + jQuery.proxy( result.stop, result ); + } + return result; + } + } + + jQuery.map( props, createTween, animation ); + + if ( jQuery.isFunction( animation.opts.start ) ) { + animation.opts.start.call( elem, animation ); + } + + // Attach callbacks from options + animation + .progress( animation.opts.progress ) + .done( animation.opts.done, animation.opts.complete ) + .fail( animation.opts.fail ) + .always( animation.opts.always ); + + jQuery.fx.timer( + jQuery.extend( tick, { + elem: elem, + anim: animation, + queue: animation.opts.queue + } ) + ); + + return animation; +} + +jQuery.Animation = jQuery.extend( Animation, { + + tweeners: { + "*": [ function( prop, value ) { + var tween = this.createTween( prop, value ); + adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); + return tween; + } ] + }, + + tweener: function( props, callback ) { + if ( jQuery.isFunction( props ) ) { + callback = props; + props = [ "*" ]; + } else { + props = props.match( rnothtmlwhite ); + } + + var prop, + index = 0, + length = props.length; + + for ( ; index < length; index++ ) { + prop = props[ index ]; + Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; + Animation.tweeners[ prop 
].unshift( callback ); + } + }, + + prefilters: [ defaultPrefilter ], + + prefilter: function( callback, prepend ) { + if ( prepend ) { + Animation.prefilters.unshift( callback ); + } else { + Animation.prefilters.push( callback ); + } + } +} ); + +jQuery.speed = function( speed, easing, fn ) { + var opt = speed && typeof speed === "object" ? jQuery.extend( {}, speed ) : { + complete: fn || !fn && easing || + jQuery.isFunction( speed ) && speed, + duration: speed, + easing: fn && easing || easing && !jQuery.isFunction( easing ) && easing + }; + + // Go to the end state if fx are off + if ( jQuery.fx.off ) { + opt.duration = 0; + + } else { + if ( typeof opt.duration !== "number" ) { + if ( opt.duration in jQuery.fx.speeds ) { + opt.duration = jQuery.fx.speeds[ opt.duration ]; + + } else { + opt.duration = jQuery.fx.speeds._default; + } + } + } + + // Normalize opt.queue - true/undefined/null -> "fx" + if ( opt.queue == null || opt.queue === true ) { + opt.queue = "fx"; + } + + // Queueing + opt.old = opt.complete; + + opt.complete = function() { + if ( jQuery.isFunction( opt.old ) ) { + opt.old.call( this ); + } + + if ( opt.queue ) { + jQuery.dequeue( this, opt.queue ); + } + }; + + return opt; +}; + +jQuery.fn.extend( { + fadeTo: function( speed, to, easing, callback ) { + + // Show any hidden elements after setting opacity to 0 + return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() + + // Animate to the value specified + .end().animate( { opacity: to }, speed, easing, callback ); + }, + animate: function( prop, speed, easing, callback ) { + var empty = jQuery.isEmptyObject( prop ), + optall = jQuery.speed( speed, easing, callback ), + doAnimation = function() { + + // Operate on a copy of prop so per-property easing won't be lost + var anim = Animation( this, jQuery.extend( {}, prop ), optall ); + + // Empty animations, or finishing resolves immediately + if ( empty || dataPriv.get( this, "finish" ) ) { + anim.stop( true ); + } + }; + 
doAnimation.finish = doAnimation; + + return empty || optall.queue === false ? + this.each( doAnimation ) : + this.queue( optall.queue, doAnimation ); + }, + stop: function( type, clearQueue, gotoEnd ) { + var stopQueue = function( hooks ) { + var stop = hooks.stop; + delete hooks.stop; + stop( gotoEnd ); + }; + + if ( typeof type !== "string" ) { + gotoEnd = clearQueue; + clearQueue = type; + type = undefined; + } + if ( clearQueue && type !== false ) { + this.queue( type || "fx", [] ); + } + + return this.each( function() { + var dequeue = true, + index = type != null && type + "queueHooks", + timers = jQuery.timers, + data = dataPriv.get( this ); + + if ( index ) { + if ( data[ index ] && data[ index ].stop ) { + stopQueue( data[ index ] ); + } + } else { + for ( index in data ) { + if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { + stopQueue( data[ index ] ); + } + } + } + + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && + ( type == null || timers[ index ].queue === type ) ) { + + timers[ index ].anim.stop( gotoEnd ); + dequeue = false; + timers.splice( index, 1 ); + } + } + + // Start the next in the queue if the last step wasn't forced. + // Timers currently will call their complete callbacks, which + // will dequeue but only if they were gotoEnd. + if ( dequeue || !gotoEnd ) { + jQuery.dequeue( this, type ); + } + } ); + }, + finish: function( type ) { + if ( type !== false ) { + type = type || "fx"; + } + return this.each( function() { + var index, + data = dataPriv.get( this ), + queue = data[ type + "queue" ], + hooks = data[ type + "queueHooks" ], + timers = jQuery.timers, + length = queue ? 
queue.length : 0; + + // Enable finishing flag on private data + data.finish = true; + + // Empty the queue first + jQuery.queue( this, type, [] ); + + if ( hooks && hooks.stop ) { + hooks.stop.call( this, true ); + } + + // Look for any active animations, and finish them + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && timers[ index ].queue === type ) { + timers[ index ].anim.stop( true ); + timers.splice( index, 1 ); + } + } + + // Look for any animations in the old queue and finish them + for ( index = 0; index < length; index++ ) { + if ( queue[ index ] && queue[ index ].finish ) { + queue[ index ].finish.call( this ); + } + } + + // Turn off finishing flag + delete data.finish; + } ); + } +} ); + +jQuery.each( [ "toggle", "show", "hide" ], function( i, name ) { + var cssFn = jQuery.fn[ name ]; + jQuery.fn[ name ] = function( speed, easing, callback ) { + return speed == null || typeof speed === "boolean" ? + cssFn.apply( this, arguments ) : + this.animate( genFx( name, true ), speed, easing, callback ); + }; +} ); + +// Generate shortcuts for custom animations +jQuery.each( { + slideDown: genFx( "show" ), + slideUp: genFx( "hide" ), + slideToggle: genFx( "toggle" ), + fadeIn: { opacity: "show" }, + fadeOut: { opacity: "hide" }, + fadeToggle: { opacity: "toggle" } +}, function( name, props ) { + jQuery.fn[ name ] = function( speed, easing, callback ) { + return this.animate( props, speed, easing, callback ); + }; +} ); + +jQuery.timers = []; +jQuery.fx.tick = function() { + var timer, + i = 0, + timers = jQuery.timers; + + fxNow = jQuery.now(); + + for ( ; i < timers.length; i++ ) { + timer = timers[ i ]; + + // Run the timer and safely remove it when done (allowing for external removal) + if ( !timer() && timers[ i ] === timer ) { + timers.splice( i--, 1 ); + } + } + + if ( !timers.length ) { + jQuery.fx.stop(); + } + fxNow = undefined; +}; + +jQuery.fx.timer = function( timer ) { + jQuery.timers.push( timer ); + 
jQuery.fx.start(); +}; + +jQuery.fx.interval = 13; +jQuery.fx.start = function() { + if ( inProgress ) { + return; + } + + inProgress = true; + schedule(); +}; + +jQuery.fx.stop = function() { + inProgress = null; +}; + +jQuery.fx.speeds = { + slow: 600, + fast: 200, + + // Default speed + _default: 400 +}; + + +// Based off of the plugin by Clint Helfers, with permission. +// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ +jQuery.fn.delay = function( time, type ) { + time = jQuery.fx ? jQuery.fx.speeds[ time ] || time : time; + type = type || "fx"; + + return this.queue( type, function( next, hooks ) { + var timeout = window.setTimeout( next, time ); + hooks.stop = function() { + window.clearTimeout( timeout ); + }; + } ); +}; + + +( function() { + var input = document.createElement( "input" ), + select = document.createElement( "select" ), + opt = select.appendChild( document.createElement( "option" ) ); + + input.type = "checkbox"; + + // Support: Android <=4.3 only + // Default value for a checkbox should be "on" + support.checkOn = input.value !== ""; + + // Support: IE <=11 only + // Must access selectedIndex to make default options select + support.optSelected = opt.selected; + + // Support: IE <=11 only + // An input loses its value after becoming a radio + input = document.createElement( "input" ); + input.value = "t"; + input.type = "radio"; + support.radioValue = input.value === "t"; +} )(); + + +var boolHook, + attrHandle = jQuery.expr.attrHandle; + +jQuery.fn.extend( { + attr: function( name, value ) { + return access( this, jQuery.attr, name, value, arguments.length > 1 ); + }, + + removeAttr: function( name ) { + return this.each( function() { + jQuery.removeAttr( this, name ); + } ); + } +} ); + +jQuery.extend( { + attr: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set attributes on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || 
nType === 2 ) { + return; + } + + // Fallback to prop when attributes are not supported + if ( typeof elem.getAttribute === "undefined" ) { + return jQuery.prop( elem, name, value ); + } + + // Attribute hooks are determined by the lowercase version + // Grab necessary hook if one is defined + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + hooks = jQuery.attrHooks[ name.toLowerCase() ] || + ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); + } + + if ( value !== undefined ) { + if ( value === null ) { + jQuery.removeAttr( elem, name ); + return; + } + + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + elem.setAttribute( name, value + "" ); + return value; + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + ret = jQuery.find.attr( elem, name ); + + // Non-existent attributes return null, we normalize to undefined + return ret == null ? undefined : ret; + }, + + attrHooks: { + type: { + set: function( elem, value ) { + if ( !support.radioValue && value === "radio" && + nodeName( elem, "input" ) ) { + var val = elem.value; + elem.setAttribute( "type", value ); + if ( val ) { + elem.value = val; + } + return value; + } + } + } + }, + + removeAttr: function( elem, value ) { + var name, + i = 0, + + // Attribute names can contain non-HTML whitespace characters + // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 + attrNames = value && value.match( rnothtmlwhite ); + + if ( attrNames && elem.nodeType === 1 ) { + while ( ( name = attrNames[ i++ ] ) ) { + elem.removeAttribute( name ); + } + } + } +} ); + +// Hooks for boolean attributes +boolHook = { + set: function( elem, value, name ) { + if ( value === false ) { + + // Remove boolean attributes when set to false + jQuery.removeAttr( elem, name ); + } else { + elem.setAttribute( name, name ); + } + return name; + } +}; + +jQuery.each( 
jQuery.expr.match.bool.source.match( /\w+/g ), function( i, name ) { + var getter = attrHandle[ name ] || jQuery.find.attr; + + attrHandle[ name ] = function( elem, name, isXML ) { + var ret, handle, + lowercaseName = name.toLowerCase(); + + if ( !isXML ) { + + // Avoid an infinite loop by temporarily removing this function from the getter + handle = attrHandle[ lowercaseName ]; + attrHandle[ lowercaseName ] = ret; + ret = getter( elem, name, isXML ) != null ? + lowercaseName : + null; + attrHandle[ lowercaseName ] = handle; + } + return ret; + }; +} ); + + + + +var rfocusable = /^(?:input|select|textarea|button)$/i, + rclickable = /^(?:a|area)$/i; + +jQuery.fn.extend( { + prop: function( name, value ) { + return access( this, jQuery.prop, name, value, arguments.length > 1 ); + }, + + removeProp: function( name ) { + return this.each( function() { + delete this[ jQuery.propFix[ name ] || name ]; + } ); + } +} ); + +jQuery.extend( { + prop: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set properties on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + + // Fix name and attach hooks + name = jQuery.propFix[ name ] || name; + hooks = jQuery.propHooks[ name ]; + } + + if ( value !== undefined ) { + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + return ( elem[ name ] = value ); + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + return elem[ name ]; + }, + + propHooks: { + tabIndex: { + get: function( elem ) { + + // Support: IE <=9 - 11 only + // elem.tabIndex doesn't always return the + // correct value when it hasn't been explicitly set + // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ + // Use 
proper attribute retrieval(#12072) + var tabindex = jQuery.find.attr( elem, "tabindex" ); + + if ( tabindex ) { + return parseInt( tabindex, 10 ); + } + + if ( + rfocusable.test( elem.nodeName ) || + rclickable.test( elem.nodeName ) && + elem.href + ) { + return 0; + } + + return -1; + } + } + }, + + propFix: { + "for": "htmlFor", + "class": "className" + } +} ); + +// Support: IE <=11 only +// Accessing the selectedIndex property +// forces the browser to respect setting selected +// on the option +// The getter ensures a default option is selected +// when in an optgroup +// eslint rule "no-unused-expressions" is disabled for this code +// since it considers such accessions noop +if ( !support.optSelected ) { + jQuery.propHooks.selected = { + get: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent && parent.parentNode ) { + parent.parentNode.selectedIndex; + } + return null; + }, + set: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent ) { + parent.selectedIndex; + + if ( parent.parentNode ) { + parent.parentNode.selectedIndex; + } + } + } + }; +} + +jQuery.each( [ + "tabIndex", + "readOnly", + "maxLength", + "cellSpacing", + "cellPadding", + "rowSpan", + "colSpan", + "useMap", + "frameBorder", + "contentEditable" +], function() { + jQuery.propFix[ this.toLowerCase() ] = this; +} ); + + + + + // Strip and collapse whitespace according to HTML spec + // https://html.spec.whatwg.org/multipage/infrastructure.html#strip-and-collapse-whitespace + function stripAndCollapse( value ) { + var tokens = value.match( rnothtmlwhite ) || []; + return tokens.join( " " ); + } + + +function getClass( elem ) { + return elem.getAttribute && elem.getAttribute( "class" ) || ""; +} + +jQuery.fn.extend( { + addClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( jQuery.isFunction( value ) ) { + return this.each( 
function( j ) { + jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + if ( typeof value === "string" && value ) { + classes = value.match( rnothtmlwhite ) || []; + + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + if ( cur.indexOf( " " + clazz + " " ) < 0 ) { + cur += clazz + " "; + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + removeClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( jQuery.isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + if ( !arguments.length ) { + return this.attr( "class", "" ); + } + + if ( typeof value === "string" && value ) { + classes = value.match( rnothtmlwhite ) || []; + + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + + // This expression is here for better compressibility (see addClass) + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + + // Remove *all* instances + while ( cur.indexOf( " " + clazz + " " ) > -1 ) { + cur = cur.replace( " " + clazz + " ", " " ); + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + toggleClass: function( value, stateVal ) { + var type = typeof value; + + if ( typeof stateVal === "boolean" && type === "string" ) { + return stateVal ? 
this.addClass( value ) : this.removeClass( value ); + } + + if ( jQuery.isFunction( value ) ) { + return this.each( function( i ) { + jQuery( this ).toggleClass( + value.call( this, i, getClass( this ), stateVal ), + stateVal + ); + } ); + } + + return this.each( function() { + var className, i, self, classNames; + + if ( type === "string" ) { + + // Toggle individual class names + i = 0; + self = jQuery( this ); + classNames = value.match( rnothtmlwhite ) || []; + + while ( ( className = classNames[ i++ ] ) ) { + + // Check each className given, space separated list + if ( self.hasClass( className ) ) { + self.removeClass( className ); + } else { + self.addClass( className ); + } + } + + // Toggle whole class name + } else if ( value === undefined || type === "boolean" ) { + className = getClass( this ); + if ( className ) { + + // Store className if set + dataPriv.set( this, "__className__", className ); + } + + // If the element has a class name or if we're passed `false`, + // then remove the whole classname (if there was one, the above saved it). + // Otherwise bring back whatever was previously saved (if anything), + // falling back to the empty string if nothing was stored. + if ( this.setAttribute ) { + this.setAttribute( "class", + className || value === false ? 
+ "" : + dataPriv.get( this, "__className__" ) || "" + ); + } + } + } ); + }, + + hasClass: function( selector ) { + var className, elem, + i = 0; + + className = " " + selector + " "; + while ( ( elem = this[ i++ ] ) ) { + if ( elem.nodeType === 1 && + ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { + return true; + } + } + + return false; + } +} ); + + + + +var rreturn = /\r/g; + +jQuery.fn.extend( { + val: function( value ) { + var hooks, ret, isFunction, + elem = this[ 0 ]; + + if ( !arguments.length ) { + if ( elem ) { + hooks = jQuery.valHooks[ elem.type ] || + jQuery.valHooks[ elem.nodeName.toLowerCase() ]; + + if ( hooks && + "get" in hooks && + ( ret = hooks.get( elem, "value" ) ) !== undefined + ) { + return ret; + } + + ret = elem.value; + + // Handle most common string cases + if ( typeof ret === "string" ) { + return ret.replace( rreturn, "" ); + } + + // Handle cases where value is null/undef or number + return ret == null ? "" : ret; + } + + return; + } + + isFunction = jQuery.isFunction( value ); + + return this.each( function( i ) { + var val; + + if ( this.nodeType !== 1 ) { + return; + } + + if ( isFunction ) { + val = value.call( this, i, jQuery( this ).val() ); + } else { + val = value; + } + + // Treat null/undefined as ""; convert numbers to string + if ( val == null ) { + val = ""; + + } else if ( typeof val === "number" ) { + val += ""; + + } else if ( Array.isArray( val ) ) { + val = jQuery.map( val, function( value ) { + return value == null ? "" : value + ""; + } ); + } + + hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; + + // If set returns undefined, fall back to normal setting + if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { + this.value = val; + } + } ); + } +} ); + +jQuery.extend( { + valHooks: { + option: { + get: function( elem ) { + + var val = jQuery.find.attr( elem, "value" ); + return val != null ? 
+ val : + + // Support: IE <=10 - 11 only + // option.text throws exceptions (#14686, #14858) + // Strip and collapse whitespace + // https://html.spec.whatwg.org/#strip-and-collapse-whitespace + stripAndCollapse( jQuery.text( elem ) ); + } + }, + select: { + get: function( elem ) { + var value, option, i, + options = elem.options, + index = elem.selectedIndex, + one = elem.type === "select-one", + values = one ? null : [], + max = one ? index + 1 : options.length; + + if ( index < 0 ) { + i = max; + + } else { + i = one ? index : 0; + } + + // Loop through all the selected options + for ( ; i < max; i++ ) { + option = options[ i ]; + + // Support: IE <=9 only + // IE8-9 doesn't update selected after form reset (#2551) + if ( ( option.selected || i === index ) && + + // Don't return options that are disabled or in a disabled optgroup + !option.disabled && + ( !option.parentNode.disabled || + !nodeName( option.parentNode, "optgroup" ) ) ) { + + // Get the specific value for the option + value = jQuery( option ).val(); + + // We don't need an array for one selects + if ( one ) { + return value; + } + + // Multi-Selects return an array + values.push( value ); + } + } + + return values; + }, + + set: function( elem, value ) { + var optionSet, option, + options = elem.options, + values = jQuery.makeArray( value ), + i = options.length; + + while ( i-- ) { + option = options[ i ]; + + /* eslint-disable no-cond-assign */ + + if ( option.selected = + jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 + ) { + optionSet = true; + } + + /* eslint-enable no-cond-assign */ + } + + // Force browsers to behave consistently when non-matching value is set + if ( !optionSet ) { + elem.selectedIndex = -1; + } + return values; + } + } + } +} ); + +// Radios and checkboxes getter/setter +jQuery.each( [ "radio", "checkbox" ], function() { + jQuery.valHooks[ this ] = { + set: function( elem, value ) { + if ( Array.isArray( value ) ) { + return ( elem.checked = 
jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); + } + } + }; + if ( !support.checkOn ) { + jQuery.valHooks[ this ].get = function( elem ) { + return elem.getAttribute( "value" ) === null ? "on" : elem.value; + }; + } +} ); + + + + +// Return jQuery for attributes-only inclusion + + +var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/; + +jQuery.extend( jQuery.event, { + + trigger: function( event, data, elem, onlyHandlers ) { + + var i, cur, tmp, bubbleType, ontype, handle, special, + eventPath = [ elem || document ], + type = hasOwn.call( event, "type" ) ? event.type : event, + namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; + + cur = tmp = elem = elem || document; + + // Don't do events on text and comment nodes + if ( elem.nodeType === 3 || elem.nodeType === 8 ) { + return; + } + + // focus/blur morphs to focusin/out; ensure we're not firing them right now + if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { + return; + } + + if ( type.indexOf( "." ) > -1 ) { + + // Namespaced trigger; create a regexp to match event type in handle() + namespaces = type.split( "." ); + type = namespaces.shift(); + namespaces.sort(); + } + ontype = type.indexOf( ":" ) < 0 && "on" + type; + + // Caller can pass in a jQuery.Event object, Object, or just an event type string + event = event[ jQuery.expando ] ? + event : + new jQuery.Event( type, typeof event === "object" && event ); + + // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) + event.isTrigger = onlyHandlers ? 2 : 3; + event.namespace = namespaces.join( "." ); + event.rnamespace = event.namespace ? + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : + null; + + // Clean up the event in case it is being reused + event.result = undefined; + if ( !event.target ) { + event.target = elem; + } + + // Clone any incoming data and prepend the event, creating the handler arg list + data = data == null ? 
+ [ event ] : + jQuery.makeArray( data, [ event ] ); + + // Allow special events to draw outside the lines + special = jQuery.event.special[ type ] || {}; + if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { + return; + } + + // Determine event propagation path in advance, per W3C events spec (#9951) + // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) + if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) { + + bubbleType = special.delegateType || type; + if ( !rfocusMorph.test( bubbleType + type ) ) { + cur = cur.parentNode; + } + for ( ; cur; cur = cur.parentNode ) { + eventPath.push( cur ); + tmp = cur; + } + + // Only add window if we got to document (e.g., not plain obj or detached DOM) + if ( tmp === ( elem.ownerDocument || document ) ) { + eventPath.push( tmp.defaultView || tmp.parentWindow || window ); + } + } + + // Fire handlers on the event path + i = 0; + while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { + + event.type = i > 1 ? + bubbleType : + special.bindType || type; + + // jQuery handler + handle = ( dataPriv.get( cur, "events" ) || {} )[ event.type ] && + dataPriv.get( cur, "handle" ); + if ( handle ) { + handle.apply( cur, data ); + } + + // Native handler + handle = ontype && cur[ ontype ]; + if ( handle && handle.apply && acceptData( cur ) ) { + event.result = handle.apply( cur, data ); + if ( event.result === false ) { + event.preventDefault(); + } + } + } + event.type = type; + + // If nobody prevented the default action, do it now + if ( !onlyHandlers && !event.isDefaultPrevented() ) { + + if ( ( !special._default || + special._default.apply( eventPath.pop(), data ) === false ) && + acceptData( elem ) ) { + + // Call a native DOM method on the target with the same name as the event. 
+ // Don't do default actions on window, that's where global variables be (#6170) + if ( ontype && jQuery.isFunction( elem[ type ] ) && !jQuery.isWindow( elem ) ) { + + // Don't re-trigger an onFOO event when we call its FOO() method + tmp = elem[ ontype ]; + + if ( tmp ) { + elem[ ontype ] = null; + } + + // Prevent re-triggering of the same event, since we already bubbled it above + jQuery.event.triggered = type; + elem[ type ](); + jQuery.event.triggered = undefined; + + if ( tmp ) { + elem[ ontype ] = tmp; + } + } + } + } + + return event.result; + }, + + // Piggyback on a donor event to simulate a different one + // Used only for `focus(in | out)` events + simulate: function( type, elem, event ) { + var e = jQuery.extend( + new jQuery.Event(), + event, + { + type: type, + isSimulated: true + } + ); + + jQuery.event.trigger( e, null, elem ); + } + +} ); + +jQuery.fn.extend( { + + trigger: function( type, data ) { + return this.each( function() { + jQuery.event.trigger( type, data, this ); + } ); + }, + triggerHandler: function( type, data ) { + var elem = this[ 0 ]; + if ( elem ) { + return jQuery.event.trigger( type, data, elem, true ); + } + } +} ); + + +jQuery.each( ( "blur focus focusin focusout resize scroll click dblclick " + + "mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " + + "change select submit keydown keypress keyup contextmenu" ).split( " " ), + function( i, name ) { + + // Handle event binding + jQuery.fn[ name ] = function( data, fn ) { + return arguments.length > 0 ? 
+ this.on( name, null, data, fn ) : + this.trigger( name ); + }; +} ); + +jQuery.fn.extend( { + hover: function( fnOver, fnOut ) { + return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver ); + } +} ); + + + + +support.focusin = "onfocusin" in window; + + +// Support: Firefox <=44 +// Firefox doesn't have focus(in | out) events +// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 +// +// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 +// focus(in | out) events fire after focus & blur events, +// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order +// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 +if ( !support.focusin ) { + jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { + + // Attach a single capturing handler on the document while someone wants focusin/focusout + var handler = function( event ) { + jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); + }; + + jQuery.event.special[ fix ] = { + setup: function() { + var doc = this.ownerDocument || this, + attaches = dataPriv.access( doc, fix ); + + if ( !attaches ) { + doc.addEventListener( orig, handler, true ); + } + dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); + }, + teardown: function() { + var doc = this.ownerDocument || this, + attaches = dataPriv.access( doc, fix ) - 1; + + if ( !attaches ) { + doc.removeEventListener( orig, handler, true ); + dataPriv.remove( doc, fix ); + + } else { + dataPriv.access( doc, fix, attaches ); + } + } + }; + } ); +} +var location = window.location; + +var nonce = jQuery.now(); + +var rquery = ( /\?/ ); + + + +// Cross-browser xml parsing +jQuery.parseXML = function( data ) { + var xml; + if ( !data || typeof data !== "string" ) { + return null; + } + + // Support: IE 9 - 11 only + // IE throws on parseFromString with invalid input. 
+ try { + xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); + } catch ( e ) { + xml = undefined; + } + + if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) { + jQuery.error( "Invalid XML: " + data ); + } + return xml; +}; + + +var + rbracket = /\[\]$/, + rCRLF = /\r?\n/g, + rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, + rsubmittable = /^(?:input|select|textarea|keygen)/i; + +function buildParams( prefix, obj, traditional, add ) { + var name; + + if ( Array.isArray( obj ) ) { + + // Serialize array item. + jQuery.each( obj, function( i, v ) { + if ( traditional || rbracket.test( prefix ) ) { + + // Treat each array item as a scalar. + add( prefix, v ); + + } else { + + // Item is non-scalar (array or object), encode its numeric index. + buildParams( + prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", + v, + traditional, + add + ); + } + } ); + + } else if ( !traditional && jQuery.type( obj ) === "object" ) { + + // Serialize object item. + for ( name in obj ) { + buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); + } + + } else { + + // Serialize scalar item. + add( prefix, obj ); + } +} + +// Serialize an array of form elements or a set of +// key/values into a query string +jQuery.param = function( a, traditional ) { + var prefix, + s = [], + add = function( key, valueOrFunction ) { + + // If value is a function, invoke it and use its return value + var value = jQuery.isFunction( valueOrFunction ) ? + valueOrFunction() : + valueOrFunction; + + s[ s.length ] = encodeURIComponent( key ) + "=" + + encodeURIComponent( value == null ? "" : value ); + }; + + // If an array was passed in, assume that it is an array of form elements. 
+ if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { + + // Serialize the form elements + jQuery.each( a, function() { + add( this.name, this.value ); + } ); + + } else { + + // If traditional, encode the "old" way (the way 1.3.2 or older + // did it), otherwise encode params recursively. + for ( prefix in a ) { + buildParams( prefix, a[ prefix ], traditional, add ); + } + } + + // Return the resulting serialization + return s.join( "&" ); +}; + +jQuery.fn.extend( { + serialize: function() { + return jQuery.param( this.serializeArray() ); + }, + serializeArray: function() { + return this.map( function() { + + // Can add propHook for "elements" to filter or add form elements + var elements = jQuery.prop( this, "elements" ); + return elements ? jQuery.makeArray( elements ) : this; + } ) + .filter( function() { + var type = this.type; + + // Use .is( ":disabled" ) so that fieldset[disabled] works + return this.name && !jQuery( this ).is( ":disabled" ) && + rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && + ( this.checked || !rcheckableType.test( type ) ); + } ) + .map( function( i, elem ) { + var val = jQuery( this ).val(); + + if ( val == null ) { + return null; + } + + if ( Array.isArray( val ) ) { + return jQuery.map( val, function( val ) { + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ); + } + + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ).get(); + } +} ); + + +var + r20 = /%20/g, + rhash = /#.*$/, + rantiCache = /([?&])_=[^&]*/, + rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, + + // #7653, #8125, #8152: local protocol detection + rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, + rnoContent = /^(?:GET|HEAD)$/, + rprotocol = /^\/\//, + + /* Prefilters + * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) + * 2) These are called: + * - BEFORE asking for a transport + * - AFTER param serialization (s.data is a 
string if s.processData is true) + * 3) key is the dataType + * 4) the catchall symbol "*" can be used + * 5) execution will start with transport dataType and THEN continue down to "*" if needed + */ + prefilters = {}, + + /* Transports bindings + * 1) key is the dataType + * 2) the catchall symbol "*" can be used + * 3) selection will start with transport dataType and THEN go to "*" if needed + */ + transports = {}, + + // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression + allTypes = "*/".concat( "*" ), + + // Anchor tag for parsing the document origin + originAnchor = document.createElement( "a" ); + originAnchor.href = location.href; + +// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport +function addToPrefiltersOrTransports( structure ) { + + // dataTypeExpression is optional and defaults to "*" + return function( dataTypeExpression, func ) { + + if ( typeof dataTypeExpression !== "string" ) { + func = dataTypeExpression; + dataTypeExpression = "*"; + } + + var dataType, + i = 0, + dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; + + if ( jQuery.isFunction( func ) ) { + + // For each dataType in the dataTypeExpression + while ( ( dataType = dataTypes[ i++ ] ) ) { + + // Prepend if requested + if ( dataType[ 0 ] === "+" ) { + dataType = dataType.slice( 1 ) || "*"; + ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); + + // Otherwise append + } else { + ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); + } + } + } + }; +} + +// Base inspection function for prefilters and transports +function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { + + var inspected = {}, + seekingTransport = ( structure === transports ); + + function inspect( dataType ) { + var selected; + inspected[ dataType ] = true; + jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { + var dataTypeOrTransport = 
prefilterOrFactory( options, originalOptions, jqXHR ); + if ( typeof dataTypeOrTransport === "string" && + !seekingTransport && !inspected[ dataTypeOrTransport ] ) { + + options.dataTypes.unshift( dataTypeOrTransport ); + inspect( dataTypeOrTransport ); + return false; + } else if ( seekingTransport ) { + return !( selected = dataTypeOrTransport ); + } + } ); + return selected; + } + + return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); +} + +// A special extend for ajax options +// that takes "flat" options (not to be deep extended) +// Fixes #9887 +function ajaxExtend( target, src ) { + var key, deep, + flatOptions = jQuery.ajaxSettings.flatOptions || {}; + + for ( key in src ) { + if ( src[ key ] !== undefined ) { + ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; + } + } + if ( deep ) { + jQuery.extend( true, target, deep ); + } + + return target; +} + +/* Handles responses to an ajax request: + * - finds the right dataType (mediates between content-type and expected dataType) + * - returns the corresponding response + */ +function ajaxHandleResponses( s, jqXHR, responses ) { + + var ct, type, finalDataType, firstDataType, + contents = s.contents, + dataTypes = s.dataTypes; + + // Remove auto dataType and get content-type in the process + while ( dataTypes[ 0 ] === "*" ) { + dataTypes.shift(); + if ( ct === undefined ) { + ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); + } + } + + // Check if we're dealing with a known content-type + if ( ct ) { + for ( type in contents ) { + if ( contents[ type ] && contents[ type ].test( ct ) ) { + dataTypes.unshift( type ); + break; + } + } + } + + // Check to see if we have a response for the expected dataType + if ( dataTypes[ 0 ] in responses ) { + finalDataType = dataTypes[ 0 ]; + } else { + + // Try convertible dataTypes + for ( type in responses ) { + if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { + finalDataType = 
type; + break; + } + if ( !firstDataType ) { + firstDataType = type; + } + } + + // Or just use first one + finalDataType = finalDataType || firstDataType; + } + + // If we found a dataType + // We add the dataType to the list if needed + // and return the corresponding response + if ( finalDataType ) { + if ( finalDataType !== dataTypes[ 0 ] ) { + dataTypes.unshift( finalDataType ); + } + return responses[ finalDataType ]; + } +} + +/* Chain conversions given the request and the original response + * Also sets the responseXXX fields on the jqXHR instance + */ +function ajaxConvert( s, response, jqXHR, isSuccess ) { + var conv2, current, conv, tmp, prev, + converters = {}, + + // Work with a copy of dataTypes in case we need to modify it for conversion + dataTypes = s.dataTypes.slice(); + + // Create converters map with lowercased keys + if ( dataTypes[ 1 ] ) { + for ( conv in s.converters ) { + converters[ conv.toLowerCase() ] = s.converters[ conv ]; + } + } + + current = dataTypes.shift(); + + // Convert to each sequential dataType + while ( current ) { + + if ( s.responseFields[ current ] ) { + jqXHR[ s.responseFields[ current ] ] = response; + } + + // Apply the dataFilter if provided + if ( !prev && isSuccess && s.dataFilter ) { + response = s.dataFilter( response, s.dataType ); + } + + prev = current; + current = dataTypes.shift(); + + if ( current ) { + + // There's only work to do if current dataType is non-auto + if ( current === "*" ) { + + current = prev; + + // Convert response if prev dataType is non-auto and differs from current + } else if ( prev !== "*" && prev !== current ) { + + // Seek a direct converter + conv = converters[ prev + " " + current ] || converters[ "* " + current ]; + + // If none found, seek a pair + if ( !conv ) { + for ( conv2 in converters ) { + + // If conv2 outputs current + tmp = conv2.split( " " ); + if ( tmp[ 1 ] === current ) { + + // If prev can be converted to accepted input + conv = converters[ prev + " " + tmp[ 0 ] ] 
|| + converters[ "* " + tmp[ 0 ] ]; + if ( conv ) { + + // Condense equivalence converters + if ( conv === true ) { + conv = converters[ conv2 ]; + + // Otherwise, insert the intermediate dataType + } else if ( converters[ conv2 ] !== true ) { + current = tmp[ 0 ]; + dataTypes.unshift( tmp[ 1 ] ); + } + break; + } + } + } + } + + // Apply converter (if not an equivalence) + if ( conv !== true ) { + + // Unless errors are allowed to bubble, catch and return them + if ( conv && s.throws ) { + response = conv( response ); + } else { + try { + response = conv( response ); + } catch ( e ) { + return { + state: "parsererror", + error: conv ? e : "No conversion from " + prev + " to " + current + }; + } + } + } + } + } + } + + return { state: "success", data: response }; +} + +jQuery.extend( { + + // Counter for holding the number of active queries + active: 0, + + // Last-Modified header cache for next request + lastModified: {}, + etag: {}, + + ajaxSettings: { + url: location.href, + type: "GET", + isLocal: rlocalProtocol.test( location.protocol ), + global: true, + processData: true, + async: true, + contentType: "application/x-www-form-urlencoded; charset=UTF-8", + + /* + timeout: 0, + data: null, + dataType: null, + username: null, + password: null, + cache: null, + throws: false, + traditional: false, + headers: {}, + */ + + accepts: { + "*": allTypes, + text: "text/plain", + html: "text/html", + xml: "application/xml, text/xml", + json: "application/json, text/javascript" + }, + + contents: { + xml: /\bxml\b/, + html: /\bhtml/, + json: /\bjson\b/ + }, + + responseFields: { + xml: "responseXML", + text: "responseText", + json: "responseJSON" + }, + + // Data converters + // Keys separate source (or catchall "*") and destination types with a single space + converters: { + + // Convert anything to text + "* text": String, + + // Text to html (true = no transformation) + "text html": true, + + // Evaluate text as a json expression + "text json": JSON.parse, + + // Parse 
text as xml + "text xml": jQuery.parseXML + }, + + // For options that shouldn't be deep extended: + // you can add your own custom options here if + // and when you create one that shouldn't be + // deep extended (see ajaxExtend) + flatOptions: { + url: true, + context: true + } + }, + + // Creates a full fledged settings object into target + // with both ajaxSettings and settings fields. + // If target is omitted, writes into ajaxSettings. + ajaxSetup: function( target, settings ) { + return settings ? + + // Building a settings object + ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : + + // Extending ajaxSettings + ajaxExtend( jQuery.ajaxSettings, target ); + }, + + ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), + ajaxTransport: addToPrefiltersOrTransports( transports ), + + // Main method + ajax: function( url, options ) { + + // If url is an object, simulate pre-1.5 signature + if ( typeof url === "object" ) { + options = url; + url = undefined; + } + + // Force options to be an object + options = options || {}; + + var transport, + + // URL without anti-cache param + cacheURL, + + // Response headers + responseHeadersString, + responseHeaders, + + // timeout handle + timeoutTimer, + + // Url cleanup var + urlAnchor, + + // Request state (becomes false upon send and true upon completion) + completed, + + // To know if global events are to be dispatched + fireGlobals, + + // Loop variable + i, + + // uncached part of the url + uncached, + + // Create the final options object + s = jQuery.ajaxSetup( {}, options ), + + // Callbacks context + callbackContext = s.context || s, + + // Context for global events is callbackContext if it is a DOM node or jQuery collection + globalEventContext = s.context && + ( callbackContext.nodeType || callbackContext.jquery ) ? 
+ jQuery( callbackContext ) : + jQuery.event, + + // Deferreds + deferred = jQuery.Deferred(), + completeDeferred = jQuery.Callbacks( "once memory" ), + + // Status-dependent callbacks + statusCode = s.statusCode || {}, + + // Headers (they are sent all at once) + requestHeaders = {}, + requestHeadersNames = {}, + + // Default abort message + strAbort = "canceled", + + // Fake xhr + jqXHR = { + readyState: 0, + + // Builds headers hashtable if needed + getResponseHeader: function( key ) { + var match; + if ( completed ) { + if ( !responseHeaders ) { + responseHeaders = {}; + while ( ( match = rheaders.exec( responseHeadersString ) ) ) { + responseHeaders[ match[ 1 ].toLowerCase() ] = match[ 2 ]; + } + } + match = responseHeaders[ key.toLowerCase() ]; + } + return match == null ? null : match; + }, + + // Raw string + getAllResponseHeaders: function() { + return completed ? responseHeadersString : null; + }, + + // Caches the header + setRequestHeader: function( name, value ) { + if ( completed == null ) { + name = requestHeadersNames[ name.toLowerCase() ] = + requestHeadersNames[ name.toLowerCase() ] || name; + requestHeaders[ name ] = value; + } + return this; + }, + + // Overrides response content-type header + overrideMimeType: function( type ) { + if ( completed == null ) { + s.mimeType = type; + } + return this; + }, + + // Status-dependent callbacks + statusCode: function( map ) { + var code; + if ( map ) { + if ( completed ) { + + // Execute the appropriate callbacks + jqXHR.always( map[ jqXHR.status ] ); + } else { + + // Lazy-add the new callbacks in a way that preserves old ones + for ( code in map ) { + statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; + } + } + } + return this; + }, + + // Cancel the request + abort: function( statusText ) { + var finalText = statusText || strAbort; + if ( transport ) { + transport.abort( finalText ); + } + done( 0, finalText ); + return this; + } + }; + + // Attach deferreds + deferred.promise( jqXHR ); + + // 
Add protocol if not provided (prefilters might expect it) + // Handle falsy url in the settings object (#10093: consistency with old signature) + // We also use the url parameter if available + s.url = ( ( url || s.url || location.href ) + "" ) + .replace( rprotocol, location.protocol + "//" ); + + // Alias method option to type as per ticket #12004 + s.type = options.method || options.type || s.method || s.type; + + // Extract dataTypes list + s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; + + // A cross-domain request is in order when the origin doesn't match the current origin. + if ( s.crossDomain == null ) { + urlAnchor = document.createElement( "a" ); + + // Support: IE <=8 - 11, Edge 12 - 13 + // IE throws exception on accessing the href property if url is malformed, + // e.g. http://example.com:80x/ + try { + urlAnchor.href = s.url; + + // Support: IE <=8 - 11 only + // Anchor's host property isn't correctly set when s.url is relative + urlAnchor.href = urlAnchor.href; + s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== + urlAnchor.protocol + "//" + urlAnchor.host; + } catch ( e ) { + + // If there is an error parsing the URL, assume it is crossDomain, + // it can be rejected by the transport if it is invalid + s.crossDomain = true; + } + } + + // Convert data if not already a string + if ( s.data && s.processData && typeof s.data !== "string" ) { + s.data = jQuery.param( s.data, s.traditional ); + } + + // Apply prefilters + inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); + + // If request was aborted inside a prefilter, stop there + if ( completed ) { + return jqXHR; + } + + // We can fire global events as of now if asked to + // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) + fireGlobals = jQuery.event && s.global; + + // Watch for a new set of requests + if ( fireGlobals && jQuery.active++ === 0 ) { + jQuery.event.trigger( "ajaxStart" ); + } + + // 
Uppercase the type + s.type = s.type.toUpperCase(); + + // Determine if request has content + s.hasContent = !rnoContent.test( s.type ); + + // Save the URL in case we're toying with the If-Modified-Since + // and/or If-None-Match header later on + // Remove hash to simplify url manipulation + cacheURL = s.url.replace( rhash, "" ); + + // More options handling for requests with no content + if ( !s.hasContent ) { + + // Remember the hash so we can put it back + uncached = s.url.slice( cacheURL.length ); + + // If data is available, append data to url + if ( s.data ) { + cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; + + // #9682: remove data so that it's not used in an eventual retry + delete s.data; + } + + // Add or update anti-cache param if needed + if ( s.cache === false ) { + cacheURL = cacheURL.replace( rantiCache, "$1" ); + uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce++ ) + uncached; + } + + // Put hash and anti-cache on the URL that will be requested (gh-1732) + s.url = cacheURL + uncached; + + // Change '%20' to '+' if this is encoded form body content (gh-2658) + } else if ( s.data && s.processData && + ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { + s.data = s.data.replace( r20, "+" ); + } + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
+ if ( s.ifModified ) { + if ( jQuery.lastModified[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); + } + if ( jQuery.etag[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); + } + } + + // Set the correct header, if data is being sent + if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { + jqXHR.setRequestHeader( "Content-Type", s.contentType ); + } + + // Set the Accepts header for the server, depending on the dataType + jqXHR.setRequestHeader( + "Accept", + s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? + s.accepts[ s.dataTypes[ 0 ] ] + + ( s.dataTypes[ 0 ] !== "*" ? ", " + allTypes + "; q=0.01" : "" ) : + s.accepts[ "*" ] + ); + + // Check for headers option + for ( i in s.headers ) { + jqXHR.setRequestHeader( i, s.headers[ i ] ); + } + + // Allow custom headers/mimetypes and early abort + if ( s.beforeSend && + ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { + + // Abort if not done already and return + return jqXHR.abort(); + } + + // Aborting is no longer a cancellation + strAbort = "abort"; + + // Install callbacks on deferreds + completeDeferred.add( s.complete ); + jqXHR.done( s.success ); + jqXHR.fail( s.error ); + + // Get transport + transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); + + // If no transport, we auto-abort + if ( !transport ) { + done( -1, "No Transport" ); + } else { + jqXHR.readyState = 1; + + // Send global event + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); + } + + // If request was aborted inside ajaxSend, stop there + if ( completed ) { + return jqXHR; + } + + // Timeout + if ( s.async && s.timeout > 0 ) { + timeoutTimer = window.setTimeout( function() { + jqXHR.abort( "timeout" ); + }, s.timeout ); + } + + try { + completed = false; + transport.send( requestHeaders, done ); + } catch ( e ) { + + // Rethrow post-completion 
exceptions + if ( completed ) { + throw e; + } + + // Propagate others as results + done( -1, e ); + } + } + + // Callback for when everything is done + function done( status, nativeStatusText, responses, headers ) { + var isSuccess, success, error, response, modified, + statusText = nativeStatusText; + + // Ignore repeat invocations + if ( completed ) { + return; + } + + completed = true; + + // Clear timeout if it exists + if ( timeoutTimer ) { + window.clearTimeout( timeoutTimer ); + } + + // Dereference transport for early garbage collection + // (no matter how long the jqXHR object will be used) + transport = undefined; + + // Cache response headers + responseHeadersString = headers || ""; + + // Set readyState + jqXHR.readyState = status > 0 ? 4 : 0; + + // Determine if successful + isSuccess = status >= 200 && status < 300 || status === 304; + + // Get response data + if ( responses ) { + response = ajaxHandleResponses( s, jqXHR, responses ); + } + + // Convert no matter what (that way responseXXX fields are always set) + response = ajaxConvert( s, response, jqXHR, isSuccess ); + + // If successful, handle type chaining + if ( isSuccess ) { + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
+ if ( s.ifModified ) { + modified = jqXHR.getResponseHeader( "Last-Modified" ); + if ( modified ) { + jQuery.lastModified[ cacheURL ] = modified; + } + modified = jqXHR.getResponseHeader( "etag" ); + if ( modified ) { + jQuery.etag[ cacheURL ] = modified; + } + } + + // if no content + if ( status === 204 || s.type === "HEAD" ) { + statusText = "nocontent"; + + // if not modified + } else if ( status === 304 ) { + statusText = "notmodified"; + + // If we have data, let's convert it + } else { + statusText = response.state; + success = response.data; + error = response.error; + isSuccess = !error; + } + } else { + + // Extract error from statusText and normalize for non-aborts + error = statusText; + if ( status || !statusText ) { + statusText = "error"; + if ( status < 0 ) { + status = 0; + } + } + } + + // Set data for the fake xhr object + jqXHR.status = status; + jqXHR.statusText = ( nativeStatusText || statusText ) + ""; + + // Success/Error + if ( isSuccess ) { + deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); + } else { + deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); + } + + // Status-dependent callbacks + jqXHR.statusCode( statusCode ); + statusCode = undefined; + + if ( fireGlobals ) { + globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", + [ jqXHR, s, isSuccess ? 
success : error ] ); + } + + // Complete + completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); + + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); + + // Handle the global AJAX counter + if ( !( --jQuery.active ) ) { + jQuery.event.trigger( "ajaxStop" ); + } + } + } + + return jqXHR; + }, + + getJSON: function( url, data, callback ) { + return jQuery.get( url, data, callback, "json" ); + }, + + getScript: function( url, callback ) { + return jQuery.get( url, undefined, callback, "script" ); + } +} ); + +jQuery.each( [ "get", "post" ], function( i, method ) { + jQuery[ method ] = function( url, data, callback, type ) { + + // Shift arguments if data argument was omitted + if ( jQuery.isFunction( data ) ) { + type = type || callback; + callback = data; + data = undefined; + } + + // The url can be an options object (which then must have .url) + return jQuery.ajax( jQuery.extend( { + url: url, + type: method, + dataType: type, + data: data, + success: callback + }, jQuery.isPlainObject( url ) && url ) ); + }; +} ); + + +jQuery._evalUrl = function( url ) { + return jQuery.ajax( { + url: url, + + // Make this explicit, since user can override this through ajaxSetup (#11264) + type: "GET", + dataType: "script", + cache: true, + async: false, + global: false, + "throws": true + } ); +}; + + +jQuery.fn.extend( { + wrapAll: function( html ) { + var wrap; + + if ( this[ 0 ] ) { + if ( jQuery.isFunction( html ) ) { + html = html.call( this[ 0 ] ); + } + + // The elements to wrap the target around + wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); + + if ( this[ 0 ].parentNode ) { + wrap.insertBefore( this[ 0 ] ); + } + + wrap.map( function() { + var elem = this; + + while ( elem.firstElementChild ) { + elem = elem.firstElementChild; + } + + return elem; + } ).append( this ); + } + + return this; + }, + + wrapInner: function( html ) { + if ( jQuery.isFunction( html ) ) { + return this.each( function( i ) 
{ + jQuery( this ).wrapInner( html.call( this, i ) ); + } ); + } + + return this.each( function() { + var self = jQuery( this ), + contents = self.contents(); + + if ( contents.length ) { + contents.wrapAll( html ); + + } else { + self.append( html ); + } + } ); + }, + + wrap: function( html ) { + var isFunction = jQuery.isFunction( html ); + + return this.each( function( i ) { + jQuery( this ).wrapAll( isFunction ? html.call( this, i ) : html ); + } ); + }, + + unwrap: function( selector ) { + this.parent( selector ).not( "body" ).each( function() { + jQuery( this ).replaceWith( this.childNodes ); + } ); + return this; + } +} ); + + +jQuery.expr.pseudos.hidden = function( elem ) { + return !jQuery.expr.pseudos.visible( elem ); +}; +jQuery.expr.pseudos.visible = function( elem ) { + return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); +}; + + + + +jQuery.ajaxSettings.xhr = function() { + try { + return new window.XMLHttpRequest(); + } catch ( e ) {} +}; + +var xhrSuccessStatus = { + + // File protocol always yields status code 0, assume 200 + 0: 200, + + // Support: IE <=9 only + // #1450: sometimes IE returns 1223 when it should be 204 + 1223: 204 + }, + xhrSupported = jQuery.ajaxSettings.xhr(); + +support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); +support.ajax = xhrSupported = !!xhrSupported; + +jQuery.ajaxTransport( function( options ) { + var callback, errorCallback; + + // Cross domain only allowed if supported through XMLHttpRequest + if ( support.cors || xhrSupported && !options.crossDomain ) { + return { + send: function( headers, complete ) { + var i, + xhr = options.xhr(); + + xhr.open( + options.type, + options.url, + options.async, + options.username, + options.password + ); + + // Apply custom fields if provided + if ( options.xhrFields ) { + for ( i in options.xhrFields ) { + xhr[ i ] = options.xhrFields[ i ]; + } + } + + // Override mime type if needed + if ( options.mimeType && 
xhr.overrideMimeType ) { + xhr.overrideMimeType( options.mimeType ); + } + + // X-Requested-With header + // For cross-domain requests, seeing as conditions for a preflight are + // akin to a jigsaw puzzle, we simply never set it to be sure. + // (it can always be set on a per-request basis or even using ajaxSetup) + // For same-domain requests, won't change header if already provided. + if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { + headers[ "X-Requested-With" ] = "XMLHttpRequest"; + } + + // Set headers + for ( i in headers ) { + xhr.setRequestHeader( i, headers[ i ] ); + } + + // Callback + callback = function( type ) { + return function() { + if ( callback ) { + callback = errorCallback = xhr.onload = + xhr.onerror = xhr.onabort = xhr.onreadystatechange = null; + + if ( type === "abort" ) { + xhr.abort(); + } else if ( type === "error" ) { + + // Support: IE <=9 only + // On a manual native abort, IE9 throws + // errors on any property access that is not readyState + if ( typeof xhr.status !== "number" ) { + complete( 0, "error" ); + } else { + complete( + + // File: protocol always yields status 0; see #8605, #14207 + xhr.status, + xhr.statusText + ); + } + } else { + complete( + xhrSuccessStatus[ xhr.status ] || xhr.status, + xhr.statusText, + + // Support: IE <=9 only + // IE9 has no XHR2 but throws on binary (trac-11426) + // For XHR2 non-text, let the caller handle it (gh-2498) + ( xhr.responseType || "text" ) !== "text" || + typeof xhr.responseText !== "string" ? 
+ { binary: xhr.response } : + { text: xhr.responseText }, + xhr.getAllResponseHeaders() + ); + } + } + }; + }; + + // Listen to events + xhr.onload = callback(); + errorCallback = xhr.onerror = callback( "error" ); + + // Support: IE 9 only + // Use onreadystatechange to replace onabort + // to handle uncaught aborts + if ( xhr.onabort !== undefined ) { + xhr.onabort = errorCallback; + } else { + xhr.onreadystatechange = function() { + + // Check readyState before timeout as it changes + if ( xhr.readyState === 4 ) { + + // Allow onerror to be called first, + // but that will not handle a native abort + // Also, save errorCallback to a variable + // as xhr.onerror cannot be accessed + window.setTimeout( function() { + if ( callback ) { + errorCallback(); + } + } ); + } + }; + } + + // Create the abort callback + callback = callback( "abort" ); + + try { + + // Do send the request (this may raise an exception) + xhr.send( options.hasContent && options.data || null ); + } catch ( e ) { + + // #14683: Only rethrow if this hasn't been notified as an error yet + if ( callback ) { + throw e; + } + } + }, + + abort: function() { + if ( callback ) { + callback(); + } + } + }; + } +} ); + + + + +// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) +jQuery.ajaxPrefilter( function( s ) { + if ( s.crossDomain ) { + s.contents.script = false; + } +} ); + +// Install script dataType +jQuery.ajaxSetup( { + accepts: { + script: "text/javascript, application/javascript, " + + "application/ecmascript, application/x-ecmascript" + }, + contents: { + script: /\b(?:java|ecma)script\b/ + }, + converters: { + "text script": function( text ) { + jQuery.globalEval( text ); + return text; + } + } +} ); + +// Handle cache's special case and crossDomain +jQuery.ajaxPrefilter( "script", function( s ) { + if ( s.cache === undefined ) { + s.cache = false; + } + if ( s.crossDomain ) { + s.type = "GET"; + } +} ); + +// Bind script tag hack transport 
+jQuery.ajaxTransport( "script", function( s ) { + + // This transport only deals with cross domain requests + if ( s.crossDomain ) { + var script, callback; + return { + send: function( _, complete ) { + script = jQuery( " + - - + \ No newline at end of file diff --git a/help/build/html/index.html b/help/build/html/index.html index b6ff46c..e47028c 100644 --- a/help/build/html/index.html +++ b/help/build/html/index.html @@ -4,27 +4,18 @@ + Welcome to UMPE’s documentation! — UMPE 0.1 documentation - + - - + @@ -99,7 +92,7 @@

Navigation

\ No newline at end of file diff --git a/help/build/html/search.html b/help/build/html/search.html index febe134..ca57b52 100644 --- a/help/build/html/search.html +++ b/help/build/html/search.html @@ -4,20 +4,12 @@ + Search — UMPE 0.1 documentation - + @@ -31,8 +23,7 @@ - - + \ No newline at end of file diff --git a/help/build/html/searchindex.js b/help/build/html/searchindex.js index 1b456f8..088378a 100644 --- a/help/build/html/searchindex.js +++ b/help/build/html/searchindex.js @@ -1 +1 @@ -Search.setIndex({docnames:["index"],envversion:52,filenames:["index.rst"],objects:{},objnames:{},objtypes:{},terms:{content:0,index:0,modul:0,page:0,search:0},titles:["Welcome to UMPE\u2019s documentation!"],titleterms:{document:0,indic:0,tabl:0,ump:0,welcom:0}}) \ No newline at end of file +Search.setIndex({docnames:["index"],envversion:53,filenames:["index.rst"],objects:{},objnames:{},objtypes:{},terms:{content:0,index:0,modul:0,page:0,search:0},titles:["Welcome to UMPE\u2019s documentation!"],titleterms:{document:0,indic:0,tabl:0,ump:0,welcom:0}}) \ No newline at end of file diff --git a/metadata.txt b/metadata.txt index 0369249..9812107 100644 --- a/metadata.txt +++ b/metadata.txt @@ -8,7 +8,7 @@ [general] name=UMEP -qgisMinimumVersion=2.0 +qgisMinimumVersion=3.0 description=Urban Multi-scale Environmental Predictor version=1.5 @@ -26,7 +26,7 @@ repository=https://bitbucket.org/fredrik_ucg/umep # Uncomment the following line and add your changelog: changelog= - 1.5 : LONG TERM RELEASE. + 1.5 : LONG TERM RELEASE. Early adopter release. Some parts of the plugin is not yet operational. 1.4.2 : Response to issue #42 and #46. Initiation of migration to QGIS3. 
1.4.1 : Bug fix in land cover fraction calculations 1.4 : Response to issue #36 and bugfix in LQf @@ -80,7 +80,7 @@ changelog= # Tags are comma separated with spaces allowed tags=urban climate, urban planning -homepage=http://umep-docs.readthedocs.io/en/latest/index.html +homepage=http://www.urban-climate.net/umep/ category=Plugins icon=Icons/icon_umep.png diff --git a/resources.py b/resources.py index 64ea01e..cf9f86a 100644 --- a/resources.py +++ b/resources.py @@ -2,14 +2,13 @@ # Resource object code # -# Created: tor 16. nov 10:34:47 2017 -# by: The Resource Compiler for PyQt (Qt v4.8.5) +# Created by: The Resource Compiler for PyQt5 (Qt v5.9.2) # # WARNING! All changes made in this file will be lost! -from PyQt4 import QtCore +from PyQt5 import QtCore -qt_resource_data = "\ +qt_resource_data = b"\ \x00\x00\x04\x0a\ \x89\ \x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\ @@ -79,7 +78,7 @@ \x00\x49\x45\x4e\x44\xae\x42\x60\x82\ " -qt_resource_name = "\ +qt_resource_name = b"\ \x00\x07\ \x07\x3b\xe0\xb3\ \x00\x70\ @@ -94,17 +93,36 @@ \x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\ " -qt_resource_struct = "\ +qt_resource_struct_v1 = b"\ \x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\ \x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\ \x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\ \x00\x00\x00\x22\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\ " +qt_resource_struct_v2 = b"\ +\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\ +\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\ +\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\ +\x00\x00\x00\x00\x00\x00\x00\x00\ +\x00\x00\x00\x22\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\ +\x00\x00\x01\x53\x13\xbb\x0f\x3c\ +" + +qt_version = QtCore.qVersion().split('.') +if qt_version < ['5', '8', '0']: + rcc_version = 1 + qt_resource_struct = qt_resource_struct_v1 +else: + rcc_version = 2 + 
qt_resource_struct = qt_resource_struct_v2 + def qInitResources(): - QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data) + QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data) def qCleanupResources(): - QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data) + QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data) qInitResources() diff --git a/suewsmodel/BaseFiles/RunControl.nml b/suewsmodel/BaseFiles/RunControl.nml index af46597..17cff4b 100644 --- a/suewsmodel/BaseFiles/RunControl.nml +++ b/suewsmodel/BaseFiles/RunControl.nml @@ -6,7 +6,7 @@ NetRadiationMethod=3 EmissionsMethod=2 StorageHeatMethod=1 OHMIncQF=0 -StabilityMethod=3 +StabilityMethod=2 RoughLenHeatMethod=2 RoughLenMomMethod=2 SMDMethod=0 diff --git a/suewsmodel/Suews_wrapper_v2018a.py b/suewsmodel/Suews_wrapper_v2018a.py index f7b521c..75df5db 100644 --- a/suewsmodel/Suews_wrapper_v2018a.py +++ b/suewsmodel/Suews_wrapper_v2018a.py @@ -1,12 +1,16 @@ +from __future__ import print_function +from __future__ import absolute_import +from builtins import str __author__ = 'xlinfr' -from PyQt4.QtGui import QAction, QIcon, QFileDialog, QMessageBox, QColor +from qgis.PyQt.QtWidgets import QAction, QFileDialog, QMessageBox +from qgis.PyQt.QtGui import QIcon, QColor def wrapper(pathtoplugin): import numpy as np - import suewsdataprocessing - import suewsplotting + from . import suewsdataprocessing + from . 
import suewsplotting import subprocess from ..Utilities import f90nml import os diff --git a/suewsmodel/suewsdataprocessing.py b/suewsmodel/suewsdataprocessing.py index b39da62..c6968cd 100644 --- a/suewsmodel/suewsdataprocessing.py +++ b/suewsmodel/suewsdataprocessing.py @@ -1,3 +1,7 @@ +from __future__ import print_function +from builtins import input +from builtins import range +from builtins import object __author__ = 'Fredrik Lindberg' # This class will be used to prepare input met data into UMEP @@ -21,7 +25,7 @@ def leap_year(yy): return leapyear -class SuewsDataProcessing: +class SuewsDataProcessing(object): def __init__(self): pass @@ -180,18 +184,18 @@ def from5minto1hour_v1(self, results, SumCol, LastCol, TimeCol): def from5mintoanytime(self, results, SumCol, LastCol, TimeCol, minint): splitparts = minint / 5 - suews_anytime = np.zeros((results.shape[0] / splitparts, results.shape[1])) + suews_anytime = np.zeros((int(results.shape[0] / splitparts), results.shape[1])) for i in range(0, suews_anytime.shape[0]): - suews_anytime[i, 5:results.shape[1] - 1] = np.mean(results[i * splitparts: i * splitparts + splitparts, 5:results.shape[1] - 1], axis=0) + suews_anytime[i, 5:results.shape[1] - 1] = np.mean(results[int(i * splitparts): int(i * splitparts + splitparts), 5:results.shape[1] - 1], axis=0) for j in range(0, SumCol.__len__()): - suews_anytime[i, SumCol[j]] = np.sum(results[i * splitparts: i * splitparts + splitparts, SumCol[j]], axis=0) + suews_anytime[i, SumCol[j]] = np.sum(results[int(i * splitparts): int(i * splitparts + splitparts), SumCol[j]], axis=0) for j in range(0, LastCol.__len__()): - suews_anytime[i, LastCol[j]] = results[i * splitparts + splitparts - 1, LastCol[j]] + suews_anytime[i, LastCol[j]] = results[int(i * splitparts + splitparts - 1), LastCol[j]] - suews_anytime[i, TimeCol] = results[i * splitparts + splitparts - 1, TimeCol] + suews_anytime[i, TimeCol] = results[int(i * splitparts + splitparts - 1), TimeCol] return suews_anytime @@ 
-239,72 +243,74 @@ def translatemetdata(self, old, ver, inputdata, outputdata, delim): else: met_old = np.loadtxt(inputdata, skiprows=1) - user_input = int(input('Put in manually or translate from v2014 (1 or 0)?: ')) - yyyy_exist = int(input('yyyy exist (1 or 0)?: ')) + user_input = int(eval(input('Put in manually or translate from v2014 (1 or 0)?: '))) + yyyy_exist = int(eval(input('yyyy exist (1 or 0)?: '))) if yyyy_exist == 1: - yyyy_col = int(input('column for yyyy: ')) - 1 + yyyy_col = int(eval(input('column for yyyy: '))) - 1 else: - yy = int(input('Specify year (yyyy): ')) + yy = int(eval(input('Specify year (yyyy): '))) if user_input == 1: - doy_exist = int(input('doy exist (1 or 0)?: ')) + doy_exist = int(eval(input('doy exist (1 or 0)?: '))) if doy_exist == 1: - doy_col = int(input('column for doy: ')) - 1 + doy_col = int(eval(input('column for doy: '))) - 1 else: - month_col = int(input('column for month: ')) - 1 - day_col = int(input('column for day of month: ')) - 1 + month_col = int(eval(input('column for month: '))) - 1 + day_col = int(eval(input('column for day of month: '))) - 1 - hh_col = int(input('column for hour: ')) - 1 - dectime_exist = int(input('dectime exist (1 or 0)?: ')) + hh_col = int(eval(input('column for hour: '))) - 1 + dectime_exist = int(eval(input('dectime exist (1 or 0)?: '))) if dectime_exist == 1: - dectime_col = int(input('column for dectime: ')) - 1 + dectime_col = int(eval(input('column for dectime: '))) - 1 if ver == 2015: dechour = (met_old[:, dectime_col] - np.floor(met_old[:, dectime_col])) * 24 minute = np.round((dechour - np.floor(dechour)) * 60) minute[(minute == 60)] = 0 else: - min_col = int(input('column for min: ')) - 1 + min_col = int(eval(input('column for min: '))) - 1 - only_mandatory = int(input('Only put in mandatory [Ta,RH,Kdn,pres,Ws] (1 or 0)?: ')) + only_mandatory = int(eval(input('Only put in mandatory [Ta,RH,Kdn,pres,Ws] (1 or 0)?: '))) if only_mandatory == 1: - wind_col = int(input('column for Ws: 
')) - 1 - RH_col = int(input('column for RH: ')) - 1 - Ta_col = int(input('column for Ta: ')) - 1 - press_exist = int(input('Pressure exist (1 or 0)?: ')) + wind_col = int(eval(input('column for Ws: '))) - 1 + RH_col = int(eval(input('column for RH: '))) - 1 + Ta_col = int(eval(input('column for Ta: '))) - 1 + press_exist = int(eval(input('Pressure exist (1 or 0)?: '))) if press_exist == 1: - press_col = int(input('column for Pressure (kPa): ')) - 1 + press_col = int(eval(input('column for Pressure (kPa): '))) - 1 else: press_av = 101.3 - print 'Pressure set to 101.3 kPa' + # fix_print_with_import + print('Pressure set to 101.3 kPa') - grad_col = int(input('column for Kdn: ')) - 1 + grad_col = int(eval(input('column for Kdn: '))) - 1 else: - Qstar_col = int(input('column for Q*: ')) - 1 - Qh_col = int(input('column for Qh: ')) - 1 - Qe_col = int(input('column for Qe: ')) - 1 - Qs_col = int(input('column for Qs: ')) - 1 - Qf_col = int(input('column for Qf: ')) - 1 - wind_col = int(input('column for Ws: ')) - 1 - RH_col = int(input('column for RH: ')) - 1 - Ta_col = int(input('column for Ta: ')) - 1 - press_exist = int(input('Pressure exist (1 or 0)?: ')) + Qstar_col = int(eval(input('column for Q*: '))) - 1 + Qh_col = int(eval(input('column for Qh: '))) - 1 + Qe_col = int(eval(input('column for Qe: '))) - 1 + Qs_col = int(eval(input('column for Qs: '))) - 1 + Qf_col = int(eval(input('column for Qf: '))) - 1 + wind_col = int(eval(input('column for Ws: '))) - 1 + RH_col = int(eval(input('column for RH: '))) - 1 + Ta_col = int(eval(input('column for Ta: '))) - 1 + press_exist = int(eval(input('Pressure exist (1 or 0)?: '))) if press_exist == 1: - press_col = int(input('column for Pressure (kPa): ')) - 1 + press_col = int(eval(input('column for Pressure (kPa): '))) - 1 else: press_av = 101.3 - print 'Pressure set to 101.3 kPa' - rain_col = int(input('column for rain: ')) - 1 - grad_col = int(input('column for Kdn: ')) - 1 - snow_col = int(input('column for snow: ')) - 1 
- ldown_col = int(input('column for ldown: ')) - 1 - fcld_col = int(input('column for fcld: ')) - 1 - wuh_col = int(input('column for wuh: ')) - 1 - xsmd_col = int(input('column for xsmd: ')) - 1 - lai_col = int(input('column for lai: ')) - 1 - drad_col = int(input('column for kdiff: ')) - 1 - irad_col = int(input('column for kdir: ')) - 1 - wdir_col = int(input('column for Wdir: ')) - 1 + # fix_print_with_import + print('Pressure set to 101.3 kPa') + rain_col = int(eval(input('column for rain: '))) - 1 + grad_col = int(eval(input('column for Kdn: '))) - 1 + snow_col = int(eval(input('column for snow: '))) - 1 + ldown_col = int(eval(input('column for ldown: '))) - 1 + fcld_col = int(eval(input('column for fcld: '))) - 1 + wuh_col = int(eval(input('column for wuh: '))) - 1 + xsmd_col = int(eval(input('column for xsmd: '))) - 1 + lai_col = int(eval(input('column for lai: '))) - 1 + drad_col = int(eval(input('column for kdiff: '))) - 1 + irad_col = int(eval(input('column for kdir: '))) - 1 + wdir_col = int(eval(input('column for Wdir: '))) - 1 else: doy_col = 1 - 1 diff --git a/suewsmodel/suewsplotting.py b/suewsmodel/suewsplotting.py index cd90065..1db62c3 100644 --- a/suewsmodel/suewsplotting.py +++ b/suewsmodel/suewsplotting.py @@ -1,3 +1,5 @@ +from builtins import range +from builtins import object __author__ = 'Fredrik Lindberg' # This class will be used to plot output result from Suews @@ -35,7 +37,7 @@ def make_dectime(dataout): return dectime -class SuewsPlotting: +class SuewsPlotting(object): def __init__(self): pass