forked from micasense/imageprocessing
Commit
Add 10-band batch processing example with multiprocess batch stack saving
Showing 5 changed files with 391 additions and 6 deletions.
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,346 @@ | ||
{ | ||
"cells": [ | ||
{ | ||
"cell_type": "markdown", | ||
"metadata": {}, | ||
"source": [ | ||
"# 10-band Batch Processing Example\n", | ||
"\n", | ||
"In this example, we use the `micasense.imageset` class to load a set of directories of images into a list of `micasense.capture` objects, and we iterate over that list saving out each image as an aligned stack of images as separate bands in a single tiff file each. Next, we use the metadata from the original captures to write out a log file of the captures and their locations. Finally, we use `exiftool` from the command line to inject that metadata into the processed images, allowing us to stitch those images using commercial software such as Pix4D or Agisoft.\n", | ||
"\n", | ||
"For an example dataset, download and unzip either sample dataset at https://www.micasense.com/dual-camera-sample-data into your Downloads folder, and ensure the paths below point to the correct location." | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": null, | ||
"metadata": {}, | ||
"outputs": [], | ||
"source": [ | ||
"%load_ext autoreload\n", | ||
"%autoreload 2" | ||
] | ||
}, | ||
{ | ||
"cell_type": "markdown", | ||
"metadata": {}, | ||
"source": [ | ||
"## Load Images into ImageSet" | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": null, | ||
"metadata": {}, | ||
"outputs": [], | ||
"source": [ | ||
"from ipywidgets import FloatProgress, Layout\n", | ||
"from IPython.display import display\n", | ||
"import micasense.imageset as imageset\n", | ||
"import micasense.capture as capture\n", | ||
"import os, glob\n", | ||
"import multiprocessing\n", | ||
"\n", | ||
"panelNames = None\n", | ||
"useDLS = True\n", | ||
"\n", | ||
"imagePath = os.path.expanduser(os.path.join('~','Downloads','DualCam-Farm','farm_only'))\n", | ||
"panelNames = glob.glob(os.path.join(imagePath,'IMG_0002_*.tif'))\n", | ||
"\n", | ||
"outputPath = os.path.join(imagePath,'..','stacks')\n", | ||
"thumbnailPath = os.path.join(outputPath, '..', 'thumbnails')\n", | ||
"\n", | ||
"overwrite = False # Set to False to continue interrupted processing\n", | ||
"generateThumbnails = True\n", | ||
"\n", | ||
"# Allow this code to align both radiance and reflectance images; bu excluding\n", | ||
"# a definition for panelNames above, radiance images will be used\n", | ||
"# For panel images, efforts will be made to automatically extract the panel information\n", | ||
"# but if the panel/firmware is before Altum 1.3.5, RedEdge 5.1.7 the panel reflectance\n", | ||
"# will need to be set in the panel_reflectance_by_band variable.\n", | ||
"# Note: radiance images will not be used to properly create NDVI/NDRE images below.\n", | ||
"if panelNames is not None:\n", | ||
" panelCap = capture.Capture.from_filelist(panelNames)\n", | ||
"else:\n", | ||
" panelCap = None\n", | ||
"\n", | ||
"if panelCap is not None:\n", | ||
" if panelCap.panel_albedo() is not None:\n", | ||
" panel_reflectance_by_band = panelCap.panel_albedo()\n", | ||
" else:\n", | ||
" panel_reflectance_by_band = [0.65]*len(panelCap.images) #inexact, but quick\n", | ||
" panel_irradiance = panelCap.panel_irradiance(panel_reflectance_by_band) \n", | ||
" img_type = \"reflectance\"\n", | ||
"else:\n", | ||
" if useDLS:\n", | ||
" img_type='reflectance'\n", | ||
" else:\n", | ||
" img_type = \"radiance\"" | ||
] | ||
}, | ||
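{ | ||
"cell_type": "markdown", | ||
"metadata": {}, | ||
"source": [ | ||
"*Optional sanity check (not part of the original workflow):* before the long-running load below, the next cell confirms that `imagePath` resolves and reports how many band images and panel images were found, plus the output type chosen above. It only uses variables already defined in the previous cell." | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": null, | ||
"metadata": {}, | ||
"outputs": [], | ||
"source": [ | ||
"# Optional sanity check: verify the dataset location before loading the full ImageSet\n", | ||
"assert os.path.isdir(imagePath), \"imagePath does not exist - check the download location above\"\n", | ||
"image_files = glob.glob(os.path.join(imagePath, 'IMG_*.tif'))\n", | ||
"print(\"Found {} band images in {}\".format(len(image_files), imagePath))\n", | ||
"print(\"Panel images found: {}\".format(len(panelNames) if panelNames else 0))\n", | ||
"print(\"Images will be converted to {}\".format(img_type))" | ||
] | ||
}, | ||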
{ | ||
"cell_type": "code", | ||
"execution_count": null, | ||
"metadata": {}, | ||
"outputs": [], | ||
"source": [ | ||
"## This progress widget is used for display of the long-running process\n", | ||
"f = FloatProgress(min=0, max=1, layout=Layout(width='100%'), description=\"Loading\")\n", | ||
"display(f)\n", | ||
"def update_f(val):\n", | ||
" if (val - f.value) > 0.005 or val == 1: #reduces cpu usage from updating the progressbar by 10x\n", | ||
" f.value=val\n", | ||
"\n", | ||
"%time imgset = imageset.ImageSet.from_directory(imagePath, progress_callback=update_f)\n", | ||
"update_f(1.0)" | ||
] | ||
}, | ||
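{ | ||
"cell_type": "markdown", | ||
"metadata": {}, | ||
"source": [ | ||
"A quick look at what was loaded (an illustrative addition using only the `imgset` object created above): the number of captures and the number of bands in the first capture, which should be 10 for the dual-camera dataset." | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": null, | ||
"metadata": {}, | ||
"outputs": [], | ||
"source": [ | ||
"# Summarize the loaded ImageSet\n", | ||
"print(\"Loaded {} captures\".format(len(imgset.captures)))\n", | ||
"print(\"Bands in first capture: {}\".format(len(imgset.captures[0].images)))" | ||
] | ||
}, | ||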
{ | ||
"cell_type": "code", | ||
"execution_count": null, | ||
"metadata": { | ||
"scrolled": false | ||
}, | ||
"outputs": [], | ||
"source": [ | ||
"import math\n", | ||
"import numpy as np\n", | ||
"from mapboxgl.viz import *\n", | ||
"from mapboxgl.utils import df_to_geojson, create_radius_stops, scale_between\n", | ||
"from mapboxgl.utils import create_color_stops\n", | ||
"import pandas as pd\n", | ||
"\n", | ||
"data, columns = imgset.as_nested_lists()\n", | ||
"df = pd.DataFrame.from_records(data, index='timestamp', columns=columns)\n", | ||
"\n", | ||
"#Insert your mapbox token here\n", | ||
"token = 'pk.eyJ1IjoibWljYXNlbnNlIiwiYSI6ImNqYWx5dWNteTJ3cWYzMnBicmZid3g2YzcifQ.Zrq9t7GYocBtBzYyT3P4sw'\n", | ||
"color_property = 'dls-yaw'\n", | ||
"color_property = 'altitude'\n", | ||
"num_color_classes = 8\n", | ||
"\n", | ||
"min_val = df[color_property].min()\n", | ||
"max_val = df[color_property].max()\n", | ||
"\n", | ||
"import jenkspy\n", | ||
"breaks = jenkspy.jenks_breaks(df[color_property], nb_class=num_color_classes)\n", | ||
"\n", | ||
"color_stops = create_color_stops(breaks,colors='YlOrRd')\n", | ||
"geojson_data = df_to_geojson(df,columns[3:],lat='latitude',lon='longitude')\n", | ||
"\n", | ||
"viz = CircleViz(geojson_data, access_token=token, color_property=color_property,\n", | ||
" color_stops=color_stops,\n", | ||
" center=[df['longitude'].median(),df['latitude'].median()], \n", | ||
" zoom=16, height='600px',\n", | ||
" style='mapbox://styles/mapbox/satellite-streets-v9')\n", | ||
"viz.show()" | ||
] | ||
}, | ||
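{ | ||
"cell_type": "markdown", | ||
"metadata": {}, | ||
"source": [ | ||
"If you do not have a Mapbox token handy, a minimal token-free alternative (not part of the original notebook) is to plot the capture locations directly from the DataFrame built above with matplotlib, colored by the same `color_property`." | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": null, | ||
"metadata": {}, | ||
"outputs": [], | ||
"source": [ | ||
"# Token-free fallback: scatter the capture locations from the DataFrame built above\n", | ||
"import matplotlib.pyplot as plt\n", | ||
"\n", | ||
"fig, ax = plt.subplots(figsize=(8, 6))\n", | ||
"sc = ax.scatter(df['longitude'], df['latitude'], c=df[color_property], cmap='YlOrRd', s=10)\n", | ||
"fig.colorbar(sc, ax=ax, label=color_property)\n", | ||
"ax.set_xlabel('longitude')\n", | ||
"ax.set_ylabel('latitude')\n", | ||
"ax.set_title('Capture locations colored by {}'.format(color_property))\n", | ||
"plt.show()" | ||
] | ||
}, | ||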
{ | ||
"cell_type": "markdown", | ||
"metadata": {}, | ||
"source": [ | ||
"## Define which warp method to use\n", | ||
"For newer data sets with RigRelatives tags (images captured with RedEdge version 3.4.0 or greater with a valid calibration load, see https://support.micasense.com/hc/en-us/articles/360005428953-Updating-RedEdge-for-Pix4Dfields), we can use the RigRelatives for a simple alignment.\n", | ||
"\n", | ||
"For sets without those tags, or sets that require a RigRelatives optimization, we can go through the Alignment.ipynb notebook and get a set of `warp_matrices` that we can use here to align." | ||
] | ||
}, | ||
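{ | ||
"cell_type": "markdown", | ||
"metadata": {}, | ||
"source": [ | ||
"For a capture set that does carry RigRelatives tags, the simple alignment path might look like the sketch below. It assumes the installed `micasense` version provides `Capture.get_warp_matrices()`, and it stores the result in a separate `rig_warp_matrices` variable so that the hand-derived `warp_matrices` in the following cell remain the ones actually used for this dataset." | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": null, | ||
"metadata": {}, | ||
"outputs": [], | ||
"source": [ | ||
"# Sketch only: derive warp matrices from the RigRelatives tags of the first capture.\n", | ||
"# Assumes Capture.get_warp_matrices() is available in this version of the micasense library;\n", | ||
"# to use the result, assign it to warp_matrices instead of the hard-coded values below.\n", | ||
"try:\n", | ||
"    rig_warp_matrices = imgset.captures[0].get_warp_matrices()\n", | ||
"    print(\"Derived {} warp matrices from RigRelatives\".format(len(rig_warp_matrices)))\n", | ||
"except Exception as err:\n", | ||
"    print(\"RigRelatives-based alignment not available here: {}\".format(err))" | ||
] | ||
}, | ||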
{ | ||
"cell_type": "code", | ||
"execution_count": null, | ||
"metadata": {}, | ||
"outputs": [], | ||
"source": [ | ||
"from numpy import array\n", | ||
"from numpy import float32\n", | ||
"\n", | ||
"# Use the warp_matrices derived from the Alignment Tutorial for this RedEdge set without RigRelatives\n", | ||
"warp_matrices = [array([[ 1.0020243e+00, -3.7388311e-04, 2.4971788e+01],\n", | ||
" [ 6.7297497e-04, 1.0005866e+00, 1.7188536e+01],\n", | ||
" [ 2.4259109e-06, -9.2373267e-07, 1.0000000e+00]], dtype=float32), array([[ 9.9140632e-01, -4.6332614e-05, 4.8500401e+01],\n", | ||
" [ 3.2340995e-05, 9.9200422e-01, -1.0915921e+01],\n", | ||
" [-7.3704086e-07, 5.0890253e-07, 1.0000000e+00]], dtype=float32), array([[ 1.0018263e+00, -2.1731904e-04, 5.5316315e+00],\n", | ||
" [ 7.2411756e-04, 1.0021795e+00, 5.8745198e+00],\n", | ||
" [-1.9047379e-08, 9.7758209e-07, 1.0000000e+00]], dtype=float32), array([[ 9.9152303e-01, -5.4825414e-03, 4.1536880e+01],\n", | ||
" [ 3.8441001e-03, 9.9495757e-01, 1.7250452e+01],\n", | ||
" [-3.2921032e-06, -2.4233820e-08, 1.0000000e+00]], dtype=float32), array([[ 1.0006192e+00, -3.0658240e-04, -2.5816131e-01],\n", | ||
" [ 7.8755329e-05, 9.9954307e-01, 2.9809377e-01],\n", | ||
" [ 9.1640561e-07, -1.0784843e-06, 1.0000000e+00]], dtype=float32), array([[ 9.9773926e-01, -6.3800282e-04, 5.2199936e+01],\n", | ||
" [-3.4246168e-03, 9.9601907e-01, 2.0550659e+01],\n", | ||
" [-4.6251063e-07, -4.8716843e-06, 1.0000000e+00]], dtype=float32), array([[ 9.9622118e-01, 3.1637053e-03, 3.7498917e+01],\n", | ||
" [-6.7951437e-03, 9.9743211e-01, 8.9517927e+00],\n", | ||
" [-3.6472218e-06, -2.4649705e-06, 1.0000000e+00]], dtype=float32), array([[ 9.8943901e-01, 3.7658634e-04, 9.4948044e+00],\n", | ||
" [-4.0384033e-03, 9.8851675e-01, 1.5366467e+01],\n", | ||
" [-2.4371677e-06, -3.8438825e-06, 1.0000000e+00]], dtype=float32), array([[ 9.9749213e-01, 1.6272087e-03, 4.3243721e-01],\n", | ||
" [-7.3282972e-05, 9.9533182e-01, 3.5523354e+01],\n", | ||
" [ 3.8597086e-06, -4.0187538e-07, 1.0000000e+00]], dtype=float32), array([[ 9.9992698e-01, 6.6664284e-03, -9.0784521e+00],\n", | ||
" [-9.0053231e-03, 9.9836856e-01, 1.5190173e+01],\n", | ||
" [-1.6761204e-07, -3.6131762e-06, 1.0000000e+00]], dtype=float32)]" | ||
] | ||
}, | ||
{ | ||
"cell_type": "markdown", | ||
"metadata": {}, | ||
"source": [ | ||
"## Align images and save each capture to a layered tiff file" | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": null, | ||
"metadata": {}, | ||
"outputs": [], | ||
"source": [ | ||
"import exiftool\n", | ||
"import datetime\n", | ||
"\n", | ||
"use_multi_process = True # set to False for single-process saving\n", | ||
"overwrite_existing = False # skip existing files, set to True to overwrite\n", | ||
"\n", | ||
"## This progress widget is used for display of the long-running process\n", | ||
"f2 = FloatProgress(min=0, max=1, layout=Layout(width='100%'), description=\"Saving\")\n", | ||
"display(f2)\n", | ||
"def update_f2(val):\n", | ||
" f2.value=val\n", | ||
"\n", | ||
"# Save out geojson data so we can open the image capture locations in our GIS\n", | ||
"with open(os.path.join(outputPath,'imageSet.json'),'w') as f:\n", | ||
" f.write(str(geojson_data))\n", | ||
"\n", | ||
"# If we didn't provide a panel above, irradiance set to None will cause DLS data to be used\n", | ||
"try:\n", | ||
" irradiance = panel_irradiance+[0]\n", | ||
"except NameError:\n", | ||
" irradiance = None\n", | ||
"\n", | ||
"start_time = datetime.datetime.now()\n", | ||
"\n", | ||
"# Save all captures in the imageset as aligned stacks\n", | ||
"imgset.save_stacks(warp_matrices,\n", | ||
" outputPath,\n", | ||
" thumbnailPath,\n", | ||
" irradiance = irradiance,\n", | ||
" multiprocess=use_multi_process, \n", | ||
" overwrite=overwrite_existing, \n", | ||
" progress_callback=update_f2)\n", | ||
"\n", | ||
"end_time = datetime.datetime.now()\n", | ||
"update_f2(1.0)\n", | ||
"\n", | ||
"print(\"Saving time: {}\".format(end_time-start_time))\n", | ||
"print(\"Alignment+Saving rate: {:.2f} captures per second\".format(float(len(imgset.captures))/float((end_time-start_time).total_seconds())))" | ||
] | ||
}, | ||
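{ | ||
"cell_type": "markdown", | ||
"metadata": {}, | ||
"source": [ | ||
"As a quick check (an addition to the original flow), the cell below counts the stacked tiff files written to `outputPath` and compares that to the number of captures; a mismatch usually means some captures were skipped or processing was interrupted." | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": null, | ||
"metadata": {}, | ||
"outputs": [], | ||
"source": [ | ||
"# Count the saved stacks and compare to the number of captures\n", | ||
"saved_stacks = glob.glob(os.path.join(outputPath, '*.tif'))\n", | ||
"print(\"{} stacks saved for {} captures\".format(len(saved_stacks), len(imgset.captures)))" | ||
] | ||
}, | ||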
{ | ||
"cell_type": "markdown", | ||
"metadata": {}, | ||
"source": [ | ||
"## Extract Metadata from Captures list and save to log.csv" | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": null, | ||
"metadata": {}, | ||
"outputs": [], | ||
"source": [ | ||
"def decdeg2dms(dd):\n", | ||
" is_positive = dd >= 0\n", | ||
" dd = abs(dd)\n", | ||
" minutes,seconds = divmod(dd*3600,60)\n", | ||
" degrees,minutes = divmod(minutes,60)\n", | ||
" degrees = degrees if is_positive else -degrees\n", | ||
" return (degrees,minutes,seconds)\n", | ||
"\n", | ||
"header = \"SourceFile,\\\n", | ||
"GPSDateStamp,GPSTimeStamp,\\\n", | ||
"GPSLatitude,GpsLatitudeRef,\\\n", | ||
"GPSLongitude,GPSLongitudeRef,\\\n", | ||
"GPSAltitude,GPSAltitudeRef,\\\n", | ||
"FocalLength,\\\n", | ||
"XResolution,YResolution,ResolutionUnits\\n\"\n", | ||
"\n", | ||
"lines = [header]\n", | ||
"for capture in imgset.captures:\n", | ||
" #get lat,lon,alt,time\n", | ||
" outputFilename = capture.uuid+'.tif'\n", | ||
" fullOutputPath = os.path.join(outputPath, outputFilename)\n", | ||
" lat,lon,alt = capture.location()\n", | ||
" #write to csv in format:\n", | ||
" # IMG_0199_1.tif,\"33 deg 32' 9.73\"\" N\",\"111 deg 51' 1.41\"\" W\",526 m Above Sea Level\n", | ||
" latdeg, latmin, latsec = decdeg2dms(lat)\n", | ||
" londeg, lonmin, lonsec = decdeg2dms(lon)\n", | ||
" latdir = 'North'\n", | ||
" if latdeg < 0:\n", | ||
" latdeg = -latdeg\n", | ||
" latdir = 'South'\n", | ||
" londir = 'East'\n", | ||
" if londeg < 0:\n", | ||
" londeg = -londeg\n", | ||
" londir = 'West'\n", | ||
" resolution = capture.images[0].focal_plane_resolution_px_per_mm\n", | ||
"\n", | ||
" linestr = '\"{}\",'.format(fullOutputPath)\n", | ||
" linestr += capture.utc_time().strftime(\"%Y:%m:%d,%H:%M:%S,\")\n", | ||
" linestr += '\"{:d} deg {:d}\\' {:.2f}\"\" {}\",{},'.format(int(latdeg),int(latmin),latsec,latdir[0],latdir)\n", | ||
" linestr += '\"{:d} deg {:d}\\' {:.2f}\"\" {}\",{},{:.1f} m Above Sea Level,Above Sea Level,'.format(int(londeg),int(lonmin),lonsec,londir[0],londir,alt)\n", | ||
" linestr += '{}'.format(capture.images[0].focal_length)\n", | ||
" linestr += '{},{},mm'.format(resolution,resolution)\n", | ||
" linestr += '\\n' # when writing in text mode, the write command will convert to os.linesep\n", | ||
" lines.append(linestr)\n", | ||
"\n", | ||
"fullCsvPath = os.path.join(outputPath,'log.csv')\n", | ||
"with open(fullCsvPath, 'w') as csvfile: #create CSV\n", | ||
" csvfile.writelines(lines)" | ||
] | ||
}, | ||
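{ | ||
"cell_type": "markdown", | ||
"metadata": {}, | ||
"source": [ | ||
"A small usage example (illustrative only) of the `decdeg2dms` helper and the CSV just written: 33.536 decimal degrees should come back as approximately (33, 32, 9.6), and the first line of `log.csv` should match the header defined above." | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": null, | ||
"metadata": {}, | ||
"outputs": [], | ||
"source": [ | ||
"# Quick check of the conversion helper and the CSV just written\n", | ||
"print(decdeg2dms(33.536))  # expect approximately (33.0, 32.0, 9.6)\n", | ||
"with open(fullCsvPath) as csvfile:\n", | ||
"    print(csvfile.readline().strip())  # should match the header defined above\n", | ||
"    print(csvfile.readline().strip())  # first capture's row" | ||
] | ||
}, | ||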
{ | ||
"cell_type": "markdown", | ||
"metadata": {}, | ||
"source": [ | ||
"## Use Exiftool from the command line to write metadata to images" | ||
] | ||
}, | ||
{ | ||
"cell_type": "code", | ||
"execution_count": null, | ||
"metadata": {}, | ||
"outputs": [], | ||
"source": [ | ||
"import subprocess\n", | ||
"\n", | ||
"if os.environ.get('exiftoolpath') is not None:\n", | ||
" exiftool_cmd = os.path.normpath(os.environ.get('exiftoolpath'))\n", | ||
"else:\n", | ||
" exiftool_cmd = 'exiftool'\n", | ||
" \n", | ||
"cmd = '{} -csv=\"{}\" -overwrite_original {}'.format(exiftool_cmd, fullCsvPath, outputPath)\n", | ||
"print(cmd)\n", | ||
"if(subprocess.check_call(cmd) == 0):\n", | ||
" print(\"Successfully updated stack metadata\")" | ||
] | ||
} | ||
], | ||
"metadata": { | ||
"kernelspec": { | ||
"display_name": "Python 3", | ||
"language": "python", | ||
"name": "python3" | ||
}, | ||
"language_info": { | ||
"codemirror_mode": { | ||
"name": "ipython", | ||
"version": 3 | ||
}, | ||
"file_extension": ".py", | ||
"mimetype": "text/x-python", | ||
"name": "python", | ||
"nbconvert_exporter": "python", | ||
"pygments_lexer": "ipython3", | ||
"version": "3.7.3" | ||
} | ||
}, | ||
"nbformat": 4, | ||
"nbformat_minor": 2 | ||
} |