diff --git a/.DS_Store b/.DS_Store
new file mode 100644
index 0000000..bd4dca9
Binary files /dev/null and b/.DS_Store differ
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..8cdb845
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,340 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ {description}
+ Copyright (C) {year} {fullname}
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ {signature of Ty Coon}, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
+
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..c806602
--- /dev/null
+++ b/README.md
@@ -0,0 +1,23 @@
+# Conan-ML
+
+Conan-ML software is a tool engineered for precise and automated
+processing and analysis of contact angle data. The software utilizes image
+processing algorithms to analyze captured images of droplets on a surface.
+Current implementation is only optimised for high-angle systems.
+
+# Dependencies
+
+Conan-ML is written to run on python3.6+. The packages required to run
+conan ML must be installed in your Python environment. These are included
+listed in requirements.txt and can be install using
+
+pip install -r requirements.txt
+
+# Usage
+
+To load Conan-ML GUI run the conan.py file using
+
+python conan.py
+
+However, functions from each file of the modules directory can be called
+for a more customised approach.
diff --git a/conan.py b/conan.py
new file mode 100644
index 0000000..3cef411
--- /dev/null
+++ b/conan.py
@@ -0,0 +1,220 @@
+#!/usr/bin/env python
+#coding=utf-8
+from __future__ import unicode_literals
+from __future__ import print_function
+# from modules.classes import ExperimentalDrop, DropData, Tolerances
+# from modules.static_setup_class import ExperimentalSetup
+# # from modules.ui import initialise_ui
+# from modules.user_interface import call_user_input
+# # from modules.load import load_data
+# from modules.extract_data import extract_drop_profile
+# from modules.initialise_parameters import initialise_parameters
+# # from modules.fit_data import fit_raw_experiment
+# # from modules.user_set_regions
+
+
+from modules.classes import ExperimentalSetup, ExperimentalDrop, DropData, Tolerances
+#from modules.PlotManager import PlotManager
+from modules.ExtractData import ExtractedData
+
+from modules.user_interface import call_user_input
+from modules.read_image import get_image
+from modules.select_regions import set_drop_region,set_surface_line, correct_tilt
+from modules.extract_profile import extract_drop_profile
+from modules.extract_profile import image_crop
+from modules.initialise_parameters import initialise_parameters
+#from modules.analyse_needle import calculate_needle_diameter
+#from modules.fit_data import fit_experimental_drop
+from modules.fits import perform_fits
+#from modules.generate_data import generate_full_data
+# from modules. import add_data_to_lists
+
+import matplotlib.pyplot as plt
+
+
+import os
+import numpy as np
+import tkinter as tk
+from tkinter import font as tkFont
+
+import timeit
+import time
+
# Compact numpy output: plain decimals (no scientific notation), 3 places.
np.set_printoptions(suppress=True, precision=3)

# Convergence tolerances and iteration caps for the drop-profile fitting
# routines (passed to Tolerances in main()).
DELTA_TOL = 1.e-6
GRADIENT_TOL = 1.e-6
MAXIMUM_FITTING_STEPS = 10
OBJECTIVE_TOL = 1.e-4
ARCLENGTH_TOL = 1.e-6
MAXIMUM_ARCLENGTH_STEPS = 10
NEEDLE_TOL = 1.e-4
NEEDLE_STEPS = 20
+
+
+
def main():
    """Run the Conan-ML contact-angle analysis pipeline.

    Collects the experimental setup through the GUI, then for each selected
    frame: loads the image, extracts the drop profile, fits the surface
    baseline, runs the requested contact-angle fits (tangent / polynomial /
    circle / ellipse / Young-Laplace / ML model) and exports the results
    to a CSV file.
    """
    clear_screen()
    fitted_drop_data = DropData()
    # Tolerances are currently only consumed by the (disabled) least-squares
    # fit below; kept so re-enabling it needs no other changes.
    tolerances = Tolerances(
        DELTA_TOL,
        GRADIENT_TOL,
        MAXIMUM_FITTING_STEPS,
        OBJECTIVE_TOL,
        ARCLENGTH_TOL,
        MAXIMUM_ARCLENGTH_STEPS,
        NEEDLE_TOL,
        NEEDLE_STEPS)
    user_inputs = ExperimentalSetup()

    call_user_input(user_inputs)

    if user_inputs.ML_boole:
        # Import TensorFlow lazily: it is heavy and only needed when the
        # ML model was requested in the GUI.
        from modules.ML_model.prepare_experimental import prepare4model_v03, experimental_pred
        import tensorflow as tf
        model_path = './modules/ML_model/'
        model = tf.keras.models.load_model(model_path)

    n_frames = user_inputs.number_of_frames
    extracted_data = ExtractedData(n_frames, fitted_drop_data.parameter_dimensions)

    for i in range(n_frames):
        print("\nProcessing frame %d of %d..." % (i + 1, n_frames))
        input_file = user_inputs.import_files[i]
        # Fixed typo in user-facing message ("Proceccing").
        print("\nProcessing " + input_file)
        time_start = timeit.default_timer()  # used by the disabled pause logic below
        raw_experiment = ExperimentalDrop()
        get_image(raw_experiment, user_inputs, i)  # also saves the image
        set_drop_region(raw_experiment, user_inputs)
        extract_drop_profile(raw_experiment, user_inputs)

        if i == 0:
            extracted_data.initial_image_time = raw_experiment.time
            # NOTE(review): assumes the input filename has a 3-character
            # extension ('.bmp', '.jpg'); os.path.splitext would be safer,
            # but the export format is kept as-is for compatibility.
            filename = user_inputs.filename[:-4] + '_' + user_inputs.time_string + ".csv"
            export_filename = os.path.join(user_inputs.directory_string, filename)

        # Fits are performed inside set_surface_line when baseline_method
        # is 'User-selected'.
        set_surface_line(raw_experiment, user_inputs)

        # These fit methods do not need tilt correction.
        if user_inputs.baseline_method == "Automated":
            if (user_inputs.tangent_boole or user_inputs.second_deg_polynomial_boole
                    or user_inputs.circle_boole or user_inputs.ellipse_boole):
                perform_fits(raw_experiment,
                             tangent=user_inputs.tangent_boole,
                             polynomial=user_inputs.second_deg_polynomial_boole,
                             circle=user_inputs.circle_boole,
                             ellipse=user_inputs.ellipse_boole)

        # The YL fit and the ML model need tilt correction, so re-extract
        # the profile (and baseline, when automated) after correcting.
        if user_inputs.ML_boole or user_inputs.YL_boole:
            correct_tilt(raw_experiment, user_inputs)
            extract_drop_profile(raw_experiment, user_inputs)
            if user_inputs.baseline_method == "Automated":
                set_surface_line(raw_experiment, user_inputs)
            # baseline_method == 'User-selected' should work as is.

        if user_inputs.YL_boole:
            print('Performing YL fit...')
            perform_fits(raw_experiment, YL=user_inputs.YL_boole)
        if user_inputs.ML_boole:
            pred_ds = prepare4model_v03(raw_experiment.drop_contour)
            ML_predictions, timings = experimental_pred(pred_ds, model)
            # ML_predictions is indexed [left/right, 0] — two contact angles.
            raw_experiment.contact_angles['ML model'] = {}
            raw_experiment.contact_angles['ML model']['angles'] = [ML_predictions[0, 0], ML_predictions[1, 0]]
            raw_experiment.contact_angles['ML model']['timings'] = timings

        extracted_data.contact_angles = raw_experiment.contact_angles  # DS 7/6/21

        print('Extracted outputs:')
        for key1 in extracted_data.contact_angles.keys():
            for key2 in extracted_data.contact_angles[key1].keys():
                print(key1 + ' ' + key2 + ': ')
                print('    ', extracted_data.contact_angles[key1][key2])
            print()

        # LMF least-squares fit (disabled):
        # fit_experimental_drop(raw_experiment, fitted_drop_data, user_inputs, tolerances)
        # generate_full_data(extracted_data, raw_experiment, fitted_drop_data, user_inputs, i)
        # if i != (n_frames - 1):
        #     time_loop = timeit.default_timer() - time_start
        #     pause_wait_time(time_loop, user_inputs.wait_time)

        extracted_data.export_data(input_file, filename, i)
+
def clear_screen():
    """Clear the terminal ('cls' on Windows, 'clear' elsewhere)."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
+
def pause_wait_time(elapsed_time, requested_time):
    """Sleep out the remainder of *requested_time*, warning when already exceeded.

    If the work already took longer than the requested wait, print a
    warning instead of sleeping.
    """
    remaining = requested_time - elapsed_time
    if remaining >= 0:
        time.sleep(remaining)
    else:
        print('WARNING: Fitting took longer than desired wait time')
+
def cheeky_pause():
    """Block until the user dismisses a small Tk window with a good-bye button.

    Fixes two defects in the original:
    - imported the Python 2 modules ``Tkinter``/``tkMessageBox`` (ImportError
      on Python 3); now uses the ``tkinter as tk`` import from the top of
      this file;
    - assigned ``root.destroy()`` (the *result* of calling destroy) as the
      button command, which destroyed the window immediately and left the
      command as ``None``; the callable itself is assigned instead.
    """
    root = tk.Tk()
    frame = tk.Frame(root)
    frame.pack()

    button = tk.Button(frame)
    button['text'] = "Good-bye."
    button['command'] = root.destroy  # pass the method, do not call it here
    button.pack()

    root.mainloop()
+
def quit_(root):
    """Stop the Tk mainloop of the given *root* window."""
    root.quit()
+
+#def close_window(root):
+# root.destroy()
+
+
if __name__ == '__main__':
    main()
    # After the analysis completes, present a quit button so the user can
    # end the session explicitly.
    root = tk.Tk()
    # NOTE(review): this font does not appear to be applied to the button
    # on all platforms — confirm.
    button_font = tkFont.Font(family='Helvetica', size=48, weight='bold')
    quit_button = tk.Button(master=root, font=button_font, text='Quit',
                            height=4, width=15,
                            command=lambda: quit_(root),
                            bg='blue', fg='white',
                            activeforeground='white',
                            activebackground='red')
    quit_button.pack()
    root.mainloop()
diff --git a/conan.pyc b/conan.pyc
new file mode 100644
index 0000000..15c073f
Binary files /dev/null and b/conan.pyc differ
diff --git a/experimental data set/10.bmp b/experimental data set/10.bmp
new file mode 100644
index 0000000..afe112a
Binary files /dev/null and b/experimental data set/10.bmp differ
diff --git a/experimental data set/111.031693.bmp b/experimental data set/111.031693.bmp
new file mode 100644
index 0000000..c3d4462
Binary files /dev/null and b/experimental data set/111.031693.bmp differ
diff --git a/experimental data set/113.66.bmp b/experimental data set/113.66.bmp
new file mode 100644
index 0000000..39e397e
Binary files /dev/null and b/experimental data set/113.66.bmp differ
diff --git a/experimental data set/113.98.bmp b/experimental data set/113.98.bmp
new file mode 100644
index 0000000..0b35e8c
Binary files /dev/null and b/experimental data set/113.98.bmp differ
diff --git a/experimental data set/114.47.bmp b/experimental data set/114.47.bmp
new file mode 100644
index 0000000..df63273
Binary files /dev/null and b/experimental data set/114.47.bmp differ
diff --git a/experimental data set/115.00.bmp b/experimental data set/115.00.bmp
new file mode 100644
index 0000000..f1471c4
Binary files /dev/null and b/experimental data set/115.00.bmp differ
diff --git a/experimental data set/115.553909.bmp b/experimental data set/115.553909.bmp
new file mode 100644
index 0000000..6a5c695
Binary files /dev/null and b/experimental data set/115.553909.bmp differ
diff --git a/experimental data set/115.714851.bmp b/experimental data set/115.714851.bmp
new file mode 100644
index 0000000..b21d144
Binary files /dev/null and b/experimental data set/115.714851.bmp differ
diff --git a/experimental data set/118.174171.bmp b/experimental data set/118.174171.bmp
new file mode 100644
index 0000000..dd5042b
Binary files /dev/null and b/experimental data set/118.174171.bmp differ
diff --git a/experimental data set/2-s@M@Z-120.321945190429.bmp b/experimental data set/2-s@M@Z-120.321945190429.bmp
new file mode 100644
index 0000000..2e30e38
Binary files /dev/null and b/experimental data set/2-s@M@Z-120.321945190429.bmp differ
diff --git a/experimental data set/2-s@M@Z-121.057571411132.bmp b/experimental data set/2-s@M@Z-121.057571411132.bmp
new file mode 100644
index 0000000..4da5c97
Binary files /dev/null and b/experimental data set/2-s@M@Z-121.057571411132.bmp differ
diff --git a/experimental data set/2-s@M@Z-122.639770507812.bmp b/experimental data set/2-s@M@Z-122.639770507812.bmp
new file mode 100644
index 0000000..a842631
Binary files /dev/null and b/experimental data set/2-s@M@Z-122.639770507812.bmp differ
diff --git a/experimental data set/20171112JT.BMP b/experimental data set/20171112JT.BMP
new file mode 100644
index 0000000..575056b
Binary files /dev/null and b/experimental data set/20171112JT.BMP differ
diff --git a/experimental data set/20171112JT4_1.BMP b/experimental data set/20171112JT4_1.BMP
new file mode 100644
index 0000000..a750fb4
Binary files /dev/null and b/experimental data set/20171112JT4_1.BMP differ
diff --git a/experimental data set/20171112JT4_2.BMP b/experimental data set/20171112JT4_2.BMP
new file mode 100644
index 0000000..da24155
Binary files /dev/null and b/experimental data set/20171112JT4_2.BMP differ
diff --git a/experimental data set/20171112JT4_3.BMP b/experimental data set/20171112JT4_3.BMP
new file mode 100644
index 0000000..6e98033
Binary files /dev/null and b/experimental data set/20171112JT4_3.BMP differ
diff --git a/experimental data set/20171112JT4_4.BMP b/experimental data set/20171112JT4_4.BMP
new file mode 100644
index 0000000..87516fb
Binary files /dev/null and b/experimental data set/20171112JT4_4.BMP differ
diff --git a/experimental data set/20171112JT4_5.BMP b/experimental data set/20171112JT4_5.BMP
new file mode 100644
index 0000000..6da8769
Binary files /dev/null and b/experimental data set/20171112JT4_5.BMP differ
diff --git a/experimental data set/20171112JT4_6.BMP b/experimental data set/20171112JT4_6.BMP
new file mode 100644
index 0000000..e028393
Binary files /dev/null and b/experimental data set/20171112JT4_6.BMP differ
diff --git a/experimental data set/20171112JT4_7.BMP b/experimental data set/20171112JT4_7.BMP
new file mode 100644
index 0000000..40c3a4e
Binary files /dev/null and b/experimental data set/20171112JT4_7.BMP differ
diff --git a/experimental data set/20230324_water_air_1c_0_250_15_2.jpg b/experimental data set/20230324_water_air_1c_0_250_15_2.jpg
new file mode 100644
index 0000000..67e3090
Binary files /dev/null and b/experimental data set/20230324_water_air_1c_0_250_15_2.jpg differ
diff --git a/experimental data set/20230328_water_solvent_1b_0_250_15_1.jpg b/experimental data set/20230328_water_solvent_1b_0_250_15_1.jpg
new file mode 100644
index 0000000..b9e05ee
Binary files /dev/null and b/experimental data set/20230328_water_solvent_1b_0_250_15_1.jpg differ
diff --git a/experimental data set/20230328_water_solvent_1b_0_250_15_6.jpg b/experimental data set/20230328_water_solvent_1b_0_250_15_6.jpg
new file mode 100644
index 0000000..5a97e14
Binary files /dev/null and b/experimental data set/20230328_water_solvent_1b_0_250_15_6.jpg differ
diff --git a/experimental data set/20230404_water_solvent_2a_0_100_10_3.jpg b/experimental data set/20230404_water_solvent_2a_0_100_10_3.jpg
new file mode 100644
index 0000000..26fce9c
Binary files /dev/null and b/experimental data set/20230404_water_solvent_2a_0_100_10_3.jpg differ
diff --git a/experimental data set/3.bmp b/experimental data set/3.bmp
new file mode 100644
index 0000000..909032a
Binary files /dev/null and b/experimental data set/3.bmp differ
diff --git a/experimental data set/359 CA162.BMP b/experimental data set/359 CA162.BMP
new file mode 100644
index 0000000..bf26b65
Binary files /dev/null and b/experimental data set/359 CA162.BMP differ
diff --git a/experimental data set/5.bmp b/experimental data set/5.bmp
new file mode 100644
index 0000000..e09dc59
Binary files /dev/null and b/experimental data set/5.bmp differ
diff --git a/experimental data set/96-0.bmp b/experimental data set/96-0.bmp
new file mode 100644
index 0000000..74500ea
Binary files /dev/null and b/experimental data set/96-0.bmp differ
diff --git a/experimental data set/96-1.bmp b/experimental data set/96-1.bmp
new file mode 100644
index 0000000..68889fb
Binary files /dev/null and b/experimental data set/96-1.bmp differ
diff --git a/experimental data set/96-2.bmp b/experimental data set/96-2.bmp
new file mode 100644
index 0000000..3d2bda0
Binary files /dev/null and b/experimental data set/96-2.bmp differ
diff --git a/experimental data set/96-3.bmp b/experimental data set/96-3.bmp
new file mode 100644
index 0000000..e63d7b0
Binary files /dev/null and b/experimental data set/96-3.bmp differ
diff --git a/experimental data set/96-4.bmp b/experimental data set/96-4.bmp
new file mode 100644
index 0000000..1badd33
Binary files /dev/null and b/experimental data set/96-4.bmp differ
diff --git a/experimental data set/96-5.bmp b/experimental data set/96-5.bmp
new file mode 100644
index 0000000..e44bcb1
Binary files /dev/null and b/experimental data set/96-5.bmp differ
diff --git a/experimental data set/96-6.bmp b/experimental data set/96-6.bmp
new file mode 100644
index 0000000..177e70b
Binary files /dev/null and b/experimental data set/96-6.bmp differ
diff --git a/experimental data set/96-7.bmp b/experimental data set/96-7.bmp
new file mode 100644
index 0000000..397a49b
Binary files /dev/null and b/experimental data set/96-7.bmp differ
diff --git a/experimental data set/96-8.bmp b/experimental data set/96-8.bmp
new file mode 100644
index 0000000..37c5cf1
Binary files /dev/null and b/experimental data set/96-8.bmp differ
diff --git a/experimental data set/96-9.bmp b/experimental data set/96-9.bmp
new file mode 100644
index 0000000..f553cfb
Binary files /dev/null and b/experimental data set/96-9.bmp differ
diff --git a/experimental data set/Screenshot 2023-05-05 at 2.21.07 pm.png b/experimental data set/Screenshot 2023-05-05 at 2.21.07 pm.png
new file mode 100644
index 0000000..51086f4
Binary files /dev/null and b/experimental data set/Screenshot 2023-05-05 at 2.21.07 pm.png differ
diff --git a/experimental data set/Screenshot 2023-05-05 at 2.21.08 pm.png b/experimental data set/Screenshot 2023-05-05 at 2.21.08 pm.png
new file mode 100644
index 0000000..d3a1f1f
Binary files /dev/null and b/experimental data set/Screenshot 2023-05-05 at 2.21.08 pm.png differ
diff --git a/experimental data set/Screenshot 2023-05-05 at 2.21.10 pm.png b/experimental data set/Screenshot 2023-05-05 at 2.21.10 pm.png
new file mode 100644
index 0000000..f24976a
Binary files /dev/null and b/experimental data set/Screenshot 2023-05-05 at 2.21.10 pm.png differ
diff --git a/experimental data set/Screenshot 2023-05-05 at 2.21.13 pm.png b/experimental data set/Screenshot 2023-05-05 at 2.21.13 pm.png
new file mode 100644
index 0000000..3f7f429
Binary files /dev/null and b/experimental data set/Screenshot 2023-05-05 at 2.21.13 pm.png differ
diff --git a/experimental data set/Screenshot 2023-05-05 at 2.21.19 pm.png b/experimental data set/Screenshot 2023-05-05 at 2.21.19 pm.png
new file mode 100644
index 0000000..3705f9b
Binary files /dev/null and b/experimental data set/Screenshot 2023-05-05 at 2.21.19 pm.png differ
diff --git a/experimental data set/Screenshot 2023-05-05 at 2.21.24 pm.png b/experimental data set/Screenshot 2023-05-05 at 2.21.24 pm.png
new file mode 100644
index 0000000..bca647b
Binary files /dev/null and b/experimental data set/Screenshot 2023-05-05 at 2.21.24 pm.png differ
diff --git a/experimental data set/Screenshot 2023-05-05 at 2.21.28 pm.png b/experimental data set/Screenshot 2023-05-05 at 2.21.28 pm.png
new file mode 100644
index 0000000..c48df0d
Binary files /dev/null and b/experimental data set/Screenshot 2023-05-05 at 2.21.28 pm.png differ
diff --git a/experimental data set/Screenshot 2023-05-05 at 2.21.30 pm.png b/experimental data set/Screenshot 2023-05-05 at 2.21.30 pm.png
new file mode 100644
index 0000000..a7f375b
Binary files /dev/null and b/experimental data set/Screenshot 2023-05-05 at 2.21.30 pm.png differ
diff --git a/experimental data set/Screenshot 2023-05-05 at 2.21.32 pm.png b/experimental data set/Screenshot 2023-05-05 at 2.21.32 pm.png
new file mode 100644
index 0000000..515ea5a
Binary files /dev/null and b/experimental data set/Screenshot 2023-05-05 at 2.21.32 pm.png differ
diff --git a/experimental data set/Screenshot 2023-05-05 at 2.21.33 pm.png b/experimental data set/Screenshot 2023-05-05 at 2.21.33 pm.png
new file mode 100644
index 0000000..a6c9e95
Binary files /dev/null and b/experimental data set/Screenshot 2023-05-05 at 2.21.33 pm.png differ
diff --git a/experimental data set/TEF1.png b/experimental data set/TEF1.png
new file mode 100644
index 0000000..db53b43
Binary files /dev/null and b/experimental data set/TEF1.png differ
diff --git a/experimental data set/TEF2.png b/experimental data set/TEF2.png
new file mode 100644
index 0000000..0ef4ff0
Binary files /dev/null and b/experimental data set/TEF2.png differ
diff --git a/experimental data set/TEF3.png b/experimental data set/TEF3.png
new file mode 100644
index 0000000..edcba1a
Binary files /dev/null and b/experimental data set/TEF3.png differ
diff --git a/experimental data set/TEF4.BMP b/experimental data set/TEF4.BMP
new file mode 100644
index 0000000..add790e
Binary files /dev/null and b/experimental data set/TEF4.BMP differ
diff --git a/experimental data set/TEF5.BMP b/experimental data set/TEF5.BMP
new file mode 100644
index 0000000..df1aead
Binary files /dev/null and b/experimental data set/TEF5.BMP differ
diff --git a/experimental data set/TEF6.BMP b/experimental data set/TEF6.BMP
new file mode 100644
index 0000000..3256c25
Binary files /dev/null and b/experimental data set/TEF6.BMP differ
diff --git a/experimental data set/steelball.jpg b/experimental data set/steelball.jpg
new file mode 100644
index 0000000..0a7dea0
Binary files /dev/null and b/experimental data set/steelball.jpg differ
diff --git a/modules/.DS_Store b/modules/.DS_Store
new file mode 100644
index 0000000..9865d0a
Binary files /dev/null and b/modules/.DS_Store differ
diff --git a/modules/BA_fit.py b/modules/BA_fit.py
new file mode 100644
index 0000000..73037ae
--- /dev/null
+++ b/modules/BA_fit.py
@@ -0,0 +1,1454 @@
+#!/usr/bin/env python
+#coding=utf-8
+
+"""This code serves as a discrete instance of the Bashforth-Adams fit of the
+Bashforth-Adams fit of the Young-Laplace equation, i.e. the BA_fit for
+contact angle analysis.
+
+Bashforth-Adams Young-Laplace fit code taken from most recent version on conan -
+conan-ML_v1.1/modules/BA_fit.py
+This was based on the BA fit of DropPy.
+"""
+
+from sklearn.cluster import OPTICS # for clustering algorithm
+import scipy.optimize as opt
+import numba
+from scipy.spatial import distance
+from scipy.integrate import solve_ivp
+import numpy as np
+import matplotlib.pyplot as plt
+import cv2
+from scipy.spatial.distance import cdist
+from sklearn.metrics import r2_score
+import time
+
def cluster_OPTICS(sample, out_style='coords', xi=None, eps=None, verbose=0):
    """Cluster a set of 2-D points with sklearn's OPTICS algorithm.

    Takes an array (or list) of the form [[x1,y1],[x2,y2],...,[xn,yn]].
    Clusters are returned in the form of a dictionary.

    If out_style='coords' each dictionary entry is a group, and points are
    returned in coordinate form.
    If out_style='xy' there are two dictionary entries for each group, one
    labeled as nx and one as ny (where n is the label of the group).

    If xi (float between 0 and 1) is not None and eps is None, the xi
    clustering method is used. OPTICS defines clusters based on the minimum
    steepness on the reachability plot: an upwards point in the reachability
    plot is defined by the ratio from one point to its successor being at
    most 1-xi.

    If eps (float) is not None and xi is None, the dbscan clustering method
    is used, where eps is the maximum distance between two samples for one to
    be considered as in the neighborhood of the other.

    :param sample: (n, 2) array-like of points to cluster
    :param out_style: 'coords' or 'xy' (see above); any other value returns None
    :param xi: steepness threshold for the 'xi' cluster method
    :param eps: neighborhood radius for the 'dbscan' cluster method
    :param verbose: 0 silent, 1 prints the group labels, 2 prints per-point labels
    :raises ValueError: unless exactly one of xi / eps is provided

    https://stackoverflow.com/questions/47974874/algorithm-for-grouping-points-in-given-distance
    https://scikit-learn.org/stable/modules/generated/sklearn.cluster.OPTICS.html
    """
    if eps is not None and xi is None:
        # cluster_method changed to dbscan so that a fixed eps can be set
        clustering = OPTICS(min_samples=2, cluster_method='dbscan', eps=eps).fit(sample)
    elif xi is not None and eps is None:
        clustering = OPTICS(min_samples=2, xi=xi).fit(sample)
    else:
        # The original code raised a bare string, which is itself a TypeError
        # in Python 3; raise a proper exception instead.
        raise ValueError('only one of eps and xi can be chosen but not neither nor both')

    groups = list(set(clustering.labels_))

    if verbose == 2:
        print(clustering.labels_)
    elif verbose == 1:
        print(groups)

    # Group sample points by cluster label in a single pass (the original
    # re-scanned the whole sample once per group).
    dic = {n: [] for n in groups}
    for point, label in zip(sample, clustering.labels_):
        dic[label].append(point)

    if out_style == 'coords':
        return dic
    elif out_style == 'xy':
        # Two entries per group: '<label>x' holds the x values, '<label>y'
        # holds the y values.
        dic2 = {}
        for k, pts in dic.items():
            dic2[str(k) + 'x'] = [p[0] for p in pts]
            dic2[str(k) + 'y'] = [p[1] for p in pts]
        return dic2
+
def distance1(P1, P2):
    """Euclidean distance between two points P1 = (x1, y1) and P2 = (x2, y2)."""
    dx = P1[0] - P2[0]
    dy = P1[1] - P2[1]
    return (dx * dx + dy * dy) ** 0.5
+
def dist(param, points):
    '''
    Sum of squared radial errors of ``points`` against a candidate circle.

    :param param: sequence of 3 elements: circle center (x, y) and radius
    :param points: list of (x, y) points that should be lying on the circle
    :return: the sum of squared errors from the points on the circle with
        the provided parameters
    '''
    *center, radius = param
    center = np.asarray(center)
    residuals = [(np.linalg.norm(center - np.asarray(pt)) - radius) ** 2
                 for pt in points]
    return np.sum(residuals)
+
def optimized_path(coords, start=None, max_jump=5):
    """Order 2-D points into a continuous nearest-neighbour path.

    coords should be a list in the format coords = [[x1, y1], [x2, y2], ...]
    (a numpy array is converted). The path is built greedily: from the current
    point, the nearest remaining point is appended next.
    https://stackoverflow.com/questions/45829155/sort-points-in-order-to-have-a-continuous-curve-using-python

    :param coords: list or (n, 2) array of points
    :param start: starting point; defaults to the first coordinate
    :param max_jump: if the distance between consecutive ordered points
        exceeds this, the ordering is assumed to have gone wrong and the
        path is truncated before the first such jump (original hard-coded 5)
    :return: (m, 2) numpy array of the ordered (possibly truncated) path
    """
    # local Euclidean distance (same formula as the module-level distance1)
    def _d(p, q):
        return ((p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2) ** 0.5

    if not isinstance(coords, list):
        coords = coords.tolist()
    if start is None:
        start = coords[0]

    # Work on a copy so the caller's list is not mutated (the original
    # removed elements from the list passed in).
    remaining = list(coords)
    path = [start]
    remaining.remove(start)
    while remaining:
        nearest = min(remaining, key=lambda p: _d(path[-1], p))
        path.append(nearest)
        remaining.remove(nearest)
    path = np.array(path)

    # If there is any large jump in distance, there is likely a mistake in
    # the ordering; points from the first jump onwards are discarded.
    step_lengths = [_d(path[i], path[i + 1]) for i in range(len(path) - 1)]
    jump_idx = [i for i, d in enumerate(step_lengths) if d > max_jump]
    if jump_idx:
        path = path[:jump_idx[0]]

    return path
+
def prepare_hydrophobic(coords,xi=0.8,cluster=False,display=False):
    """takes an array (n,2) of coordinate points, and returns the left and right halfdrops of the contour.
    xi determines the minimum steepness on the reachability plot that constitutes a cluster boundary of the
    clustering algorithm.
    deg is the degree of the polynomial used to describe the shape of the droplet.

    This code is adapted from the prepare module, but this version differs in that it assumes that the drop
    is hydrophobic.

    Returns (profile, CPs): the combined left+right drop contour in image
    orientation, and a dict {0: [x, y], 1: [x, y]} of the two contact points.

    NOTE(review): several spans of this function arrived garbled (marked
    below) and will not parse as written; they are preserved verbatim so the
    original intent can be recovered from version control rather than guessed.
    """
    # NOTE(review): np.float was removed in NumPy 1.24+; this line will raise
    # AttributeError on modern NumPy and should become float or np.float64.
    coords = coords.astype(np.float)
    # scan for clusters to remove noise and circle from lensing effect
    ################ MAY NEED TO OPTIMIZE eps/xi TO FIND APPROPRIATE GROUPINGS ####################
    if cluster: # turn this off bc using synthetic drops without lensing effect
        input_contour = coords
        dic,dic2 = cluster_OPTICS(input_contour,xi=xi),cluster_OPTICS(input_contour,out_style='xy',xi=xi)

        #print("number of groups: ",len(list(dic.keys())))

        jet= plt.get_cmap('jet')
        colors = iter(jet(np.linspace(0,1,len(list(dic.keys())))))
        for k in dic.keys():
            plt.plot(dic2[str(k)+'x'],dic2[str(k)+'y'], 'o',color=next(colors))
        plt.title(str(len(dic.keys()))+" groups found by clustering")
        plt.show()
        plt.close()
        maxkey=max(dic, key=lambda k: len(dic[k]))

        #print('key to longest dictionary entry is: ',maxkey)

        # take the longest group
        longest = dic[maxkey]

    # flip contour so that min and max values are correct
    for coord in coords:
        coord[1] = -coord[1]

    # NOTE(review): this overwrites the clustered 'longest' selection above,
    # so the clustering result is effectively discarded — confirm intended.
    longest = coords

    #print("first few coordinates of the longest contour: ",longest[:3])

    xlongest = []
    ylongest = []
    for i in range(len(longest)):
        xlongest.append(longest[i][0])
        ylongest.append(longest[i][1])

    #print("first few x coordinates of the longest contour: ",xlongest[:3])
    #print("first few y coordinates of the longest contour: ",ylongest[:3])


    # Find a appropriate epsilon value for cluster_OPTICS, this will remove noise in the bottom 10% of the drop
    #. most importantly noise is reduced at contact points.

    # variables in this process are how much and what part of the top of the droplet we use to be representative of
    # the full contour, and whether we use the max(distance) between points or the average between points, or
    # a scalar value of either.

    xtop = [] # isolate top 90% of drop
    ytop = []
    percent = 0.3
    #print('Isolate the top ',100-(percent*100),'% of the contour:')
    for n,y in enumerate(ylongest):
        if y > min(ylongest) + (max(ylongest) - min(ylongest))*percent:
            xtop.append(xlongest[n])
            ytop.append(y)
    xtop = np.array(xtop)
    ytop = np.array(ytop)

    top = []
    for n,x in enumerate(xtop):
        top.append([xtop[n],ytop[n]])
    top = np.array(top)
    top_array = optimized_path(top)

    dists = [] # find the average distance between consecutive points
    for n,co in enumerate(top_array):
        # NOTE(review): the next line is garbled in the source (two distant
        # lines merged); the original loop body computing 'dists' is lost.
        if 1 min(ylongest) + (max(ylongest) - min(ylongest))*percent:
            xtop.append(xlongest[n])
            ytop.append(y)
    # apex x is taken as the midpoint of the top section's x-extent
    xapex = (max(xtop) + min(xtop))/2

    l_drop = []
    r_drop = []
    for n in longest:
        if n[0] <= xapex:
            l_drop.append(n)
        if n[0] >= xapex:
            r_drop.append(n)
    l_drop = np.array(l_drop)
    r_drop = np.array(r_drop)

    # transpose both half drops so that they both face right and the apex of both is at 0,0
    r_drop[:,0] = r_drop[:,0] - xapex
    l_drop[:,0] = -l_drop[:,0] + xapex

    if display:
        plt.plot(r_drop[:,[0]], r_drop[:,[1]], 'b,')
        #plt.show()
        #plt.close()
        plt.plot(l_drop[:,[0]], l_drop[:,[1]], 'r,')
        #plt.gca().set_aspect('equal', adjustable='box')
        #plt.xlim([470,530])
        #plt.ylim([-188,-190])
        plt.show()
        plt.close()

    #############################

    # the drop has been split in half

    # this system has a user input which gives a rough indication of the contact point and the surface line

    # isolate the bottom 5% of the contour near the contact point

    drops = {}
    counter = 0
    crop_drop = {}
    CPs = {}
    for halfdrop in [l_drop,r_drop]:
        new_halfdrop = sorted(halfdrop.tolist(), key=lambda x: (x[0],-x[1])) #top left to bottom right
        new_halfdrop = optimized_path(new_halfdrop)#[::-1]

        xnew_halfdrop = new_halfdrop[:,[0]].reshape(len(new_halfdrop[:,[0]]))
        ynew_halfdrop = new_halfdrop[:,[1]].reshape(len(new_halfdrop[:,[1]]))

        # isolate the bottom of the drop to help identify contact points (may not need to do this for all scenarios)
        bottom = []
        top = [] # will need this later
        #print('isolate the bottom ',percent*100,'% of the contour:') # percent defined above
        div_line_value = min(new_halfdrop[:,[1]]) + (max(new_halfdrop[:,[1]]) - min(new_halfdrop[:,[1]]))*percent
        for n in new_halfdrop:
            if n[1] < div_line_value:
                bottom.append(n)
            else:
                top.append(n)

        bottom = np.array(bottom)
        top = np.array(top)

        xbottom = bottom[:,[0]].reshape(len(bottom[:,[0]]))
        ybottom = bottom[:,[1]].reshape(len(bottom[:,[1]]))
        xtop = top[:,[0]].reshape(len(top[:,[0]]))
        ytop = top[:,[1]].reshape(len(top[:,[1]]))

        #print('max x value of halfdrop is: ',max(xhalfdrop))

        if 1: # plot the bottom 10% of the contour
            plt.plot(xbottom, ybottom, 'b,')
            plt.title('bottom 10% of the contour')
            #plt.xlim([130,200])
            plt.show()
            plt.close()

        #### Continue here assuming that the drop is hydrophobic ####
        if 1:
            # order all halfdrop points using optimized_path (very quick but occasionally makes mistakes)

            xCP = min(xbottom)
            #yCP = min([coord[1] for coord in new_halfdrop if coord[0]==xCP])
            yCP = max([coord[1] for coord in bottom if coord[0]==xCP])
            CPs[counter] = [xCP, yCP]

            if display: #check
                plt.plot(new_halfdrop[:,0],new_halfdrop[:,1])
                plt.show()
                plt.close()

            # remove surface line past the contact point
            index = new_halfdrop.tolist().index(CPs[counter]) #?

            new_halfdrop = new_halfdrop[:index+1]

        if 0:
            # dead branch: contact-point indexing variant kept for reference
            xCP_index = [i for i, j in enumerate(xnew_halfdrop) if j == xCP]
            #print('xCP_index is: ',xCP_index)
            yCP_index = [i for i, j in enumerate(ynew_halfdrop) if j == yCP]
            #print('yCP_index is: ',yCP_index)

            new_halfdrop = np.zeros((len(xnew_halfdrop),2))
            for n in range(len(xnew_halfdrop)):
                new_halfdrop[n,[0]]=xnew_halfdrop[n]
                new_halfdrop[n,[1]]=ynew_halfdrop[n]
            #print('first 3 points of new_halfdrop are: ',new_halfdrop[:3])
            #print('length of new_halfdrop is: ',len(new_halfdrop))

            if xCP_index == yCP_index:
                if new_halfdrop[xCP_index[0]+1][1]>new_halfdrop[xCP_index[0]-1][1]:
                    new_halfdrop = new_halfdrop[xCP_index[0]:]
                else:
                    new_halfdrop = new_halfdrop[:xCP_index[0]+1]
            else:
                raise_error = True
                for x in xCP_index:
                    for y in yCP_index:
                        if x==y:
                            raise_error = False
                            xCP_index = [x]
                            yCP_index = [y]
                            #print('indexes of the CP are: ',xCP_index[0],', ',yCP_index[0])
                            if new_halfdrop[xCP_index[0]+1][1]>new_halfdrop[xCP_index[0]-1][1]:
                                new_halfdrop = new_halfdrop[xCP_index[0]:]
                            else:
                                new_halfdrop = new_halfdrop[:xCP_index[0]+1]
                if raise_error == True:
                    print('The index of the contact point x value is: ', new_halfdrop[xCP_index])
                    print('The index of the contact point y value is: ', new_halfdrop[yCP_index])
                    raise 'indexes of x and y values of the contact point are not the same'

        if 0:
            # order all halfdrop points using two-opt (the slower method)

            # before any ordering is done, chop off the surface line that is past the drop edge
            del_indexes = []
            for index,coord in enumerate(bottom):
                if coord[0]>max(xtop):
                    del_indexes.append(index)
                # NOTE(review): the next line is garbled in the source (a
                # comparison operator and intervening code are missing); the
                # matching try: for the except: below was lost in the same span.
                if coord[1] ybot[yCP_index-1]:
                    new_bot = np.zeros((len(xbot[yCP_index:]),2))
                    for n in range(len(new_bot)):
                        new_bot[n,[0]] = xbot[xCP_index+n]
                        new_bot[n,[1]] = ybot[yCP_index+n]
                else:
                    new_bot = np.zeros((len(xbot[:yCP_index]),2))
                    for n in range(len(new_bot)):
                        new_bot[n,[0]] = xbot[n]
                        new_bot[n,[1]] = ybot[n]
            except:
                try:
                    if ybot[yCP_index] > ybot[yCP_index-2]:
                        new_bot = np.zeros((len(xbot[yCP_index:]),2))
                        for n in range(len(new_bot)):
                            new_bot[n,[0]] = xbot[xCP_index+n]
                            new_bot[n,[1]] = ybot[yCP_index+n]
                    else:
                        new_bot = np.zeros((len(xbot[:yCP_index]),2))
                        for n in range(len(new_bot)):
                            new_bot[n,[0]] = xbot[n]
                            new_bot[n,[1]] = ybot[n]
                except:
                    print('xCP_indexs are: ', xCP_indexs)
                    print('yCP_indexs are: ', yCP_indexs)
                    raise 'indexes of x and y values of the contact point are not the same'
            new_halfdrop = np.concatenate((new_top,new_bot))

        if 0: # order the points so that the baseline can be removed
            # before any ordering is done, chop off the surface line that is past the drop edge
            del_indexes = []
            for index,coord in enumerate(halfdrop):
                if coord[0]>max(xtop):
                    del_indexes.append(index)
            #halfdrop = np.delete(halfdrop,del_indexes)
            xhalfdrop = np.delete(xhalfdrop,del_indexes)
            yhalfdrop = np.delete(yhalfdrop,del_indexes)
            #print('shape of another_halfdrop is: '+ str(type(another_halfdrop)))
            #print('first few points of halfdrop are: ',halfdrop[:3])



            # order half contour points
            xx,yy = sort_to_line(xhalfdrop,yhalfdrop)
            add_top = False
            #print('length of halfdrop is: ', len(halfdrop))
            #print('length of xbottom is: ', len(xbottom))

            #if xx[0]<1: # then graph starts at the top
            surface_past_drop_index = []
            for n,x in enumerate(xx):
                if x>max(xtop):
                    surface_past_drop_index.append(n)
            #xx = xx[:max(xtop)point]
            #print('Indexes of contour points past drop: ',surface_past_drop_index)


            # if the sort method will not work
            if len(xx) < len(xhalfdrop): # assumes that the error is on the surface somewhere, so uses bottom of contour
                add_top = True
                print()
                print('sort_to_line is not utilising the full contour, alternate ordering method being used')
                print('check bottom 10% of contour...')
                # this method is much slower than the above, so use as few points as possible
                bot_list = []
                for n in range(len(xbottom)):
                    # NOTE(review): the next line is garbled in the source —
                    # the bot_list construction and CP-index logic between
                    # these fragments was lost.
                    if xbottom[n] ybot[yCP_index[0]-1]:
                        new_bot = np.zeros((len(xbot[yCP_index[0]:]),2))
                        for n in range(len(new_bot)):
                            new_bot[n,[0]] = xbot[xCP_index[0]+n]
                            new_bot[n,[1]] = ybot[yCP_index[0]+n]
                    else:
                        new_bot = np.zeros((len(xbot[:yCP_index[0]]),2))
                        for n in range(len(new_bot)):
                            new_bot[n,[0]] = xbot[n]
                            new_bot[n,[1]] = ybot[n]
                else:
                    raise 'indexes of x and y values of the contact point are not the same'

                # combine new_bot with top_array to give the isolated drop contour without surface
                if 0:
                    top_array = np.zeros((len(xtop),2))
                    for n in range(len(xtop)):
                        top_array[n,[0]] = xtop[n]
                        top_array[n,[1]] = ytop[n]

                new_halfdrop = np.concatenate((top,new_bot))

                # re-order to check that the error was at the surface line
                xx,yy = sort_to_line(new_halfdrop[:,[0]],new_halfdrop[:,[1]])
                # NOTE(review): the next line is garbled in the source (a
                # length comparison and the surrounding branch were fused
                # with a later CP-index comparison).
                if len(xx)new_halfdrop[xCP_index[0]-1][1]:
                    new_halfdrop = new_halfdrop[xCP_index[0]:]
                else:
                    new_halfdrop = new_halfdrop[:xCP_index[0]+1]
            else:
                raise_error = True
                for x in xCP_index:
                    for y in yCP_index:
                        if x==y:
                            raise_error = False
                            xCP_index = [x]
                            yCP_index = [y]
                            #print('indexes of the CP are: ',xCP_index[0],', ',yCP_index[0])
                            if new_halfdrop[xCP_index[0]+1][1]>new_halfdrop[xCP_index[0]-1][1]:
                                new_halfdrop = new_halfdrop[xCP_index[0]:]
                            else:
                                new_halfdrop = new_halfdrop[:xCP_index[0]+1]
                if raise_error == True:
                    print('The index of the contact point x value is: ', new_halfdrop[xCP_index])
                    print('The index of the contact point y value is: ', new_halfdrop[yCP_index])
                    raise 'indexes of x and y values of the contact point are not the same'

        # left half is reversed so both halves run in the same direction
        if counter == 0:
            drops[counter] = new_halfdrop[::-1]
        else:
            drops[counter] = new_halfdrop

        if display: #display
            jet= plt.get_cmap('jet')
            colors = iter(jet(np.linspace(0,1,len(new_halfdrop))))
            for k in new_halfdrop:
                plt.plot(k[0],k[1], 'o',color=next(colors))
            plt.title('outputted halfdrop')
            plt.axis('equal')
            plt.show()
            plt.close()

        counter+=1

    # reflect the left drop and combine left and right

    profile = np.empty((len(drops[0])+len(drops[1]),2))
    for i,n in enumerate(drops[0]):
        flipped = n
        flipped[0] = -flipped[0]
        profile[i] = flipped
    for i,n in enumerate(drops[1]):
        profile[len(drops[0])+i] = n
    CPs[0][0] = -CPs[0][0]

    if display:
        jet= plt.get_cmap('jet')
        colors = iter(jet(np.linspace(0,1,len(profile))))
        for k in profile:
            plt.plot(k[0],k[1], 'o',color=next(colors))
        plt.title('final output')
        #plt.plot(profile[:,0],profile[:,1],'b')
        plt.show()
        plt.close()

        plt.title('final output')
        plt.plot(profile[:,0],profile[:,1],'b')
        plt.show()
        plt.close()

    # flip upside down again so that contour follows image indexing
    # and transform to the right so that x=0 is no longer in line with apex
    for coord in profile:
        coord[1] = -coord[1]
        coord[0] = coord[0] + xapex
    for n in [0,1]:
        CPs[n][1] = -CPs[n][1]
        CPs[n][0] = CPs[n][0] + xapex

    # flip original contour back to original orientation
    for coord in coords:
        coord[1] = -coord[1]

    return profile,CPs
+
def find_contours(image):
    """
    Calls cv2.findContours() on passed image in a way that is compatible with OpenCV 4.x, 3.x or 2.x
    versions. Passed image is a numpy.array.

    Note, cv2.findContours() will treat non-zero pixels as 1 and zero pixels as 0, so the edges detected will only
    be those on the boundary of pixels with non-zero and zero values.

    Returns a numpy array of the contours in descending arc length order.

    :raises ValueError: if ``image`` is not single-channel
    """
    if len(image.shape) > 2:
        raise ValueError('`image` must be a single channel image')

    # NOTE(review): the original referenced a module-level CV2_VERSION that is
    # not defined anywhere in this file (NameError at call time); derive the
    # version tuple from cv2 itself instead.
    try:
        cv2_version = tuple(int(part) for part in cv2.__version__.split('.')[:3])
    except ValueError:
        # Unusual version string (e.g. dev builds) — assume a modern OpenCV.
        cv2_version = (4, 0, 0)

    if cv2_version >= (4, 0, 0):
        # In OpenCV 4.0, cv2.findContours() no longer returns three arguments, it reverts to the same return signature
        # as pre 3.2.0.
        contours, hierarchy = cv2.findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_TC89_KCOS)
    elif cv2_version >= (3, 2, 0):
        # In OpenCV 3.2, cv2.findContours() does not modify the passed image and instead returns the
        # modified image as the first, of the three, return values.
        _, contours, hierarchy = cv2.findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_TC89_KCOS)
    else:
        contours, hierarchy = cv2.findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_TC89_KCOS)

    # Each contour has shape (n, 1, 2) where 'n' is the number of points. Presumably this is so each
    # point is a size 2 column vector, we don't want this so reshape it to a (n, 2)
    contours = [contour.reshape(contour.shape[0], 2) for contour in contours]

    # Sort the contours by arc length, descending order
    contours.sort(key=lambda c: cv2.arcLength(c, False), reverse=True)

    return contours
+
def extract_edges_CV(img):
    '''
    Detect the edge contour of the drop in ``img``.

    The image is Otsu-thresholded, contours are extracted, and the longest
    contour (by arc length) is assumed to be the drop. Points within one
    pixel of the image border are discarded.

    :param img: BGR image as a numpy array
    :return: (n, 2) numpy array of unique [x, y] edge coordinates in contour order
    '''
    IGNORE_EDGE_MARGIN = 1
    img = img.astype("uint8")
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Otsu's method chooses the threshold level automatically
    ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # NOTE(review): this 2-tuple unpacking assumes OpenCV 4.x (or < 3.2);
    # OpenCV 3.2-3.x returns three values — see find_contours().
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    # Each contour has shape (n, 1, 2); reshape each to (n, 2)
    contours = [contour.reshape(contour.shape[0], 2) for contour in contours]

    # Sort the contours by arc length, descending order
    contours.sort(key=lambda c: cv2.arcLength(c, False), reverse=True)

    # Assume that the drop is the largest (longest-perimeter) contour
    drop_profile = contours[0]

    # Ignore points of the drop profile near the edges of the drop image
    width, height = img.shape[1::-1]
    if not (width < IGNORE_EDGE_MARGIN or height < IGNORE_EDGE_MARGIN):
        mask = ((IGNORE_EDGE_MARGIN < drop_profile[:, 0]) & (drop_profile[:, 0] < width - IGNORE_EDGE_MARGIN) &
                (IGNORE_EDGE_MARGIN < drop_profile[:, 1]) & (drop_profile[:, 1] < height - IGNORE_EDGE_MARGIN))
        drop_profile = drop_profile[mask]

    # De-duplicate while preserving order. The original tested
    # `list(coord) not in output`, which is O(n^2); a seen-set is linear.
    seen = set()
    output = []
    for coord in drop_profile:
        key = (coord[0], coord[1])
        if key not in seen:
            seen.add(key)
            output.append([coord[0], coord[1]])
    return np.array(output)
+
def bashforth_adams(t, y, a, b):
    '''
    Rate of change of a point on the droplet profile with respect to the
    parametric angle phi.

    :param t: parametric angle phi, in degrees
    :param y: (x, z) point on the droplet profile
    :param a: capillary length in units px
    :param b: radius of curvature at apex in units of px
    :return: rate of change of x and of z with respect to phi

    The ratio b^2/a^2 should correspond to the Bond number.
    '''
    x, z = y
    phi = t / 180 * np.pi
    # The two ODEs share a common denominator
    denom = (b / a**2) * x * z + 2 * x - b * np.sin(phi)
    dx_dphi = b * x * np.cos(phi) / denom
    dz_dphi = b * x * np.sin(phi) / denom
    return dx_dphi, dz_dphi
+
def sim_bashforth_adams(h, a=1, b=1, num=500, all_the_way=False):
    '''
    Simulates the full profile of the Bashforth-Adams droplet from the apex.

    Starts at x = +-1e-5, z = 0 and integrates to z = h along the curve
    defined by the ``bashforth_adams`` function.

    :param h: Height of the droplet in px
    :param a: Capillary length of the fluid
    :param b: Curvature at the apex
    :param num: Number of coordinate points outputted per side
    :param all_the_way: if True, integrate until phi reaches +-180 instead of
        stopping at z == h
    :return: (angles, points, Bo): the phi values and (x, z) coordinates
        where the solver executed, and the Bond number estimate
    '''
    def reached_height(t, y, a, b):
        return y[1] - h
    # Stop the integration at z == h unless the full profile is requested
    reached_height.terminal = not all_the_way

    left = solve_ivp(bashforth_adams, (0, -180), (1e-5, 0), args=(a, b),
                     method='BDF',
                     t_eval=np.linspace(0, -180, num=num),
                     events=reached_height)
    right = solve_ivp(bashforth_adams, (0, 180), (1e-5, 0), args=(a, b),
                      method='BDF',
                      t_eval=np.linspace(0, 180, num=num),
                      events=reached_height)

    # Stitch the two half-profiles together, right side reversed so the
    # combined contour runs continuously from -180 toward +0
    angles = np.concatenate((left.t, right.t[::-1]))
    pred = np.column_stack((np.concatenate((left.y[0], right.y[0][::-1])),
                            np.concatenate((left.y[1], right.y[1][::-1]))))

    # NOTE: the original author found the Bond number off by a factor of 18,
    # hence the empirical correction applied here.
    Bo = 18 * (b * b) / (a * a)
    return angles, pred, Bo
+
def fit_bashforth_adams(data): #a=0.1,b= 3
    '''
    Calculates the best-fit capillary length and curvature at the apex given
    the provided data for the points on the edge of the droplet.

    Initial guesses are derived from the droplet height h = max(y):
    b = h/2 (apex curvature) and a = h/10 (capillary length).

    :param data: (n, 2) array of (x, y) points of the droplet edges
    :return: solution structure from scipy.opt.minimize; optimum.x holds the
        fitted (a, b)
    '''
    def calc_error(h, params):
        '''
        Calulate the sum-squared error between the points on the curve and
        the measured data points

        :param h: Height of the droplet in pixels
        :param params: tuple of capillary length and curvature at apex
        :return: sum-squared error between points on the curve and data
        '''
        a, b = params

        _, pred,Bo = sim_bashforth_adams(h, a=a, b=b)

        #print('a is: ',a)
        #print('b is: ',b)

        # For every data point, the distance to its nearest simulated point;
        # the norm of those minima is the residual being minimized.
        dist = distance.cdist(data, pred)
        return np.linalg.norm(np.min(dist, axis=1))

    h = np.max(data[:, 1])
    b = h/2
    a = h/10
    x_0 = (a, b)
    # NOTE(review): 'bounds' is computed but never passed to opt.minimize
    # below, so the Nelder-Mead search is effectively unconstrained.
    bounds = [[0,10], [0, 100]]


    optimum = opt.minimize(lambda x: calc_error(h, x), x_0,
                           method='Nelder-Mead',
                           options={'disp':False})
    return optimum
+
def calculate_angle(v1, v2):
    '''
    Compute the angle between two vectors of equal length.

    :param v1: numpy array
    :param v2: numpy array
    :return: angle between the two vectors v1 and v2 in degrees
    '''
    unit_a = v1 / np.linalg.norm(v1)
    unit_b = v2 / np.linalg.norm(v2)
    return np.degrees(np.arccos(np.dot(unit_a, unit_b)))
+
def fit_circle(points, width=None, start=False):
    '''
    Compute the best-fit circle to ``points`` by minimizing ``dist`` over
    the centerpoint and radius.

    :param points: (n, 2) array of (x, y) points lying on the droplet to fit
    :param width: estimated width of the droplet from the crop boundaries;
        defaults to the x-extent of ``points``
    :param start: boolean flag to determine how many parameters to fit (just
        radius if True, otherwise radius and centerpoint)
    :return: result structure from scipy.opt.minimize; res['x'] is
        [center_x, center_y, radius]
    '''
    if width is None:
        # x-extent of the data. The original computed
        # np.max(points[:, 0]) - np.min(points[:, 1]), mixing the x and y
        # columns — fixed to use the x column for both.
        width = np.max(points[:, 0]) - np.min(points[:, 0])

    if start:
        # Try to fit a circle to the points that we have extracted,
        # only varying the radius about the center of all the points
        z = np.mean(points, axis=0)
        res = opt.minimize(lambda r: dist([*z, r], points),
                           width / 2)

        # Pack the fixed center back into the result so the return shape
        # matches the full fit below
        res['x'] = np.array([*z, res['x'][0]])
    else:
        # Fit this new set of points, using the full set of parameters
        res = opt.minimize(lambda p: dist(p, points),
                           np.concatenate((np.mean(points, axis=0),
                                           [width / 4])))

    return res
+
def generate_circle_vectors(intersection):
    '''
    Using the intersection point with the baseline, compute the vector that
    points tangent to the circle.

    :param intersection: (x,y) point on the circle that crosses the baseline
    :return: baseline vector and vector tangent to best-fit circle
    '''
    x_t, y_t = intersection

    # For the contact angle we want the interior angle, so the baseline
    # vector looks in the negative x direction.
    v1 = np.array([-1, 0])

    if y_t == 0:
        # Intersection sits on the x axis: the tangent is vertical.
        v2 = np.array([0, 1])
    else:
        # Tangent slope follows from implicit differentiation of
        # x**2 + y**2 = r**2 at (x_t, y_t).
        v2 = np.array([1, -x_t / y_t])
        v2 = v2 / np.linalg.norm(v2)
        if y_t < 0:
            # Interior angle: when the intersection lies below the origin
            # (more negative y), flip the tangent to look left.
            v2 = -v2

    return v1, v2
+
def find_intersection(baseline_coeffs, circ_params):
    '''
    Compute the intersection points between the best fit circle and best-fit
    baseline.

    For this we rely on several coordinate transformations, first a
    translation to the centerpoint of the circle and then a rotation to give
    the baseline zero-slope.

    :param baseline_coeffs: Numpy array of coefficients to the baseline
                            polynomial (intercept first, then slope)
    :param circ_params: centerpoint and radius of best-fit circle
    :return: (x,y) point of intersection between these two shapes, in the
             translated/rotated frame (positive-x root only)
    :raises ValueError: if the circle and baseline do not intersect
    '''
    *z, r = circ_params
    b, m = baseline_coeffs[0:2]
    # Translate the origin to the circle center:
    #   Circle : x**2 + y**2 = r**2
    #   Line   : y = m * x + (m * z[0] + b - z[1])
    # then rotate clockwise by q with tan(q) = m, after which the line is
    # horizontal:
    #   Line   : y = (m*z[0] + b - z[1]) / sqrt(1 + m**2)  (independent of x)
    B = (m * z[0] + b - z[1]) / np.sqrt(1 + m**2)

    # Guard both sides: |B| > r means the horizontal line misses the circle.
    # (Previously only B > r was checked, so B < -r silently produced a NaN
    # from the square root of a negative number.)
    if abs(B) > r:
        raise ValueError("The circle and baseline do not appear to intersect")
    # Keep only the positive root of x**2 + B**2 = r**2.
    x_t = np.sqrt(r ** 2 - B ** 2)
    y_t = B

    # TODO:// replace the fixed linear baseline with linear
    # approximations near the intersection points

    return x_t, y_t
+
def YL_closest_point(xp, yp, YL_points, display=False):
    """
    Find the point of the YL (Young-Laplace) fit closest to a reference point.

    xp (float): The x-coordinate of the reference point
    yp (float): The y-coordinate of the reference point
    YL_points (array): The array of x, y coordinates outputted by the YL fit
    display (Boolean): Set to True to output figures and information.

    Returns:
        The distance between the reference point and the YL fit, and
        the coordinates of the closest point on the YL fit.

    """

    x = YL_points[:, 0]
    y = YL_points[:, 1]

    dist = np.sqrt((x - xp) ** 2 + (y - yp) ** 2)
    # argmin gives the index of the first minimum, matching the previous
    # list(dist).index(min(dist)) behaviour without the list round-trip.
    idx = int(np.argmin(dist))

    if display:
        plt.figure(1)
        plt.plot(x, y, '-', xp, yp, 'r+', x[idx], y[idx], 'r+')
        plt.xlabel('x')
        plt.ylabel('y')
        plt.title('Circle, Point, and Zeros')

        plt.figure(2)
        for t, d in enumerate(dist):
            plt.plot(t, d, 'm.')
        plt.plot(idx, dist[idx], 'cx')
        plt.xlabel('index value of list')
        plt.ylabel('Distance')
        plt.title('Distance Function')

        print(f'xp: {xp}, x[idx]: {x[idx]}')
        print(f'yp: {yp}, y[idx]: {y[idx]}')
        print('Error is: ', dist[idx])

        plt.show()
        plt.close()

    return dist[idx], [x[idx], y[idx]]
+
def YL_fit_errors(contour, YL_points, display=False):
    """
    Calculates the minimum distance between a point and a point of the YL fit.

    Every contour point is matched to its nearest point on the YL fit and
    summary statistics of those distances are returned.

    Parameters:
        contour (array): The array of x, y coordindate points
        YL_points (array): The array of x, y coordinate points of the YL fit
        display (boolean): Set to true to show figures.

    Returns:
        dictionary: The MAE, MSE, RMSE, and maximum error of the contour as compared against the
        YL fit.
    """

    errors = [
        YL_closest_point(point[0], point[1], YL_points, display=display)[0]
        for point in contour
    ]

    n = len(errors)
    mse = sum(e ** 2 for e in errors) / n
    return {
        'MAE': sum(abs(e) for e in errors) / n,
        'MSE': mse,
        'RMSE': np.sqrt(mse),
        'Maximum error': max(errors),
    }
+
+def analyze_frame(img,lim=10,fit_type='bashforth-adams',display=False):
+ """This is the function which must be called to perform the BA fit.
+ For best results, preprocessing must be perfromed before calling this function.
+ """
+ # begin with method specific preprocessing of img data
+ start_time = time.time()
+
+ img = img.astype("uint8")
+
+ edges_pts = extract_edges_CV(img) # array of x,y coords where lines are detected
+ #print('bounds :',bounds)
+ #print('lim: ',lim)
+
+ if display:
+ plt.imshow(img)
+ plt.plot(edges_pts[:,0],edges_pts[:,1])
+ plt.title('Extracted edge')
+ plt.show()
+ plt.close()
+
+ profile,CPs = prepare_hydrophobic(edges_pts,display)
+
+ a = [CPs[0][1],(CPs[1][1]-CPs[0][1])/(CPs[1][0]-CPs[0][0])] # of form [first y value of baseline, gradient]
+
+ #if display:
+ # plt.imshow(img)
+ # plt.title('prepared contour and contact points')
+ # plt.plot(profile[:,0],profile[:,1],'o')
+ # plt.plot(CPs[0][0],CPs[0][1],'yo')
+ # plt.plot(CPs[1][0],CPs[1][1],'yo')
+ # plt.show()
+ # plt.close()
+
+ # define baseline as between the two contact points
+ x = np.linspace(CPs[0][1],CPs[1][1],50)
+ y = np.linspace(CPs[0][0],CPs[1][0],50)
+ baseline = np.array([y,x]).T
+ #print(baseline)
+
+
+ circle = profile.astype(dtype='int') #circle object is an array of xy pairs
+
+ fit_preprocessing_time = time.time() - start_time
+ fit_start_time = time.time()
+
+ # now fit the circle, this will be used as a starting point for the BA fit
+ if fit_type == 'circular' or fit_type == 'bashforth-adams':
+ width = max(profile[:,0])
+
+ res = fit_circle(circle, width, start=True)
+ *z, r = res['x']
+
+ theta = np.linspace(0, 2 * np.pi, num=500)
+ x = z[0] + r * np.cos(theta)
+ y = z[1] + r * np.sin(theta)
+
+ iters = 0
+
+ # Keep retrying the fitting while the function value is
+ # large, as this indicates that we probably have 2 circles
+ # (e.g. there's something light in the middle of the image)
+ while res['fun'] >= circle.shape[0] and iters < lim:
+
+ # Extract and fit only those points outside
+ if 0:# the previously fit circle
+ points = np.array([(x, y) for x, y in circle
+ if (x - z[0]) ** 2 + (y - z[1]) ** 2
+ >= r ** 2])
+ else:
+ points = circle
+ res = fit_circle(points, width)
+ *z, r = res['x']
+ iters += 1
+ else:
+ points = circle
+
+ res['x'] = [round(value) for value in res['x']] #round values up to account for pixelated image
+ res['x'][2] += 1 # to account for if the line is the next pixel over
+
+ x_t, y_t = find_intersection(a, res['x'])
+ #print('x_t is: ',x_t)
+ #print('y_t is: ',y_t)
+
+ v1, v2 = generate_circle_vectors([x_t, y_t])
+
+ Ï• = {i: calculate_angle(v1, v2) for i in ['left', 'right']}
+ if fit_type == 'circular':
+ baseline_width = 2 * x_t
+
+ volume = (2/3 * np.pi * r ** 3
+ + np.pi * r ** 2 * y_t
+ - np.pi * y_t ** 3 / 3)
+
+ # Fitted circle
+ theta = np.linspace(0, 2 * np.pi, num=100)
+ x = z[0] + r * np.cos(theta)
+ y = z[1] + r * np.sin(theta)
+ fit = np.array([x, y]).T
+ else:
+ if display:
+ #print('points: ',points)
+ plt.title('points before near edge sort')
+ plt.plot(points[:,0],points[:,1])
+ plt.show()
+ plt.close()
+ if 0:# Get points within 10 pixels of the circle edge - done better by grouping and prepare_hydrophobic
+ # not appropriate for high bond numbers but will also work for low angle drops
+ points = np.array([(x, y) for x, y in circle
+ if (x - z[0]) ** 2 + (y - z[1]) ** 2
+ >= (r-10) ** 2])
+ if display:
+ #print('points: ',points)
+ plt.title('points near drop edge\nh='+str(h)+'\nlength='+str(len(points)))
+ plt.plot(points[:,0],points[:,1])
+ plt.show()
+ plt.close()
+ #print(points)
+
+ points[:, 1] = - np.array([y - np.dot(a, np.power(y,
+ range(len(a)))) for y in points[:, 1]])
+ center = (np.max(points[:, 0]) + np.min(points[:, 0]))/2
+ points[:, 0] = points[:, 0] - center
+ h = np.max(points[:, 1])
+ points = np.vstack([points[:, 0],
+ h - points[:, 1]]).T
+
+ if display:
+ print('Running Bashforth-Adams fit...\n')
+ cap_length, curv = fit_bashforth_adams(points).x
+ θs, pred, Bo = sim_bashforth_adams(h, cap_length, curv, profile.shape[0])
+ ϕ['left'] = -np.min(θs)
+ ϕ['right'] = np.max(θs)
+
+ θ = (ϕ['left'] + ϕ['right'])/2
+
+ R0 = pred[np.argmax(θs),0] - pred[np.argmin(θs),0]
+ baseline_width = R0
+
+ P = 2*cap_length/ curv
+ volume = np.pi * R0 * (R0 * h + R0 * P - 2 * np.sin(θ))
+ x = pred[:, 0] + center
+ y = np.array([np.dot(a, np.power(y, range(len(a)))) + y
+ for y in (pred[:, 1] - h)])
+ fit = np.array([x, y]).T
+
+ fit_time = time.time() - fit_start_time
+
+ # calculate r squared value of fit
+
+ errors = YL_fit_errors(profile,fit,False)
+
+ else:
+ raise Exception('Unknown fit type! Try another.')
+
+ #drop symmetry r2 score
+ #flip
+ drop = profile
+ drop[:,1] = -drop[:,1]
+ #find apex index
+ apex_indices = np.argwhere(drop[:,1] == np.max(drop[:,1]))
+ apex_index = int(np.mean(apex_indices))
+ #translate so that apex is on x=0, bottom point on y=0
+ drop[:,1]=drop[:,1]-min(drop[:,1])
+ drop[:,0]=drop[:,0]-drop[:,0][apex_index]
+
+ left = drop[:apex_index]
+ right = drop[apex_index:]
+
+ # flip left side, so both facing right
+ left[:,0]=-left[:,0]
+ left = left[::-1] #reverse order so coords start from apex
+
+ if len(left[:,0])>len(right[:,0]):
+ diff = len(left[:,0]) - len(right[:,0])
+ left = left[diff:]
+ sym_errors = YL_fit_errors(left,right,False)
+ if len(left[:,0])= circle.shape[0] and iters < lim:
+
+ # Extract and fit only those points outside
+ if 0:# the previously fit circle
+ points = np.array([(x, y) for x, y in circle
+ if (x - z[0]) ** 2 + (y - z[1]) ** 2
+ >= r ** 2])
+ else:
+ points = circle
+ res = fit_circle(points, width)
+ *z, r = res['x']
+ iters += 1
+ else:
+ points = circle
+
+ res['x'] = [round(value) for value in res['x']] #round values up to account for pixelated image
+ res['x'][2] += 1 # to account for if the line is the next pixel over
+
+ x_t, y_t = find_intersection(a, res['x'])
+ #print('x_t is: ',x_t)
+ #print('y_t is: ',y_t)
+
+ v1, v2 = generate_circle_vectors([x_t, y_t])
+
+ Ï• = {i: calculate_angle(v1, v2) for i in ['left', 'right']}
+ if fit_type == 'circular':
+ baseline_width = 2 * x_t
+
+ volume = (2/3 * np.pi * r ** 3
+ + np.pi * r ** 2 * y_t
+ - np.pi * y_t ** 3 / 3)
+
+ # Fitted circle
+ theta = np.linspace(0, 2 * np.pi, num=100)
+ x = z[0] + r * np.cos(theta)
+ y = z[1] + r * np.sin(theta)
+ fit = np.array([x, y]).T
+ else:
+ if display:
+ #print('points: ',points)
+ plt.title('points before near edge sort')
+ plt.plot(points[:,0],points[:,1])
+ plt.show()
+ plt.close()
+ if 0:# Get points within 10 pixels of the circle edge - done better by grouping and prepare_hydrophobic
+ # not appropriate for high bond numbers but will also work for low angle drops
+ points = np.array([(x, y) for x, y in circle
+ if (x - z[0]) ** 2 + (y - z[1]) ** 2
+ >= (r-10) ** 2])
+ if display:
+ #print('points: ',points)
+ plt.title('points near drop edge\nh='+str(h)+'\nlength='+str(len(points)))
+ plt.plot(points[:,0],points[:,1])
+ plt.show()
+ plt.close()
+ #print(points)
+
+ points[:, 1] = - np.array([y - np.dot(a, np.power(y,
+ range(len(a)))) for y in points[:, 1]])
+ center = (np.max(points[:, 0]) + np.min(points[:, 0]))/2
+ points[:, 0] = points[:, 0] - center
+ h = np.max(points[:, 1])
+ points = np.vstack([points[:, 0],
+ h - points[:, 1]]).T
+
+ if display:
+ print('Running Bashforth-Adams fit...\n')
+ cap_length, curv = fit_bashforth_adams(points).x
+ θs, pred, Bo = sim_bashforth_adams(h, cap_length, curv, profile.shape[0])
+ ϕ['left'] = -np.min(θs)
+ ϕ['right'] = np.max(θs)
+
+ θ = (ϕ['left'] + ϕ['right'])/2
+
+ R0 = pred[np.argmax(θs),0] - pred[np.argmin(θs),0]
+ baseline_width = R0
+
+ P = 2*cap_length/ curv
+ volume = np.pi * R0 * (R0 * h + R0 * P - 2 * np.sin(θ))
+ x = pred[:, 0] + center
+ y = np.array([np.dot(a, np.power(y, range(len(a)))) + y
+ for y in (pred[:, 1] - h)])
+ fit = np.array([x, y]).T
+
+ fit_time = time.time() - start_time
+
+ # calculate r squared value of fit
+
+ errors = YL_fit_errors(profile,fit,False)
+
+ else:
+ raise Exception('Unknown fit type! Try another.')
+
+ #drop symmetry r2 score
+ #flip
+ drop = profile.copy()
+ drop[:,1] = -drop[:,1]
+ #find apex index
+ apex_indices = np.argwhere(drop[:,1] == np.max(drop[:,1]))
+ apex_index = int(np.mean(apex_indices))
+ #translate so that apex is on x=0, bottom point on y=0
+ drop[:,1]=drop[:,1]-min(drop[:,1])
+ drop[:,0]=drop[:,0]-drop[:,0][apex_index]
+
+ left = drop[:apex_index]
+ right = drop[apex_index:]
+
+ # flip left side, so both facing right
+ left[:,0]=-left[:,0]
+ left = left[::-1] #reverse order so coords start from apex
+
+ if len(left[:,0])>len(right[:,0]):
+ diff = len(left[:,0]) - len(right[:,0])
+ left = left[diff:]
+ sym_errors = YL_fit_errors(left,right,False)
+ if len(left[:,0]) 10:
+ print(thing)
+
+ epochs_range = range(len(history['loss']))#range(epochs)
+
+ plt.figure(figsize=(20, 10))
+
+ plt.subplot(1, 3, 1)
+ plt.plot(epochs_range, val_mae, label='Validation MAE')
+ plt.plot(epochs_range, mae, label='Training MAE')
+ if show_restart_points == True:
+ for i, point in enumerate(restart_points):
+ if i==0:
+ plt.plot([point,point], [min(mae),max(mae)],'r',label='Restart Points')
+ else:
+ plt.plot([point,point], [min(mae),max(mae)],'r')
+ plt.legend(loc='upper right')
+ plt.title('Training and Validation MAE\nFinal training MAE: '+str(mae[-1])+'\nFinal val MAE: '+str(val_mae[-1]))
+ plt.yscale('log')
+ plt.xscale('log')
+ plt.ylabel('Mean Absolute Error')
+ plt.xlabel('Epoch')
+
+ plt.subplot(1, 3, 2)
+ plt.plot(epochs_range, val_mse, label='Validation MSE')
+ plt.plot(epochs_range, mse, label='Training MSE')
+ if show_restart_points == True:
+ for i, point in enumerate(restart_points):
+ if i==0:
+ plt.plot([point,point], [min(mse),max(mse)],'r',label='Restart Points')
+ else:
+ plt.plot([point,point], [min(mse),max(mse)],'r')
+ plt.legend(loc='upper right')
+ plt.title('Training and Validation MSE')
+ plt.title('Training and Validation MSE\nFinal training MSE: '+str(mse[-1])+'\nFinal val MSE: '+str(val_mse[-1]))
+ plt.yscale('log')
+ plt.xscale('log')
+ plt.ylabel('Mean Squared Error')
+ plt.xlabel('Epoch')
+
+ plt.subplot(1, 3, 3)
+
+
+ plt.plot(epochs_range, val_loss, label='Validation Loss')
+ plt.plot(epochs_range, loss, label='Training Loss')
+ if show_restart_points == True:
+ for i, point in enumerate(restart_points):
+ if i==0:
+ plt.plot([point,point], [min(loss),max(loss)],'r',label='Restart Points')
+ else:
+ plt.plot([point,point], [min(loss),max(loss)],'r')
+ plt.legend(loc='upper right')
+ plt.title('Training and Validation Loss')
+ plt.title('Training and Validation loss\nFinal training loss: '+str(loss[-1])+'\nFinal val loss: '+str(val_loss[-1]))
+ plt.yscale('log')
+ plt.xscale('log')
+ plt.ylabel('Loss')
+ plt.xlabel('Epoch')
+ if display == True:
+ plt.show()
+
+ if save_path != None:
+ plt.savefig(save_path, format='png')
+ plt.close()
+
+print('done')
diff --git a/modules/ML_model/prepare_experimental.py b/modules/ML_model/prepare_experimental.py
new file mode 100644
index 0000000..80b34ea
--- /dev/null
+++ b/modules/ML_model/prepare_experimental.py
@@ -0,0 +1,2233 @@
+#!/usr/bin/env python
+#coding=utf-8
+
+import matplotlib.pyplot as plt
+import numpy as np
+import cv2
+import tensorflow as tf
+
+from sklearn.cluster import OPTICS # for clustering algorithm
+import math # for tilt_correction
+from scipy import misc, ndimage # for tilt_correction
+import time # for recording timings
+import io #for saving to memory
+
def auto_crop(img, low=50, high=150, apertureSize=3, verbose=0): # DS 08/06/23
    '''
    Automatically identify where the crop should be placed within the original
    image

    This function utilizes the opencv circular and linear Hough transfrom
    implementations to identify the most circular object in the image
    (the droplet), and center it within a frame that extends by padding to each
    side.

    :param img: BGR image (numpy array) containing the droplet
    :param low: Value of the weak pixels in the dual thresholding
    :param high: Value of the strong pixels in the dual thresholding
    :param apertureSize: The aperture size variable given to cv2.Canny during
                         edge detection
    :param verbose: Integer values from 0 to 2, giving varying degrees of detail
    :return: tuple of (cropped image, [left, right, top, bottom] crop bounds)
    :raises ValueError: if no droplet circle can be identified
    '''

    if verbose >= 1:
        print('Performing auto-cropping, please wait...')

    # find edges in the image
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, low, high, apertureSize=apertureSize)

    # hough circle to find droplet - minRadius at 2% img width
    circles = cv2.HoughCircles(edges, cv2.HOUGH_GRADIENT, 1,
                               minDist=max(img.shape),  # one circle
                               param1=30,
                               param2=15,
                               minRadius=int(img.shape[1]*0.02),  # 0.05
                               maxRadius=0)

    if circles is not None:
        circles = np.uint16(np.around(circles))
        for i in circles[0, :]:
            center = (i[0], i[1])
            radius = i[2]

            if verbose >= 2:
                circle1 = plt.Circle(center, 1, color='r')
                # now make a circle with no fill, which is good for hi-lighting key results
                circle2 = plt.Circle(center, radius, color='r', fill=False)

                ax = plt.gca()
                ax.axis('equal')

                ax.add_patch(circle1)
                ax.add_patch(circle2)

                fig = plt.gcf()
                fig.set_size_inches(10, 10)

                plt.imshow(img)
                plt.title("Hough circle")
                plt.show()
                plt.close()
    else:
        # retry: hough circle to find droplet - looser threshold, minRadius at 0
        circles = cv2.HoughCircles(edges, cv2.HOUGH_GRADIENT, 1,
                                   minDist=max(img.shape),  # one circle
                                   param1=30,
                                   param2=20,
                                   minRadius=0,
                                   maxRadius=0)
        if circles is not None:
            circles = np.uint16(np.around(circles))
            for i in circles[0, :]:
                center = (i[0], i[1])
                radius = i[2]

                if verbose >= 2:
                    circle1 = plt.Circle(center, 1, color='r')
                    # now make a circle with no fill, which is good for hi-lighting key results
                    circle2 = plt.Circle(center, radius, color='r', fill=False)

                    ax = plt.gca()
                    ax.axis('equal')

                    ax.add_patch(circle1)
                    ax.add_patch(circle2)

                    fig = plt.gcf()
                    fig.set_size_inches(10, 10)

                    plt.imshow(img)
                    plt.title("Hough circle")
                    plt.show()
                    plt.close()
        else:
            # Without a circle there is no droplet to crop around; raising
            # here replaces the NameError that previously occurred when
            # the undefined center/radius were used below.
            raise ValueError('Hough circle failed to identify a drop')

    # crop image based on circle found (this prevents hough line identifying the needle)
    bottom = int(center[1] + (radius * 1.2))  # add 20% padding
    if bottom > img.shape[0]:
        bottom = img.shape[0]
    top = int(center[1] - (radius * 1.2))  # add 20% padding
    if top < 0:
        top = 0

    img = img[top:bottom, :]
    # Consistency fix: use the caller-supplied Canny parameters here too
    # (previously hard-coded to 50/150/3, which match the defaults).
    edges = cv2.Canny(img, low, high, apertureSize=apertureSize)
    center = (center[0], -(center[1] - bottom))  # reassign circle center to new cropped image

    if verbose >= 2:
        plt.imshow(img)
        plt.title('image after top and bottom crop')
        plt.show()
        plt.close()

    # hough lines to find baseline
    lines = cv2.HoughLines(edges, 1, np.pi/180, 100)

    if lines is not None:  # if the HoughLines function is successful
        if verbose >= 2:
            print('shape of image: ', img.shape)
        # only the strongest line (index 0) is used as the baseline
        for i, line in enumerate(lines):
            if i == 0:
                rho, theta = line[0]
                a = np.cos(theta)
                b = np.sin(theta)
                x0 = a*rho
                y0 = b*rho
                x1 = int(x0)
                y1 = int(y0 + 1000*(a))
                x2 = int(x0 - img.shape[1]*(-b))
                y2 = int(y0 - 1000*(a))
                if verbose >= 2:
                    plt.title('hough approximated findings')
                    plt.imshow(img)
                    circle1 = plt.Circle(center, 1, color='r')
                    circle2 = plt.Circle(center, radius, color='r', fill=False)
                    ax = plt.gca()
                    ax.add_patch(circle1)
                    ax.add_patch(circle2)
                    fig = plt.gcf()

                    plt.plot([x1, x2], [y1, y2], 'r')
                    fig = plt.gcf()
        p1, p2 = (x1, y1), (x2, y2)  # baseline exists between these points
        if verbose >= 2:
            print('baseline goes from ', p1, ' to ', p2)

        # now find bounds

        # find intercept of line and circle by substituting the parametric
        # line p1 + t*(dx, dy) into the circle equation (quadratic in t)
        dx, dy = p2[0] - p1[0], p2[1] - p1[1]

        a = dx**2 + dy**2
        b = 2 * (dx * (p1[0] - center[0]) + dy * (p1[1] - center[1]))
        c = (p1[0] - center[0])**2 + (p1[1] - center[1])**2 - radius**2

        discriminant = b**2 - 4 * a * c
        if discriminant > 0:
            t1 = (-b + discriminant**0.5) / (2 * a)
            t2 = (-b - discriminant**0.5) / (2 * a)

            # (Removed a dead None-initialization and None-check: both
            # intersections are assigned unconditionally here.)
            intersect1, intersect2 = (dx * t1 + p1[0], dy * t1 + p1[1]), (dx * t2 + p1[0], dy * t2 + p1[1])

            if verbose >= 2:
                plt.plot(intersect1[0], intersect1[1], 'o', color='orange')
                plt.plot(intersect2[0], intersect2[1], 'o', color='orange')

                plt.show()
                plt.close()

            bottom = int(max([intersect1[1], intersect2[1]]))  # max value of intersect points
            top = int(center[1] - radius)  # assume top of drop is in image
            if center[1] < max([intersect1[1], intersect2[1]]):
                right = int(center[0] + radius)
            else:
                right = int(max([intersect1[0], intersect2[0]]))
            if center[1] < min([intersect1[1], intersect2[1]]):
                left = int(center[0] - radius)
            else:
                left = int(min([intersect1[0], intersect2[0]]))
        else:
            # bounds is (left,right,top,bottom)
            print('No baseline-drop intercept found')
            left = int(center[0] - radius)
            right = int(center[0] + radius)
            top = int(center[1] - radius)
            bottom = int(center[1] + radius)

    else:  # if the HoughLine function cannot identify a surface line, use the circle as a guide for bounds
        left = int(center[0] - radius)
        right = int(center[0] + radius)
        top = int(center[1] - radius)
        bottom = int(center[1] + radius)

    # pad the bounding box by a quarter of its larger dimension
    pad = int(max([right - left, bottom - top])/4)

    top -= pad
    bottom += pad
    left -= pad
    right += pad

    # clamp the padded box to the image extents
    if left < 0:
        left = 0
    if top < 0:
        top = 0
    if bottom > img.shape[0]:
        bottom = img.shape[0]
    if right > img.shape[1]:
        right = img.shape[1]

    if verbose >= 2:
        print('lower most y coord of drop: ', bottom)
        print('upper most y coord of drop: ', top)
        print('right most x coord of drop: ', right)
        print('left most x coord of drop: ', left)

    bounds = [left, right, top, bottom]
    new_img = img[top:bottom, left:right]

    if verbose >= 1:
        plt.title('cropped drop')
        plt.imshow(new_img)
        plt.show()
        plt.close()

    return new_img, bounds
+
def find_intersection(baseline_coeffs, circ_params):
    '''
    Compute the intersection points between the best fit circle and best-fit
    baseline.

    For this we rely on several coordinate transformations, first a
    translation to the centerpoint of the circle and then a rotation to give
    the baseline zero-slope.

    :param baseline_coeffs: Numpy array of coefficients to the baseline
                            polynomial (intercept first, then slope)
    :param circ_params: centerpoint and radius of best-fit circle
    :return: (x,y) point of intersection between these two shapes, in the
             translated/rotated frame (positive-x root only)
    :raises ValueError: if the circle and baseline do not intersect
    '''
    *z, r = circ_params
    b, m = baseline_coeffs[0:2]
    # Translate the origin to the circle center:
    #   Circle : x**2 + y**2 = r**2
    #   Line   : y = m * x + (m * z[0] + b - z[1])
    # then rotate clockwise by q with tan(q) = m, after which the line is
    # horizontal:
    #   Line   : y = (m*z[0] + b - z[1]) / sqrt(1 + m**2)  (independent of x)
    B = (m * z[0] + b - z[1]) / np.sqrt(1 + m**2)

    # Guard both sides: |B| > r means the horizontal line misses the circle.
    # (Previously only B > r was checked, so B < -r silently produced a NaN
    # from the square root of a negative number.)
    if abs(B) > r:
        raise ValueError("The circle and baseline do not appear to intersect")
    # Keep only the positive root of x**2 + B**2 = r**2.
    x_t = np.sqrt(r ** 2 - B ** 2)
    y_t = B

    # TODO:// replace the fixed linear baseline with linear
    # approximations near the intersection points

    return x_t, y_t
+
def cluster_OPTICS(sample, out_style='coords', xi=None, eps=None, verbose=0):
    """Takes an array (or list) of the form [[x1,y1],[x2,y2],...,[xn,yn]].
    Clusters are outputted in the form of a dictionary.

    If out_style='coords' each dictionary entry is a group, and points are outputted in coordinate form.
    If out_xy='xy' there are two dictionary entries for each group, one labeled as nx and one as ny
    (where n is the label of the group)

    If xi (float between 0 and 1) is not None and eps is None, then the xi clustering method is used.
    The optics algorithm defines clusters based on the minimum steepness on the reachability plot.
    For example, an upwards point in the reachability plot is defined by the ratio from one point to
    its successor being at most 1-xi.

    If eps (float) is not None and xi is None, then the dbscan clustering method is used. Where eps is the
    maximum distance between two samples for one to be considered as in the neighborhood of the other.

    Raises ValueError unless exactly one of eps and xi is given.

    https://stackoverflow.com/questions/47974874/algorithm-for-grouping-points-in-given-distance
    https://scikit-learn.org/stable/modules/generated/sklearn.cluster.OPTICS.html
    """
    # Exactly one of eps / xi selects the clustering method.
    if eps is not None and xi is None:
        clustering = OPTICS(min_samples=2, cluster_method='dbscan', eps=eps).fit(sample)  # cluster_method changed to dbscan (so eps can be set)
    elif xi is not None and eps is None:
        clustering = OPTICS(min_samples=2, xi=xi).fit(sample)  # original had xi = 0.05, xi as 0.1 in function input
    else:
        # Fixed: raising a plain string is a TypeError in Python 3; raise a
        # real exception with the same message instead.
        raise ValueError('Error: only one of eps and xi can be chosen but not neither nor both')

    groups = list(set(clustering.labels_))

    if verbose == 2:
        print(clustering.labels_)
    elif verbose == 1:
        print(groups)
    elif verbose == 0:
        pass

    # group label -> list of member points
    dic = {}
    for n in groups:
        if n not in dic:
            dic[n] = []
        for i in range(len(sample)):
            if clustering.labels_[i] == n:
                dic[n].append(sample[i])

    # separate points per group into '<label>x' / '<label>y' lists
    dic2 = {}
    for k in dic.keys():
        x = []
        y = []
        for i in range(len(dic[k])):
            x.append(dic[k][i][0])
        dic2[str(k)+'x'] = x
        for i in range(len(dic[k])):
            y.append(dic[k][i][1])
        dic2[str(k)+'y'] = y

    if out_style == 'coords':
        return dic
    elif out_style == 'xy':
        return dic2
+
def distance1(P1, P2):
    """Return the Euclidean distance between the two points
    P1 = (x1, y1) and P2 = (x2, y2)."""
    dx = P1[0] - P2[0]
    dy = P1[1] - P2[1]
    return (dx * dx + dy * dy) ** 0.5
+
def optimized_path_new(coords, start=None):
    """Order 2D points into a continuous path by greedy nearest-neighbour.

    coords should be a list in this format coords = [ [x1, y1], [x2, y2] , ...]
    (a numpy array is converted).  Starting from ``start`` (default: the
    first coordinate), the nearest unvisited point is appended repeatedly.
    If the ordered path then contains a jump of more than 5 units, the path
    is truncated just before the first such jump, since points beyond it
    are likely a mistake.
    https://stackoverflow.com/questions/45829155/sort-points-in-order-to-have-a-continuous-curve-using-python

    :param coords: list or array of (x, y) points
    :param start: optional starting point; must be an element of coords
    :return: numpy array of ordered path points
    """
    def _dist(p, q):
        # Euclidean distance (same arithmetic as the module-level distance1)
        return ((p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2) ** 0.5

    if not isinstance(coords, list):
        coords = coords.tolist()
    if start is None:
        start = coords[0]

    # NOTE: a list input is consumed (mutated by remove()) during ordering.
    pass_by = coords
    path = [start]
    pass_by.remove(start)
    while pass_by:
        nearest = min(pass_by, key=lambda p: _dist(path[-1], p))
        path.append(nearest)
        pass_by.remove(nearest)
    path = np.array(path)

    # If there are any large jumps in distance, there is likely a mistake;
    # therefore the points from the first jump onward are dropped.
    # (Removed dead `if 0:` start-conversion code and the constant `if 1:`
    # wrapper from the original.)
    gaps = [_dist(path[i], path[i + 1]) for i in range(len(path) - 1)]
    jump_idx = [i for i, gap in enumerate(gaps) if gap > 5]
    if jump_idx:
        path = path[:jump_idx[0]]

    return path
+
+def prepare_hydrophobic_new(coords,xi=0.8,display=False):
+ """takes an array (n,2) of coordinate points, and returns the left and right halfdrops of the contour.
+ xi determines the minimum steepness on the reachability plot that constitutes a cluster boundary of the
+ clustering algorithm.
+ deg is the degree of the polynomial used to describe the shape of the droplet.
+
+ This code is adapted from the prepare module, but this version differs in that it assumes that the drop
+ is hydrophobic."""
+ # scan for clusers to remove noise and circle from lensing effect
+ ################ MAY NEED TO OPTIMIZE eps/xi TO FIND APPROPRIATE GROUPINGS ####################
+ if display: # turn this off bc using synthetic drops without lensing effect
+ input_contour = coords
+ dic,dic2 = cluster_OPTICS(input_contour,xi=xi),cluster_OPTICS(input_contour,out_style='xy',xi=xi)
+
+ #print("number of groups: ",len(list(dic.keys())))
+
+ jet= plt.get_cmap('jet')
+ colors = iter(jet(np.linspace(0,1,len(list(dic.keys())))))
+ for k in dic.keys():
+ plt.plot(dic2[str(k)+'x'],dic2[str(k)+'y'], 'o',color=next(colors))
+ plt.title(str(len(dic.keys()))+" groups found by clustering")
+ plt.show()
+ plt.close()
+ maxkey=max(dic, key=lambda k: len(dic[k]))
+
+ #print('key to longest dictionary entry is: ',maxkey)
+
+ # take the longest group
+ longest = dic[maxkey]
+
+ # flip contour so that min and max values are correct
+ for coord in coords:
+ coord[1] = -coord[1]
+
+ longest = coords
+
+ #print("first few coordinates of the longest contour: ",longest[:3])
+
+ xlongest = []
+ ylongest = []
+ for i in range(len(longest)):
+ xlongest.append(longest[i][0])
+ ylongest.append(longest[i][1])
+
+ #print("first few x coordinates of the longest contour: ",xlongest[:3])
+ #print("first few y coordinates of the longest contour: ",ylongest[:3])
+
+
+ # Find a appropriate epsilon value for cluster_OPTICS, this will remove noise in the bottom 10% of the drop
+ #. most importantly noise is reduced at contact points.
+
+ # variables in this process are how much and what part of the top of the droplet we use to be representative of
+ # the full contour, and whether we use the max(distance) between points or the average between points, or
+ # a scalar value of either.
+
+ xtop = [] # isolate top 90% of drop
+ ytop = []
+ percent = 0.3
+ #print('Isolate the top ',100-(percent*100),'% of the contour:')
+ for n,y in enumerate(ylongest):
+ if y > min(ylongest) + (max(ylongest) - min(ylongest))*percent:
+ xtop.append(xlongest[n])
+ ytop.append(y)
+ xtop = np.array(xtop)
+ ytop = np.array(ytop)
+
+ top = []
+ for n,x in enumerate(xtop):
+ top.append([xtop[n],ytop[n]])
+ top = np.array(top)
+ top_array = optimized_path(top)
+
+ dists = [] # find the average distance between points
+ for n,co in enumerate(top_array):
+ if 1 min(ylongest) + (max(ylongest) - min(ylongest))*percent:
+ xtop.append(xlongest[n])
+ ytop.append(y)
+ xapex = (max(xtop) + min(xtop))/2
+ #print('The x value of the apex is: ',xapex)
+
+ l_drop = []
+ r_drop = []
+ for n in longest:
+ if n[0] < xapex:
+ l_drop.append(n)
+ if n[0] > xapex:
+ r_drop.append(n)
+ l_drop = np.array(l_drop)
+ r_drop = np.array(r_drop)
+
+
+
+ # transpose both half drops so that they both face right and the apex of both is at 0,0
+ r_drop[:,[0]] = r_drop[:,[0]] - xapex
+ l_drop[:,[0]] = -l_drop[:,[0]] + xapex
+
+ if display:
+ plt.plot(r_drop[:,[0]], r_drop[:,[1]], 'b,')
+ #plt.show()
+ #plt.close()
+ plt.plot(l_drop[:,[0]], l_drop[:,[1]], 'r,')
+ #plt.gca().set_aspect('equal', adjustable='box')
+ #plt.xlim([470,530])
+ #plt.ylim([-188,-190])
+ plt.show()
+ plt.close()
+
+ #############################
+
+ # the drop has been split in half
+
+ # this system has a user input which gives a rough indication of the contact point and the surface line
+
+ # isolate the bottom 5% of the contour near the contact point
+
+ drops = {}
+ counter = 0
+ crop_drop = {}
+ CPs = {}
+ for halfdrop in [l_drop,r_drop]:
+ xhalfdrop = halfdrop[:,[0]].reshape(len(halfdrop[:,[0]]))
+ yhalfdrop = halfdrop[:,[1]].reshape(len(halfdrop[:,[1]]))
+
+ # isolate the bottom of the drop to help identify contact points (may not need to do this for all scenarios)
+ bottom = []
+ top = [] # will need this later
+ #print('isolate the bottom ',percent*100,'% of the contour:') # percent defined above
+ div_line_value = min(halfdrop[:,[1]]) + (max(halfdrop[:,[1]]) - min(halfdrop[:,[1]]))*percent
+ for n in halfdrop:
+ if n[1] < div_line_value:
+ bottom.append(n)
+ else:
+ top.append(n)
+
+ bottom = np.array(bottom)
+ top = np.array(top)
+
+ xbottom = bottom[:,[0]].reshape(len(bottom[:,[0]]))
+ ybottom = bottom[:,[1]].reshape(len(bottom[:,[1]]))
+ xtop = top[:,[0]].reshape(len(top[:,[0]]))
+ ytop = top[:,[1]].reshape(len(top[:,[1]]))
+
+ #print('max x value of halfdrop is: ',max(xhalfdrop))
+
+ if display: # plot the bottom 10% of the contour
+ plt.plot(xbottom, ybottom, 'b,')
+ plt.title('bottom 10% of the contour')
+ #plt.xlim([130,200])
+ plt.show()
+ plt.close()
+
+ #### Continue here assuming that the drop is hydrophobic ####
+
+
+ if 1:
+ # order all halfdrop points using optimized_path (very quick but occasionally makes mistakes)
+
+ new_halfdrop = sorted(halfdrop.tolist(), key=lambda x: (x[0],-x[1])) #top left to bottom right
+ new_halfdrop = optimized_path(new_halfdrop)#[::-1]
+
+ xnew_halfdrop = new_halfdrop[:,[0]].reshape(len(new_halfdrop[:,[0]]))
+ ynew_halfdrop = new_halfdrop[:,[1]].reshape(len(new_halfdrop[:,[1]]))
+
+ xCP = min(xbottom)
+ yCP = []
+ for coord in new_halfdrop:
+ if coord[0]==xCP:
+ yCP.append(coord[1])
+ yCP =min(yCP)
+ CPs[counter] = [xCP, yCP]
+
+ if display: #check
+ plt.plot(new_halfdrop[:,0],new_halfdrop[:,1])
+ plt.show()
+ plt.close()
+
+ # remove surface line past the contact point
+
+ xCP_index = [i for i, j in enumerate(xnew_halfdrop) if j == xCP]
+ #print('xCP_index is: ',xCP_index)
+ yCP_index = [i for i, j in enumerate(ynew_halfdrop) if j == yCP]
+ #print('yCP_index is: ',yCP_index)
+
+ new_halfdrop = np.zeros((len(xnew_halfdrop),2))
+ for n in range(len(xnew_halfdrop)):
+ new_halfdrop[n,[0]]=xnew_halfdrop[n]
+ new_halfdrop[n,[1]]=ynew_halfdrop[n]
+ #print('first 3 points of new_halfdrop are: ',new_halfdrop[:3])
+ #print('length of new_halfdrop is: ',len(new_halfdrop))
+
+ if xCP_index == yCP_index:
+ if new_halfdrop[xCP_index[0]+1][1]>new_halfdrop[xCP_index[0]-1][1]:
+ new_halfdrop = new_halfdrop[xCP_index[0]:]
+ else:
+ new_halfdrop = new_halfdrop[:xCP_index[0]+1]
+ else:
+ raise_error = True
+ for x in xCP_index:
+ for y in yCP_index:
+ if x==y:
+ raise_error = False
+ xCP_index = [x]
+ yCP_index = [y]
+ #print('indexes of the CP are: ',xCP_index[0],', ',yCP_index[0])
+ if new_halfdrop[xCP_index[0]+1][1]>new_halfdrop[xCP_index[0]-1][1]:
+ new_halfdrop = new_halfdrop[xCP_index[0]:]
+ else:
+ new_halfdrop = new_halfdrop[:xCP_index[0]+1]
+ if raise_error == True:
+ print('The index of the contact point x value is: ', new_halfdrop[xCP_index])
+ print('The index of the contact point y value is: ', new_halfdrop[yCP_index])
+ raise 'indexes of x and y values of the contact point are not the same'
+
+ if 0:
+ # order all halfdrop points using two-opt (the slower method)
+
+ # before any ordering is done, chop off the surface line that is past the drop edge
+ del_indexes = []
+ for index,coord in enumerate(bottom):
+ if coord[0]>max(xtop):
+ del_indexes.append(index)
+ if coord[1] ybot[yCP_index-1]:
+ new_bot = np.zeros((len(xbot[yCP_index:]),2))
+ for n in range(len(new_bot)):
+ new_bot[n,[0]] = xbot[xCP_index+n]
+ new_bot[n,[1]] = ybot[yCP_index+n]
+ else:
+ new_bot = np.zeros((len(xbot[:yCP_index]),2))
+ for n in range(len(new_bot)):
+ new_bot[n,[0]] = xbot[n]
+ new_bot[n,[1]] = ybot[n]
+ except:
+ try:
+ if ybot[yCP_index] > ybot[yCP_index-2]:
+ new_bot = np.zeros((len(xbot[yCP_index:]),2))
+ for n in range(len(new_bot)):
+ new_bot[n,[0]] = xbot[xCP_index+n]
+ new_bot[n,[1]] = ybot[yCP_index+n]
+ else:
+ new_bot = np.zeros((len(xbot[:yCP_index]),2))
+ for n in range(len(new_bot)):
+ new_bot[n,[0]] = xbot[n]
+ new_bot[n,[1]] = ybot[n]
+ except:
+ print('xCP_indexs are: ', xCP_indexs)
+ print('yCP_indexs are: ', yCP_indexs)
+ raise 'indexes of x and y values of the contact point are not the same'
+ new_halfdrop = np.concatenate((new_top,new_bot))
+
+ if 0: # order the points so that the baseline can be removed
+ # before any ordering is done, chop off the surface line that is past the drop edge
+ del_indexes = []
+ for index,coord in enumerate(halfdrop):
+ if coord[0]>max(xtop):
+ del_indexes.append(index)
+ #halfdrop = np.delete(halfdrop,del_indexes)
+ xhalfdrop = np.delete(xhalfdrop,del_indexes)
+ yhalfdrop = np.delete(yhalfdrop,del_indexes)
+ #print('shape of another_halfdrop is: '+ str(type(another_halfdrop)))
+ #print('first few points of halfdrop are: ',halfdrop[:3])
+
+
+
+ # order half contour points
+ xx,yy = sort_to_line(xhalfdrop,yhalfdrop)
+ add_top = False
+ #print('length of halfdrop is: ', len(halfdrop))
+ #print('length of xbottom is: ', len(xbottom))
+
+ #if xx[0]<1: # then graph starts at the top
+ surface_past_drop_index = []
+ for n,x in enumerate(xx):
+ if x>max(xtop):
+ surface_past_drop_index.append(n)
+ #xx = xx[:max(xtop)point]
+ #print('Indexes of contour points past drop: ',surface_past_drop_index)
+
+
+ # if the sort method will not work
+ if len(xx) < len(xhalfdrop): # assumes that the error is on the surface somewhere, so uses bottom of contour
+ add_top = True
+ print()
+ print('sort_to_line is not utilising the full contour, alternate ordering method being used')
+ print('check bottom 10% of contour...')
+ # this method is much slower than the above, so use as few points as possible
+ bot_list = []
+ for n in range(len(xbottom)):
+ if xbottom[n] ybot[yCP_index[0]-1]:
+ new_bot = np.zeros((len(xbot[yCP_index[0]:]),2))
+ for n in range(len(new_bot)):
+ new_bot[n,[0]] = xbot[xCP_index[0]+n]
+ new_bot[n,[1]] = ybot[yCP_index[0]+n]
+ else:
+ new_bot = np.zeros((len(xbot[:yCP_index[0]]),2))
+ for n in range(len(new_bot)):
+ new_bot[n,[0]] = xbot[n]
+ new_bot[n,[1]] = ybot[n]
+ else:
+ raise 'indexes of x and y values of the contact point are not the same'
+
+ # combine new_bot with top_array to give the isolated drop contour without surface
+ if 0:
+ top_array = np.zeros((len(xtop),2))
+ for n in range(len(xtop)):
+ top_array[n,[0]] = xtop[n]
+ top_array[n,[1]] = ytop[n]
+
+ new_halfdrop = np.concatenate((top,new_bot))
+
+ # re-order to check that the error was at the surface line
+ xx,yy = sort_to_line(new_halfdrop[:,[0]],new_halfdrop[:,[1]])
+ if len(xx)new_halfdrop[xCP_index[0]-1][1]:
+ new_halfdrop = new_halfdrop[xCP_index[0]:]
+ else:
+ new_halfdrop = new_halfdrop[:xCP_index[0]+1]
+ else:
+ raise_error = True
+ for x in xCP_index:
+ for y in yCP_index:
+ if x==y:
+ raise_error = False
+ xCP_index = [x]
+ yCP_index = [y]
+ #print('indexes of the CP are: ',xCP_index[0],', ',yCP_index[0])
+ if new_halfdrop[xCP_index[0]+1][1]>new_halfdrop[xCP_index[0]-1][1]:
+ new_halfdrop = new_halfdrop[xCP_index[0]:]
+ else:
+ new_halfdrop = new_halfdrop[:xCP_index[0]+1]
+ if raise_error == True:
+ print('The index of the contact point x value is: ', new_halfdrop[xCP_index])
+ print('The index of the contact point y value is: ', new_halfdrop[yCP_index])
+ raise 'indexes of x and y values of the contact point are not the same'
+
+ if counter == 0:
+ drops[counter] = new_halfdrop[::-1]
+ else:
+ drops[counter] = new_halfdrop
+
+ if display: #display
+ jet= plt.get_cmap('jet')
+ colors = iter(jet(np.linspace(0,1,len(new_halfdrop))))
+ for k in new_halfdrop:
+ plt.plot(k[0],k[1], 'o',color=next(colors))
+ plt.title('outputted halfdrop')
+ plt.axis('equal')
+ plt.show()
+ plt.close()
+
+ counter+=1
+
+ # reflect the left drop and combine left and right
+
+ profile = np.empty((len(drops[0])+len(drops[1]),2))
+ for i,n in enumerate(drops[0]):
+ flipped = n
+ flipped[0] = -flipped[0]
+ profile[i] = flipped
+ for i,n in enumerate(drops[1]):
+ profile[len(drops[0])+i] = n
+ CPs[0][0] = -CPs[0][0]
+
+ if display:
+ jet= plt.get_cmap('jet')
+ colors = iter(jet(np.linspace(0,1,len(profile))))
+ for k in profile:
+ plt.plot(k[0],k[1], 'o',color=next(colors))
+ plt.title('final output')
+ #plt.plot(profile[:,0],profile[:,1],'b')
+ plt.show()
+ plt.close()
+
+ plt.title('final output')
+ plt.plot(profile[:,0],profile[:,1],'b')
+ plt.show()
+ plt.close()
+
+ # flip upside down again so that contour follows image indexing
+ # and transform to the right so that x=0 is no longer in line with apex
+ for coord in profile:
+ coord[1] = -coord[1]
+ coord[0] = coord[0] + xapex
+ for n in [0,1]:
+ CPs[n][1] = -CPs[n][1]
+ CPs[n][0] = CPs[n][0] + xapex
+
+ # flip contour back to original orientation
+ for coord in coords:
+ coord[1] = -coord[1]
+
+ return profile,CPs
+
def optimized_path(coords, start=None):
    """Order points into a continuous path by greedy nearest-neighbour walking.

    Starting from `start` (or the first point if `start` is None), repeatedly
    append the closest remaining point, as measured by `distance1`.
    coords should be a list in this format coords = [ [x1, y1], [x2, y2] , ...]
    https://stackoverflow.com/questions/45829155/sort-points-in-order-to-have-a-continuous-curve-using-python

    Parameters:
        coords: (n, 2) array or list of [x, y] points.
        start: optional [x, y] point contained in `coords` to begin the path at.

    Returns:
        (n, 2) numpy array of the points in path order (empty array for empty input).

    Note: greedy ordering is O(n^2) and can occasionally pick a wrong branch on
    self-intersecting contours (acknowledged by callers in this module).
    """
    if not isinstance(coords, list):
        coords = coords.tolist()
    # Guard against empty input instead of raising IndexError below.
    if not coords:
        return np.empty((0, 2))
    if start is None:
        start = coords[0]
    # Work on a copy: the original aliased the caller's list and emptied it
    # via the remove() calls below, silently destroying the caller's data.
    pass_by = list(coords)
    path = [start]
    pass_by.remove(start)
    while pass_by:
        nearest = min(pass_by, key=lambda p: distance1(path[-1], p))
        path.append(nearest)
        pass_by.remove(nearest)
    return np.array(path)
+
def prepare_hydrophobic(coords,xi=0.8,display=False):
    """takes an array (n,2) of coordinate points, and returns the left and right halfdrops of the contour.
    xi determines the minimum steepness on the reachability plot that constitutes a cluster boundary of the
    clustering algorithm.
    deg is the degree of the polynomial used to describe the shape of the droplet.

    This code is adapted from the prepare module, but this version differs in that it assumes that the drop
    is hydrophobic.

    Returns:
        profile: (n, 2) array of the ordered drop contour, in image coordinates.
        CPs: dict {0: [x, y], 1: [x, y]} of the left and right contact points.

    NOTE(review): several lines of this function appear truncated/corrupted in the
    patch (marked inline below); the function cannot run as-is and should be
    reconstructed from the original source before use. Note also that
    `raise '<string>'` statements below are invalid in Python 3 (raising a str
    causes a TypeError) and should become e.g. `raise ValueError(...)`."""
    # scan for clusers to remove noise and circle from lensing effect
    ################ MAY NEED TO OPTIMIZE eps/xi TO FIND APPROPRIATE GROUPINGS ####################
    if display: # turn this off bc using synthetic drops without lensing effect
        input_contour = coords
        dic,dic2 = cluster_OPTICS(input_contour,xi=xi),cluster_OPTICS(input_contour,out_style='xy',xi=xi)

        #print("number of groups: ",len(list(dic.keys())))

        # visualise each OPTICS cluster in its own colour
        jet= plt.get_cmap('jet')
        colors = iter(jet(np.linspace(0,1,len(list(dic.keys())))))
        for k in dic.keys():
            plt.plot(dic2[str(k)+'x'],dic2[str(k)+'y'], 'o',color=next(colors))
        plt.title(str(len(dic.keys()))+" groups found by clustering")
        plt.show()
        plt.close()
        maxkey=max(dic, key=lambda k: len(dic[k]))

        #print('key to longest dictionary entry is: ',maxkey)

        # take the longest group
        longest = dic[maxkey]

    # flip contour so that min and max values are correct
    # (image y grows downwards; negate y to work in cartesian orientation)
    for coord in coords:
        coord[1] = -coord[1]

    # NOTE(review): this overwrites the clustered `longest` above, so the whole
    # contour is used regardless of clustering — presumably intentional for
    # synthetic drops without a lensing artefact; confirm against original.
    longest = coords

    #print("first few coordinates of the longest contour: ",longest[:3])

    xlongest = []
    ylongest = []
    for i in range(len(longest)):
        xlongest.append(longest[i][0])
        ylongest.append(longest[i][1])

    #print("first few x coordinates of the longest contour: ",xlongest[:3])
    #print("first few y coordinates of the longest contour: ",ylongest[:3])


    # Find a appropriate epsilon value for cluster_OPTICS, this will remove noise in the bottom 10% of the drop
    #. most importantly noise is reduced at contact points.

    # variables in this process are how much and what part of the top of the droplet we use to be representative of
    # the full contour, and whether we use the max(distance) between points or the average between points, or
    # a scalar value of either.

    xtop = [] # isolate top 90% of drop
    ytop = []
    percent = 0.1
    #print('Isolate the top ',100-(percent*100),'% of the contour:')
    for n,y in enumerate(ylongest):
        if y > min(ylongest) + (max(ylongest) - min(ylongest))*percent:
            xtop.append(xlongest[n])
            ytop.append(y)
    xtop = np.array(xtop)
    ytop = np.array(ytop)

    # pair the isolated x/y values back into points and order them into a path
    top = []
    for n,x in enumerate(xtop):
        top.append([xtop[n],ytop[n]])
    top = np.array(top)
    top_array = optimized_path(top)

    dists = [] # find the average distance between points
    for n,co in enumerate(top_array):
        # NOTE(review): the next three lines are corrupted in the patch — the
        # distance computation and the following apex-isolation loop header were
        # merged into one invalid statement; reconstruct from the original source.
        if 1 min(ylongest) + (max(ylongest) - min(ylongest))*percent:
            xtop.append(xlongest[n])
            ytop.append(y)
    # apex x-coordinate: midpoint of the horizontal extent of the drop top
    xapex = (max(xtop) + min(xtop))/2
    #print('The x value of the apex is: ',xapex)

    # split the contour into a left and right half about the apex
    l_drop = []
    r_drop = []
    for n in longest:
        if n[0] < xapex:
            l_drop.append(n)
        if n[0] > xapex:
            r_drop.append(n)
    l_drop = np.array(l_drop)
    r_drop = np.array(r_drop)



    # transpose both half drops so that they both face right and the apex of both is at 0,0
    r_drop[:,[0]] = r_drop[:,[0]] - xapex
    l_drop[:,[0]] = -l_drop[:,[0]] + xapex

    if display:
        plt.plot(r_drop[:,[0]], r_drop[:,[1]], 'b,')
        #plt.show()
        #plt.close()
        plt.plot(l_drop[:,[0]], l_drop[:,[1]], 'r,')
        #plt.gca().set_aspect('equal', adjustable='box')
        #plt.xlim([470,530])
        #plt.ylim([-188,-190])
        plt.show()
        plt.close()

    #############################

    # the drop has been split in half

    # this system has a user input which gives a rough indication of the contact point and the surface line

    # isolate the bottom 5% of the contour near the contact point

    drops = {}      # per-half ordered contours, keyed 0 (left) / 1 (right)
    counter = 0
    crop_drop = {}
    CPs = {}        # per-half contact points, keyed 0 (left) / 1 (right)
    for halfdrop in [l_drop,r_drop]:
        xhalfdrop = halfdrop[:,[0]].reshape(len(halfdrop[:,[0]]))
        yhalfdrop = halfdrop[:,[1]].reshape(len(halfdrop[:,[1]]))

        # isolate the bottom of the drop to help identify contact points (may not need to do this for all scenarios)
        bottom = []
        top = [] # will need this later
        #print('isolate the bottom ',percent*100,'% of the contour:') # percent defined above
        div_line_value = min(halfdrop[:,[1]]) + (max(halfdrop[:,[1]]) - min(halfdrop[:,[1]]))*percent
        for n in halfdrop:
            if n[1] < div_line_value:
                bottom.append(n)
            else:
                top.append(n)

        bottom = np.array(bottom)
        top = np.array(top)

        xbottom = bottom[:,[0]].reshape(len(bottom[:,[0]]))
        ybottom = bottom[:,[1]].reshape(len(bottom[:,[1]]))
        xtop = top[:,[0]].reshape(len(top[:,[0]]))
        ytop = top[:,[1]].reshape(len(top[:,[1]]))

        #print('max x value of halfdrop is: ',max(xhalfdrop))

        if display: # plot the bottom 10% of the contour
            plt.plot(xbottom, ybottom, 'b,')
            plt.title('bottom 10% of the contour')
            #plt.xlim([130,200])
            plt.show()
            plt.close()

        #### Continue here assuming that the drop is hydrophobic ####

        # contact point: for a hydrophobic drop the CP is the innermost (min x)
        # point of the bottom region; take the lowest y at that x
        xCP = min(xbottom)
        yCP = []
        for coord in halfdrop:
            if coord[0]==xCP:
                yCP.append(coord[1])
        yCP =min(yCP)
        #print('The first few coordinates of xhalfdrop are: ', xhalfdrop[:3])

        #print('The coordinates of the contact point are (',xCP,',',yCP,')')

        CPs[counter] = [xCP, yCP]
        if 1:
            # order all halfdrop points using optimized_path (very quick but occasionally makes mistakes)

            new_halfdrop = sorted(halfdrop.tolist(), key=lambda x: (x[0],-x[1])) #top left to bottom right
            new_halfdrop = optimized_path(new_halfdrop)#[::-1]
            xnew_halfdrop = new_halfdrop[:,[0]].reshape(len(new_halfdrop[:,[0]]))
            ynew_halfdrop = new_halfdrop[:,[1]].reshape(len(new_halfdrop[:,[1]]))

            # remove surface line past the contact point

            xCP_index = [i for i, j in enumerate(xnew_halfdrop) if j == xCP]
            #print('xCP_index is: ',xCP_index)
            yCP_index = [i for i, j in enumerate(ynew_halfdrop) if j == yCP]
            #print('yCP_index is: ',yCP_index)

            new_halfdrop = np.zeros((len(xnew_halfdrop),2))
            for n in range(len(xnew_halfdrop)):
                new_halfdrop[n,[0]]=xnew_halfdrop[n]
                new_halfdrop[n,[1]]=ynew_halfdrop[n]
            #print('first 3 points of new_halfdrop are: ',new_halfdrop[:3])
            #print('length of new_halfdrop is: ',len(new_halfdrop))

            # keep only the contour on the drop side of the contact point; the
            # direction test on the neighbouring y values decides which side
            if xCP_index == yCP_index:
                if new_halfdrop[xCP_index[0]+1][1]>new_halfdrop[xCP_index[0]-1][1]:
                    new_halfdrop = new_halfdrop[xCP_index[0]:]
                else:
                    new_halfdrop = new_halfdrop[:xCP_index[0]+1]
            else:
                # x and y indices disagree; look for a common index before failing
                raise_error = True
                for x in xCP_index:
                    for y in yCP_index:
                        if x==y:
                            raise_error = False
                            xCP_index = [x]
                            yCP_index = [y]
                            #print('indexes of the CP are: ',xCP_index[0],', ',yCP_index[0])
                            if new_halfdrop[xCP_index[0]+1][1]>new_halfdrop[xCP_index[0]-1][1]:
                                new_halfdrop = new_halfdrop[xCP_index[0]:]
                            else:
                                new_halfdrop = new_halfdrop[:xCP_index[0]+1]
                if raise_error == True:
                    print('The index of the contact point x value is: ', new_halfdrop[xCP_index])
                    print('The index of the contact point y value is: ', new_halfdrop[yCP_index])
                    raise 'indexes of x and y values of the contact point are not the same'

        if 0:
            # order all halfdrop points using two-opt (the slower method)
            # NOTE(review): dead code (`if 0:`), retained for reference

            # before any ordering is done, chop off the surface line that is past the drop edge
            del_indexes = []
            for index,coord in enumerate(bottom):
                if coord[0]>max(xtop):
                    del_indexes.append(index)
                # NOTE(review): the next line is corrupted — a block of code
                # (including a `try:` matching the `except:` below) was lost here.
                if coord[1] ybot[yCP_index-1]:
                    new_bot = np.zeros((len(xbot[yCP_index:]),2))
                    for n in range(len(new_bot)):
                        new_bot[n,[0]] = xbot[xCP_index+n]
                        new_bot[n,[1]] = ybot[yCP_index+n]
                else:
                    new_bot = np.zeros((len(xbot[:yCP_index]),2))
                    for n in range(len(new_bot)):
                        new_bot[n,[0]] = xbot[n]
                        new_bot[n,[1]] = ybot[n]
            except:
                try:
                    if ybot[yCP_index] > ybot[yCP_index-2]:
                        new_bot = np.zeros((len(xbot[yCP_index:]),2))
                        for n in range(len(new_bot)):
                            new_bot[n,[0]] = xbot[xCP_index+n]
                            new_bot[n,[1]] = ybot[yCP_index+n]
                    else:
                        new_bot = np.zeros((len(xbot[:yCP_index]),2))
                        for n in range(len(new_bot)):
                            new_bot[n,[0]] = xbot[n]
                            new_bot[n,[1]] = ybot[n]
                except:
                    print('xCP_indexs are: ', xCP_indexs)
                    print('yCP_indexs are: ', yCP_indexs)
                    raise 'indexes of x and y values of the contact point are not the same'
            new_halfdrop = np.concatenate((new_top,new_bot))

        if 0: # order the points so that the baseline can be removed
            # NOTE(review): dead code (`if 0:`), retained for reference
            # before any ordering is done, chop off the surface line that is past the drop edge
            del_indexes = []
            for index,coord in enumerate(halfdrop):
                if coord[0]>max(xtop):
                    del_indexes.append(index)
            #halfdrop = np.delete(halfdrop,del_indexes)
            xhalfdrop = np.delete(xhalfdrop,del_indexes)
            yhalfdrop = np.delete(yhalfdrop,del_indexes)
            #print('shape of another_halfdrop is: '+ str(type(another_halfdrop)))
            #print('first few points of halfdrop are: ',halfdrop[:3])



            # order half contour points
            xx,yy = sort_to_line(xhalfdrop,yhalfdrop)
            add_top = False
            #print('length of halfdrop is: ', len(halfdrop))
            #print('length of xbottom is: ', len(xbottom))

            #if xx[0]<1: # then graph starts at the top
            surface_past_drop_index = []
            for n,x in enumerate(xx):
                if x>max(xtop):
                    surface_past_drop_index.append(n)
            #xx = xx[:max(xtop)point]
            #print('Indexes of contour points past drop: ',surface_past_drop_index)


            # if the sort method will not work
            if len(xx) < len(xhalfdrop): # assumes that the error is on the surface somewhere, so uses bottom of contour
                add_top = True
                print()
                print('sort_to_line is not utilising the full contour, alternate ordering method being used')
                print('check bottom 10% of contour...')
                # this method is much slower than the above, so use as few points as possible
                bot_list = []
                for n in range(len(xbottom)):
                    # NOTE(review): the next line is corrupted — the two-opt
                    # ordering and CP-index code that belongs here was lost.
                    if xbottom[n] ybot[yCP_index[0]-1]:
                        new_bot = np.zeros((len(xbot[yCP_index[0]:]),2))
                        for n in range(len(new_bot)):
                            new_bot[n,[0]] = xbot[xCP_index[0]+n]
                            new_bot[n,[1]] = ybot[yCP_index[0]+n]
                    else:
                        new_bot = np.zeros((len(xbot[:yCP_index[0]]),2))
                        for n in range(len(new_bot)):
                            new_bot[n,[0]] = xbot[n]
                            new_bot[n,[1]] = ybot[n]
                else:
                    raise 'indexes of x and y values of the contact point are not the same'

            # combine new_bot with top_array to give the isolated drop contour without surface
            if 0:
                top_array = np.zeros((len(xtop),2))
                for n in range(len(xtop)):
                    top_array[n,[0]] = xtop[n]
                    top_array[n,[1]] = ytop[n]

            new_halfdrop = np.concatenate((top,new_bot))

            # re-order to check that the error was at the surface line
            xx,yy = sort_to_line(new_halfdrop[:,[0]],new_halfdrop[:,[1]])
            # NOTE(review): the next line is corrupted — it merges
            # `if len(xx) < ...` with a later CP-neighbour comparison.
            if len(xx)new_halfdrop[xCP_index[0]-1][1]:
                new_halfdrop = new_halfdrop[xCP_index[0]:]
            else:
                new_halfdrop = new_halfdrop[:xCP_index[0]+1]
            # NOTE(review): the `if` matching this `else:` was lost in the patch
            else:
                raise_error = True
                for x in xCP_index:
                    for y in yCP_index:
                        if x==y:
                            raise_error = False
                            xCP_index = [x]
                            yCP_index = [y]
                            #print('indexes of the CP are: ',xCP_index[0],', ',yCP_index[0])
                            if new_halfdrop[xCP_index[0]+1][1]>new_halfdrop[xCP_index[0]-1][1]:
                                new_halfdrop = new_halfdrop[xCP_index[0]:]
                            else:
                                new_halfdrop = new_halfdrop[:xCP_index[0]+1]
                if raise_error == True:
                    print('The index of the contact point x value is: ', new_halfdrop[xCP_index])
                    print('The index of the contact point y value is: ', new_halfdrop[yCP_index])
                    raise 'indexes of x and y values of the contact point are not the same'

        # left half is stored reversed so both halves run in the same direction
        if counter == 0:
            drops[counter] = new_halfdrop[::-1]
        else:
            drops[counter] = new_halfdrop

        if display: #display
            jet= plt.get_cmap('jet')
            colors = iter(jet(np.linspace(0,1,len(new_halfdrop))))
            for k in new_halfdrop:
                plt.plot(k[0],k[1], 'o',color=next(colors))
            plt.title('outputted halfdrop')
            plt.axis('equal')
            plt.show()
            plt.close()

        counter+=1

    # reflect the left drop and combine left and right

    profile = np.empty((len(drops[0])+len(drops[1]),2))
    for i,n in enumerate(drops[0]):
        flipped = n
        flipped[0] = -flipped[0]
        profile[i] = flipped
    for i,n in enumerate(drops[1]):
        profile[len(drops[0])+i] = n
    # mirror the left contact point back to the left side as well
    CPs[0][0] = -CPs[0][0]

    if display:
        jet= plt.get_cmap('jet')
        colors = iter(jet(np.linspace(0,1,len(profile))))
        for k in profile:
            plt.plot(k[0],k[1], 'o',color=next(colors))
        plt.title('final output')
        #plt.plot(profile[:,0],profile[:,1],'b')
        plt.show()
        plt.close()

        plt.title('final output')
        plt.plot(profile[:,0],profile[:,1],'b')
        plt.show()
        plt.close()

    # flip upside down again so that contour follows image indexing
    # and transform to the right so that x=0 is no longer in line with apex
    for coord in profile:
        coord[1] = -coord[1]
        coord[0] = coord[0] + xapex
    for n in [0,1]:
        CPs[n][1] = -CPs[n][1]
        CPs[n][0] = CPs[n][0] + xapex

    # flip contour back to original orientation
    # (undoes the in-place negation applied to the caller's `coords` above)
    for coord in coords:
        coord[1] = -coord[1]

    return profile,CPs
+
def find_contours(image):
    """
    Calls cv2.findContours() on passed image in a way that is compatible with OpenCV 4.x, 3.x or 2.x
    versions. Passed image is a numpy.array.

    Note, cv2.findContours() will treat non-zero pixels as 1 and zero pixels as 0, so the edges detected will only
    be those on the boundary of pixels with non-zero and zero values.

    Returns a numpy array of the contours in descending arc length order.
    """
    if image.ndim > 2:
        raise ValueError('`image` must be a single channel image')

    found = cv2.findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_TC89_KCOS)
    # OpenCV 3.2 <= version < 4.0 returns (modified_image, contours, hierarchy);
    # 2.x and 4.x return (contours, hierarchy) - pick the contours accordingly.
    if (3, 2, 0) <= CV2_VERSION < (4, 0, 0):
        raw_contours = found[1]
    else:
        raw_contours = found[0]

    # Each contour comes back as (n, 1, 2) column vectors; flatten to (n, 2).
    flat_contours = [c.reshape(-1, 2) for c in raw_contours]

    # Longest arc first.
    return sorted(flat_contours, key=lambda c: cv2.arcLength(c, False), reverse=True)
+
def extract_edges_CV(img):
    '''
    Detect the drop edge in `img` and return it as an (n, 2) numpy array of
    unique [x, y] coordinates.

    The image is Otsu-thresholded, the longest contour is assumed to be the
    drop, points within IGNORE_EDGE_MARGIN pixels of the image border are
    discarded, and duplicate points are removed while preserving order.
    '''
    IGNORE_EDGE_MARGIN = 1
    img = img.astype("uint8")
    try:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    except cv2.error:
        # cvtColor rejects single-channel input - the image is already grayscale.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt etc.)
        gray = img
    #ret, thresh = cv2.threshold(gray,threshValue,255,cv2.THRESH_BINARY)
    ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    # Each contour has shape (n, 1, 2) where 'n' is the number of points. Presumably this is so each
    # point is a size 2 column vector, we don't want this so reshape it to a (n, 2)
    contours = [contour.reshape(contour.shape[0], 2) for contour in contours]

    # Sort the contours by arc length, descending order
    contours.sort(key=lambda c: cv2.arcLength(c, False), reverse=True)

    # Assume that the drop is the largest contour
    drop_profile = contours[0]

    # Ignore points of the drop profile near the edges of the drop image
    width, height = img.shape[1::-1]
    if not (width < IGNORE_EDGE_MARGIN or height < IGNORE_EDGE_MARGIN):
        mask = ((IGNORE_EDGE_MARGIN < drop_profile[:, 0]) & (drop_profile[:, 0] < width - IGNORE_EDGE_MARGIN) &
                (IGNORE_EDGE_MARGIN < drop_profile[:, 1]) & (drop_profile[:, 1] < height - IGNORE_EDGE_MARGIN))
        drop_profile = drop_profile[mask]

    # Remove duplicate points while preserving order. Uses a set of tuples for
    # O(n) total work; the previous `list(coord) not in output` scan was O(n^2).
    seen = set()
    output = []
    for coord in drop_profile:
        key = (coord[0], coord[1])
        if key not in seen:
            seen.add(key)
            output.append(list(coord))
    return np.array(output)
+
def tilt_correction(img, baseline):
    """Rotate `img` so the given baseline becomes horizontal, then crop away
    the black wedges the rotation introduces.

    Parameters:
        img: image as a numpy array.
        baseline: two (x, y) points defining the surface line in the image.

    Returns the rotated-and-cropped image; if the baseline is already
    horizontal the input image is returned unchanged.
    """
    p1, p2 = baseline
    x1, y1 = p1
    x2, y2 = p2

    #assert(not x1 == x2 or y1 == y2)
    if y1 == y2:
        # baseline already level - nothing to correct
        return img

    t = float(y2 - y1) / (x2 - x1)
    rotate_angle = math.degrees(math.atan(t))
    # fold the angle into (-45, 45] so the smaller rotation is applied
    if rotate_angle > 45:
        rotate_angle = -90 + rotate_angle
    elif rotate_angle < -45:
        rotate_angle = 90 + rotate_angle
    rotate_img = ndimage.rotate(img, rotate_angle)
    #print('image rotated by '+str(rotate_angle)+' degrees')

    # crop black edges created when rotating
    width = np.sin(np.deg2rad(rotate_angle))
    side = math.ceil(abs(width * rotate_img.shape[1]))
    roof = math.ceil(abs(width * rotate_img.shape[0]))
    # BUG FIX: for steep baselines the margins could exceed half the image
    # dimensions, making the crop below return an empty array. Clamp so at
    # least one row and one column always survive.
    roof = min(roof, (rotate_img.shape[0] - 1) // 2)
    side = min(side, (rotate_img.shape[1] - 1) // 2)
    rows = slice(roof, -roof) if roof > 0 else slice(None)
    cols = slice(side, -side) if side > 0 else slice(None)
    rotate_img_crop = rotate_img[rows, cols]

    return rotate_img_crop
+
def preprocess(img, display=False):
    """This code serves as a discrete instance of image preprocessing before contact
    angle fit software is implemented.

    This includes automatic identification of the drop through Hough transform,
    followed by cropping of the image to isolate the drop. Tilt correction is then
    performed using the identified contact points of the drop.
    An isolated (cropped) and tilt corrected image is outputted.

    Parameters:
        img: input image as a numpy array.
        display: when True, show intermediate plots of each stage.
    """
    # preprocessing: crop to the drop, then detect its edge coordinates
    img_crop, bounds = auto_crop(img.copy())
    L, R, T, B = bounds
    edges_pts = extract_edges_CV(img_crop)  # array of x,y coords where lines are detected

    if display:
        plt.imshow(img_crop)
        plt.plot(edges_pts[:, 0], edges_pts[:, 1])
        plt.title('drop found by hough transform')
        plt.show()
        plt.close()

    # BUG FIX: `display` was previously passed positionally and was therefore
    # bound to prepare_hydrophobic's second parameter `xi` (the clustering
    # steepness threshold) instead of its `display` flag. Pass it by keyword.
    profile, CPs = prepare_hydrophobic(edges_pts, display=display)
    baseline = [CPs[0], CPs[1]]

    tilt_corrected_crop = tilt_correction(img_crop, baseline)

    if display:
        plt.imshow(tilt_corrected_crop)
        plt.title('tilt corrected and cropped image')
        plt.show()
        plt.close()

    return tilt_corrected_crop
+
+def process_halfdrop(coords, percent=0.15, display=False):
+ # isolate the top of the contour so excess surface can be deleted
+ percent = 0.15
+ bottom = []
+ top = [] # will need this later
+ div_line_value = min(coords[:,1]) + (max(coords[:,1]) - min(coords[:,1]))*percent
+ for n in coords:
+ if n[1] < div_line_value:
+ bottom.append(n)
+ else:
+ top.append(n)
+
+ bottom = np.array(bottom)
+ top = np.array(top)
+
+ # find the apex of the drop
+ xtop,ytop = top[:,0],top[:,1] # isolate top 90% of drop
+ xapex = (max(xtop) + min(xtop))/2
+
+ del_indexes = []
+ for index,coord in enumerate(coords):
+ if coord[0]>max(top[:,0]) or coord[0] xapex:
+ r_drop.append(n)
+ r_drop = np.array(r_drop)
+
+ #print('length of left drop is: ',len(l_drop))
+ #print('length of right drop is: ', len(r_drop))
+
+ # transpose half drop so the apex is at 0,0
+ r_drop[:,[0]] = r_drop[:,[0]] - xapex
+ halfdrop = r_drop
+
+ if halfdrop[0,1]input_len:
+ if display==True:
+ plt.plot(X,Z)
+ plt.title('half-drop contour, length of '+str(len(X)))
+ plt.show()
+ plt.close()
+
+ print("Contour of length "+str(len(X))+" is too long for the designated output dimensionality of ("+str(input_len)+",2)")
+ print("reducing image resolution to 128 dpi...")
+
+ img = save_to_new_dpi(img, dpi=128)
+ repeat = True
+ break
+
+ if display == True:
+ plt.plot(X,Z)
+ plt.title('half-drop contour, length of '+str(len(X)))
+ plt.show()
+ plt.close()
+ elif len(X)<112:
+ print('WARNING: contour shorter than shortest training data, inaccurate predictions likely')
+ print('length of input is '+str(len(X)))
+
+ for i in range(input_len):
+ if i < len(X):
+ a = X[i]
+ b = Z[i]
+ coord = [a,b]
+ coordinates.append(coord)
+ else:
+ coordinates.append([0,0])
+ if display:
+ jet= plt.get_cmap('jet')
+ colors = iter(jet(np.linspace(0,1,len(coordinates))))
+ for k in coordinates:
+ plt.plot(k[0],k[1], 'o',color=next(colors))
+ if counter == 0:
+ plt.title('Left halfdrop')
+ elif counter == 1:
+ plt.title('Right halfdrop')
+ plt.show()
+ plt.close()
+ #key = image.split('/')[-1].split('_')[-1][:-4]
+ key = counter
+ CV_contours[key]= np.array(coordinates)
+
+ counter += 1
+
+ if repeat == True: #repeat the above with the resolution dropped
+ edges = extract_edges_CV(img)
+
+ if cluster == True:
+ #xi = 0.8
+ #dic,dic2 = cluster_OPTICS(edges,xi=xi),cluster_OPTICS(edges,out_style='xy',xi=xi)
+ eps = 3
+ dic,dic2 = cluster_OPTICS(edges,eps=eps),cluster_OPTICS(edges,out_style='xy',eps=eps)
+
+ if display:
+ jet= plt.get_cmap('jet')
+ colors = iter(jet(np.linspace(0,1,len(list(dic.keys())))))
+ for k in dic.keys():
+ plt.plot(dic2[str(k)+'x'],dic2[str(k)+'y'], 'o',color=next(colors))
+ plt.axis('equal')
+ plt.title(str(len(dic.keys()))+" groups found by clustering")
+ plt.show()
+ plt.close()
+ maxkey=max(dic, key=lambda k: len(dic[k]))
+
+ # take the longest group
+ longest = dic[maxkey]
+ else:
+ longest = edges
+
+ coords = np.array(longest)
+
+ coords[:,1] = - coords[:,1] # flip image coords to cartesian coords
+
+ counter = 0
+ CV_contours = {}
+ flipped = np.copy(coords)
+ flipped[:,0] = -coords[:,0]
+ for coords in [flipped,coords.copy()]: # for flipped and right side
+ if 1:
+ plt.title('check')
+ plt.plot(coords[:,0],coords[:,1])
+ plt.show()
+ plt.close()
+
+ # isolate the top of the contour so excess surface can be deleted
+ X, Z = process_halfdrop(coords, display=display)
+
+ # zero padd contours
+ if input_len == None:
+ input_len = len(X)
+
+ coordinates = []
+
+ # if image is too large and contour is too long, decrease image resolution
+ if len(X)>input_len:
+ if display==True:
+ plt.plot(X,Z)
+ plt.title('half-drop contour, length of '+str(len(X)))
+ plt.show()
+ plt.close()
+
+ print("Half-drop contour of length "+str(len(X))+" is too long for the designated output dimensionality of ("+str(input_len)+",2)")
+ print("Continuing with every second point of the contour removed")
+
+ X = X[::2]
+ Z = Z[::2]
+
+ if display == True:
+ plt.plot(X,Z)
+ plt.title('half-drop contour, length of '+str(len(X)))
+ plt.show()
+ plt.close()
+ elif len(X)<112:
+ print('WARNING: half-drop contour shorter than shortest training data, inaccurate predictions likely')
+ print('length of input is '+str(len(X)))
+ else:
+ print('Half-drop contour of length '+str(len(X))+' is now in range')
+
+ for i in range(input_len):
+ if i < len(X):
+ a = X[i]
+ b = Z[i]
+ coord = [a,b]
+ coordinates.append(coord)
+ else:
+ coordinates.append([0,0])
+ if display:
+ jet= plt.get_cmap('jet')
+ colors = iter(jet(np.linspace(0,1,len(coordinates))))
+ for k in coordinates:
+ plt.plot(k[0],k[1], 'o',color=next(colors))
+ if counter == 0:
+ plt.title('Left halfdrop')
+ elif counter == 1:
+ plt.title('Right halfdrop')
+ plt.show()
+ plt.close()
+ #key = image.split('/')[-1].split('_')[-1][:-4]
+ key = counter
+ CV_contours[key]= np.array(coordinates)
+
+ counter += 1
+
+ if right_only == True:
+ pred_ds = CV_contours[1]
+ else:
+ if input_len == None: # arrays must have consistent dimensions
+ pred_ds = {}
+ for counter in [0,1]:
+ pred_ds[counter] = CV_contours[counter]
+ else: #
+ pred_ds = np.zeros((2,input_len,2))
+ for counter in [0,1]:
+ pred_ds[counter] = CV_contours[counter]
+
+ return pred_ds
+
+def prepare4model_v03(coords, input_len=1223, right_only=False, display=False):
+ """Take the contour of the whole drop, and chop it into left and right sides ready for model input"""
+ coords[:,1] = - coords[:,1] # flip image coords to cartesian coords
+
+ counter = 0
+ CV_contours = {}
+ flipped = np.copy(coords)
+ flipped[:,0] = -coords[:,0]
+
+ for coords in [flipped,coords.copy()]: # for flipped and right side
+ # isolate the top of the contour so excess surface can be deleted
+ percent = 0.15
+ bottom = []
+ top = [] # will need this later
+ div_line_value = min(coords[:,1]) + (max(coords[:,1]) - min(coords[:,1]))*percent
+ for n in coords:
+ if n[1] < div_line_value:
+ bottom.append(n)
+ else:
+ top.append(n)
+
+ bottom = np.array(bottom)
+ top = np.array(top)
+
+ # find the apex of the drop
+ xtop,ytop = top[:,0],top[:,1] # isolate top 90% of drop
+ xapex = (max(xtop) + min(xtop))/2
+
+ del_indexes = []
+ for index,coord in enumerate(coords):
+ if coord[0]>max(top[:,0]) or coord[0] xapex:
+ r_drop.append(n)
+ r_drop = np.array(r_drop)
+
+ #print('length of left drop is: ',len(l_drop))
+ #print('length of right drop is: ', len(r_drop))
+
+ # transpose and normalise half drop so the apex is at 0,1
+ r_drop[:,[0]] = r_drop[:,[0]] - xapex
+ halfdrop = r_drop
+
+ if halfdrop[0,1] 180:
+ prediction_left = 180
+ elif prediction_left < 0:
+ prediction_left = 0
+
+ return prediction_left, timings
+
+ if side == 'right':
+ input_right = np.array([pred_ds[0,1]])
+ ML_prediction_start_time = time.time()
+
+ prediction_right = model.predict(input_right)
+
+ ML_prediction_time = time.time() - ML_prediction_start_time
+ analysis_time = time.time() - start_time
+
+ timings = {}
+ timings['fit time'] = ML_prediction_time
+ timings['analysis time'] = analysis_time
+
+ if prediction_right > 180:
+ prediction_right = 180
+ elif prediction_right < 0:
+ prediction_right = 0
+
+ return prediction_right, timings
+
+ if side == 'both':
+ ML_prediction_start_time = time.time()
+
+ predictions = model.predict(pred_ds)
+
+ ML_prediction_time = time.time() - ML_prediction_start_time
+ analysis_time = time.time() - start_time
+
+ timings = {}
+ timings['fit time'] = ML_prediction_time
+ timings['analysis time'] = analysis_time
+
+ if predictions[0] > 180:
+ predictions[0] = 180
+ elif predictions[0] < 0:
+ predictions[0] = 0
+ if predictions[1] > 180:
+ predictions[1] = 180
+ elif predictions[1] < 0:
+ predictions[1] = 0
+
+ return predictions, timings
+
def _clamp_contact_angle(prediction):
    """Clamp a predicted contact angle into the physical range [0, 180] degrees.

    Preserves the original behaviour exactly: an out-of-range prediction is
    replaced by the scalar bound (180 or 0); an in-range prediction is
    returned unchanged (still whatever array/scalar model.predict produced).
    """
    if prediction > 180:
        return 180
    elif prediction < 0:
        return 0
    return prediction


def experimental_prediction(image, side='both', cluster=True, display=False):
    """Takes an input experimental image, and outputs the predicted contact
    angle based on the contour input model found in this folder:
    './modules/ML_model/'

    Parameters
    ----------
    image : ndarray
        Experimental drop image (as read by cv2) -- passed straight to
        prepare4model_v03_img.
    side : str
        'left', 'right' or 'both': which half-drop angle(s) to predict.
    cluster : bool
        Forwarded to the contour-extraction preprocessing.
    display : bool
        If True, preprocessing shows diagnostic plots.

    Returns
    -------
    (prediction, timings)
        The clamped prediction(s) and a dict with the preprocessing, fit and
        total analysis times in seconds.

    Raises
    ------
    ValueError
        If *side* is not one of 'left', 'right', 'both' (the original code
        silently fell through and returned None in that case).
    """
    if side not in ('left', 'right', 'both'):
        raise ValueError("side must be 'left', 'right' or 'both', got %r" % (side,))

    start_time = time.time()

    # Model is expected in the current working directory.
    model_path = './'
    model = tf.keras.models.load_model(model_path)

    preprocessing_start_time = time.time()

    both_inputs = prepare4model_v03_img(image, input_len=1223, cluster=cluster, display=display)

    if side == 'left':
        # NOTE(review): [0, 0] selects a single coordinate pair of the first
        # contour, not a whole half-drop contour ([0]); preserved byte-for-byte
        # from the original -- confirm this indexing is intended.
        model_input = np.array([both_inputs[0, 0]])
    elif side == 'right':
        # NOTE(review): same concern as for 'left' above.
        model_input = np.array([both_inputs[0, 1]])
    else:  # side == 'both'
        model_input = both_inputs

    ML_preprocessing_time = time.time() - preprocessing_start_time
    ML_prediction_start_time = time.time()

    predictions = model.predict(model_input)

    ML_prediction_time = time.time() - ML_prediction_start_time
    analysis_time = time.time() - start_time

    timings = {
        'method specific preprocessing time': ML_preprocessing_time,
        'fit time': ML_prediction_time,
        'analysis time': analysis_time,
    }

    if side == 'both':
        # Clamp each half-drop prediction element-wise, in place.
        predictions[0] = _clamp_contact_angle(predictions[0])
        predictions[1] = _clamp_contact_angle(predictions[1])
        return predictions, timings

    return _clamp_contact_angle(predictions), timings
+
# Disabled manual smoke test: flip `if 0` to `if 1` to run a one-off
# prediction on a sample image. Requires cv2 (imported at file top) and a
# saved Keras model in './' for experimental_prediction to load.
if 0:
    IMG_PATH = '../../RICOphobic_cropped.png'
    img = cv2.imread(IMG_PATH)
    predictions, timings = experimental_prediction(img, display=True)
    print()
    print('predictions: ', predictions)
    print('timings: ', timings)

    print()
diff --git a/modules/ML_model/saved_model.pb b/modules/ML_model/saved_model.pb
new file mode 100644
index 0000000..86ccd3e
Binary files /dev/null and b/modules/ML_model/saved_model.pb differ
diff --git a/modules/ML_model/test_data.png b/modules/ML_model/test_data.png
new file mode 100644
index 0000000..ade079b
Binary files /dev/null and b/modules/ML_model/test_data.png differ
diff --git a/modules/ML_model/test_set_spread.png b/modules/ML_model/test_set_spread.png
new file mode 100644
index 0000000..a008f6b
Binary files /dev/null and b/modules/ML_model/test_set_spread.png differ
diff --git a/modules/ML_model/trainHistoryDict.pkl b/modules/ML_model/trainHistoryDict.pkl
new file mode 100644
index 0000000..cee77cc
Binary files /dev/null and b/modules/ML_model/trainHistoryDict.pkl differ
diff --git a/modules/ML_model/train_continue.py b/modules/ML_model/train_continue.py
new file mode 100644
index 0000000..ef7a1be
--- /dev/null
+++ b/modules/ML_model/train_continue.py
@@ -0,0 +1,337 @@
+# repeat the above but on a fraction of the contour models data
+
+
+import matplotlib
+matplotlib.use('Agg')
+import matplotlib.pyplot as plt
+
+#import pickle5 as pickle
+import pickle
+import random
+import numpy as np
+import datetime
+import time
+import os
+
+import tensorflow as tf
+from tensorflow.keras import layers
+from tensorflow.keras.models import Sequential
+from tensorflow.keras.callbacks import Callback # for early stopping
+from tensorflow.keras.callbacks import ModelCheckpoint #for checkpoint saves
+import warnings # for early stopping
+
+import logging #for optuna logging
+import sys #for optuna logging
+
+import optuna #for hyperparameter optimisation
+#from optuna.integration import TFKerasPruningCallback
+from optuna.trial import TrialState
+
+# define .pkl load function
def load_obj(name):
    """Deserialize and return the object stored in the pickle file *name*."""
    with open(name, 'rb') as pkl_file:
        return pickle.load(pkl_file)
+
def load_dataset():
    """Load the two pickled contour datasets and build train/test arrays.

    Only 20% of the (deterministically shuffled) data is used, so that
    hyperparameter search runs faster; 80% of that subset becomes the
    training split and the remainder the test split.

    Returns
    -------
    (train_ds, train_keys, test_ds, test_keys)
        Contour arrays and their numeric target labels as numpy arrays.
    """
    base = load_obj('/scratch/oe97/ds1693/model_v03/contour_dataset_4par_110-180.pkl')
    reflected = load_obj('/scratch/oe97/ds1693/model_v03/contour_dataset_4par_ref_1223.pkl')
    data = {**base, **reflected}

    labels = list(data.keys())
    # Fixed seed so the train/test split is reproducible across runs.
    random.Random(666).shuffle(labels)

    subset_size = int(len(labels) * 0.2)
    split_point = int(subset_size * 0.8)
    train_keys = labels[:split_point]
    test_keys = labels[split_point:subset_size]

    # Stack the per-key contour arrays into single ndarrays.
    train_ds = np.array([data[key] for key in train_keys])
    test_ds = np.array([data[key] for key in test_keys])

    # Each key encodes its numeric target before the first underscore.
    # NOTE(review): eval() on key fragments is preserved from the original;
    # presumably these are plain numbers -- float() would be safer, verify.
    train_keys = np.array([eval(key.split('_')[0]) for key in train_keys])
    test_keys = np.array([eval(key.split('_')[0]) for key in test_keys])

    return train_ds, train_keys, test_ds, test_keys
+
+
class EarlyStoppingWhenErrorLow(Callback):
    """Keras callback that stops training once a monitored error drops below a floor.

    Parameters
    ----------
    monitor : str
        Name of the logged metric to watch (e.g. 'val_mse').
    value : float
        Threshold; training stops as soon as the metric falls below it.
    verbose : int
        If > 0, print a message when early stopping triggers.
    """

    def __init__(self, monitor='val_mse', value=0.02, verbose=0):
        # BUG FIX: the original called super(Callback, self).__init__(), which
        # starts the MRO lookup *after* Callback and therefore never runs
        # Callback.__init__; use plain super() so the base class initialises.
        super().__init__()
        self.monitor = monitor
        self.value = value
        self.verbose = verbose

    def on_epoch_end(self, epoch, logs=None):
        # BUG FIX: replaced the mutable default argument `logs={}` with None.
        logs = logs or {}
        current = logs.get(self.monitor)
        if current is None:
            warnings.warn("Early stopping requires %s available!" % self.monitor, RuntimeWarning)
        elif current < self.value:
            if self.verbose > 0:
                print("Epoch %05d: early stopping THR" % epoch)
            # Keras reads this flag after the epoch and halts fitting.
            self.model.stop_training = True
+
def CA_activation(x):
    """Custom final-layer activation: ReLU with its output capped at 180, so
    predictions stay inside the valid contact-angle range [0, 180] degrees."""
    return tf.keras.activations.relu(x,max_value=180)
+
def create_model(trial):
    """Build and compile the 1-D convolutional contact-angle regression model.

    The Optuna *trial* argument is currently unused: the hyperparameters
    below were pinned after an earlier optimisation run (the original
    suggest_* calls are kept in comments for reference).

    Returns
    -------
    (model, es_patience) : tuple
        The compiled Sequential model and the early-stopping patience to use.
    """

    # Hyperparameters previously tuned by Optuna, now fixed.
    learning_rate = 0.00011638101163629009 #trial.suggest_float("learning_rate", 1e-4, 1e-1, log=True)
    batch_size = 42 #trial.suggest_int("batch_size", 32, 128, log=True)  # NOTE(review): unused here
    model_width = 32 #trial.suggest_categorical("model_width", [8,16, 32, 64, 128, 256, 512, 1024])
    es_patience = 1024 #trial.suggest_int("es_patience", 1024, 1025, log=True)

    # Compose neural network: three conv layers tapering in width, then a
    # dense hidden layer with the capped-ReLU activation, then a scalar output.
    # BUG FIX: use integer division for the filter counts -- `model_width/2`
    # yields a float in Python 3, which Conv1D rejects as `filters`.
    model = Sequential([
        layers.Conv1D(model_width, 3, padding='same', activation='relu'),
        layers.Conv1D(model_width // 2, 3, padding='same', activation='relu'),
        layers.Conv1D(model_width // 4, 3, padding='same', activation='relu'),
        layers.Flatten(),
        layers.Dense(128, activation=CA_activation),
        layers.Dense(1)
    ])

    # Compile model: MSE loss, tracking both MAE and MSE as metrics.
    model.compile(
        optimizer=tf.keras.optimizers.Adam(
            learning_rate=learning_rate),
        loss="mean_squared_error",
        metrics=['mean_absolute_error', 'mean_squared_error']
    )

    return model, es_patience
+
def load_model(trial):
    """Load a previously saved model, restore its best weights and recompile.

    The Optuna *trial* argument is currently unused (hyperparameters pinned;
    original suggest_* calls kept in comments).

    Returns
    -------
    (model, es_patience) : tuple
        The recompiled model and the early-stopping patience to use.
    """
    #model_path = '/data/gpfs/projects/punim0012/dgshaw/model_v03/test9/test9.2/_mse_0.41930869221687317/'
    model_path = '/data/gpfs/projects/punim1991/dgshaw/model_v03/test11/test11.3/_mse_0.4399887025356293'
    learning_rate = 0.00011638101163629009 #trial.suggest_float("learning_rate", 1e-4, 1e-1, log=True)
    es_patience = 256 #1024 #trial.suggest_int("es_patience", 1024, 1025, log=True)

    model = tf.keras.models.load_model(model_path)

    # Restore the best checkpointed weights from the working directory.
    model.load_weights("./weights.best.hdf5")

    # CONSISTENCY FIX: metrics must be one flat list, matching create_model();
    # the original tuple-of-lists (['mae'], ['mse']) is treated by Keras as
    # per-output metrics and misconfigures this single-output model.
    model.compile(
        optimizer=tf.keras.optimizers.Adam(
            learning_rate=learning_rate),
        loss="mean_squared_error",
        metrics=['mean_absolute_error', 'mean_squared_error']
    )

    return model, es_patience
+
def objective(trial):
    """Optuna objective: resume training the checkpointed contour model and
    return its final monitored validation MSE (lower is better).

    Side effects: checkpoints best weights to ./weights.best.hdf5; saves the
    trained model plus history pickle, summary text and diagnostic plots into
    a directory named '_mse_<score>'; writes accumulated notes to
    outputted_data.txt inside that directory.
    """

    # Clear clutter from previous TensorFlow graphs.
    tf.keras.backend.clear_session()
    print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))

    # prep for info collection
    model_start_time = time.time()
    write = []  # lines collected here are flushed to outputted_data.txt at the end
    write.append('TRAINING DATA\n')

    # Metrics to be monitored by Optuna.
    monitor = "val_mean_squared_error"

    # Create tf.keras model instance.
    #model, es_patience = load_model(trial)
    #model, es_patience = create_model(trial)
    # Register the custom activation function in the custom_objects dictionary
    # (required because CA_activation is not a built-in Keras activation).
    custom_objects = {"CA_activation": CA_activation}

    # Load the model and pass the custom_objects dictionary
    model = tf.keras.models.load_model("./weights.best.hdf5", custom_objects=custom_objects)
    es_patience = 1024

    # Create dataset instance.
    train_ds, train_keys, test_ds, test_keys = load_dataset()
    print('dataset loaded...')

    baseline = 0.02  # hard floor: stop as soon as val error drops below this
    max_epochs = 100*es_patience
    checkpointpath = "./weights.best.hdf5"
    # Three stop/save conditions: keep only the best weights (checkpoint),
    # stop on plateau (es), stop when error is already good enough (floor).
    checkpoint = ModelCheckpoint(checkpointpath, monitor=monitor, verbose=0, save_best_only=True, mode='min')
    es = tf.keras.callbacks.EarlyStopping(monitor=monitor, patience=es_patience, verbose=0, mode="min", restore_best_weights=True)
    floor= EarlyStoppingWhenErrorLow(monitor=monitor, value=baseline, verbose=0)

    history = model.fit(
        train_ds, train_keys,
        validation_split=0.25,
        epochs=max_epochs,
        callbacks=[checkpoint,floor,es]#,TFKerasPruningCallback(trial, monitor)]
    )

    # record info
    training_time = time.time() - model_start_time
    write.append("--- %s seconds ---" % training_time)
    mae = history.history['mean_absolute_error']
    val_mae = history.history['val_mean_absolute_error']
    mse = history.history['mean_squared_error']
    val_mse = history.history['val_mean_squared_error']
    loss = history.history['loss']
    val_loss = history.history['val_loss']

    # Score is the last monitored value; the saved-model directory name
    # embeds it so runs are easy to compare on disk.
    score = history.history[monitor][-1]
    score_dir = '_mse_'+str(score)
    model.save(str(score_dir))

    with open(str(score_dir)+'/trainHistoryDict.pkl', 'wb') as f:
        pickle.dump(history.history, f)
    # to load: history = pickle.load(open('/trainHistoryDict'), "rb")

    with open(str(score_dir)+'/modelsummary.txt', 'w') as f:
        model.summary(print_fn=lambda x: f.write(x + '\n'))
    #model.summary(print_fn=myprint) # save the model output to txt file

    epochs_range = range(len(val_mae))

    # Plot training/validation MAE, MSE and loss on log-log axes.
    fig = plt.figure(figsize=(20, 10))

    ax1 = fig.add_subplot(1, 3, 1,xscale='log',yscale='log',ylabel='Mean Absolute Error',xlabel='Epoch')
    ax1.plot(epochs_range, val_mae, label='Validation MAE')
    ax1.plot(epochs_range, mae, label='Training MAE')
    ax1.legend(loc='upper right')
    ax1.title.set_text('Training and Validation MAE')


    ax2 = plt.subplot(1, 3, 2,xscale='log',yscale='log',ylabel='Mean Squared Error',xlabel='Epoch')
    ax2.plot(epochs_range, val_mse, label='Validation MSE')
    ax2.plot(epochs_range, mse, label='Training MSE')
    ax2.legend(loc='upper right')
    ax2.title.set_text('Training and Validation MSE')

    ax3 = plt.subplot(1, 3, 3,xscale='log',yscale='log',ylabel='Loss',xlabel='Epoch')

    ax3.plot(epochs_range, val_loss, label='Validation Loss')
    ax3.plot(epochs_range, loss, label='Training Loss')
    ax3.legend(loc='upper right')
    ax3.title.set_text('Training and Validation Loss')

    fig.savefig(str(score_dir)+'/training_history.png') # save

    plt.tight_layout()

    plt.close()

    # Evaluate on the held-out test split and plot predicted vs true angles
    # (full 0-180 range on the left, zoomed to the data range on the right).
    write.append('\nTEST DATA\n')
    evaluate = model.evaluate(test_ds,test_keys,return_dict=True)
    write.append(evaluate)

    test_predictions = model.predict(test_ds).flatten()

    fig = plt.figure(figsize=(12, 6))
    ax1 = plt.subplot(1,2,1,ylabel='Predictions [angle, degrees]',xlabel='True Values [angle, degrees]',aspect='equal')
    ax1.title.set_text('Test set')
    ax1.scatter(test_keys, test_predictions, c='crimson')
    lims = [0,180]
    _ = plt.plot(lims, lims)

    ax2 = plt.subplot(1,2,2,ylabel='Predictions [angle, degrees]',xlabel='True Values [angle, degrees]',aspect='equal',
        xlim = [min(test_keys)-1, max(test_keys)+1],ylim = [min(test_predictions)-1, max(test_predictions)+1])
    ax2.title.set_text('Test set')
    ax2.scatter(test_keys, test_predictions, c='crimson')
    lims = [min(test_keys), max(test_keys)]
    _ = plt.plot(lims, lims)

    plt.tight_layout()

    plt.savefig(str(score_dir)+'/test_data.png') # save

    plt.close()

    # output spread of test set error
    err = test_predictions-test_keys
    mu = np.mean(err) # mean of distribution
    sigma = np.std(err) # standard deviation of distribution
    num_bins = 50
    fig, ax = plt.subplots()
    n, bins, patches = ax.hist(err, num_bins, density=True) # the histogram of the data

    lower = np.mean(err) - (3*np.std(err))
    upper = np.mean(err) + (3*np.std(err))
    write.append('mean error is '+str.format('{0:.2e}',np.mean(err))+' and standard deviation is '+str.format('{0:.2e}',sigma))
    write.append('99.7% of errors are between '+str(lower)+' and '+(str(upper)))
    # add a 'best fit' line (normal pdf with the sample mean and std)
    y = ((1 / (np.sqrt(2 * np.pi) * sigma)) * np.exp(-0.5 * (1 / sigma * (bins - mu))**2))
    ax.plot(bins, y, '--')
    # add mean and std deviation lines (markers at mu and +-1/2/3 sigma)
    ax.axvline(mu,ymax=0.9,color='r')
    for n in [-1,1]:
        ax.axvline(mu+(n*sigma),ymax=0.68*0.9,color='r')
    for n in [-1,1]:
        ax.axvline(mu+(n*2*sigma),ymax=0.05*0.9,color='r')
    for n in [-1,1]:
        ax.axvline(mu+(n*3*sigma),ymax=0.01*0.9,color='r')
    ax.set_xlabel('Error')
    ax.set_ylabel('Frequency')
    ax.set_title(r'Histogram of test set error: $\mu$='+str.format('{0:.2e}', mu)+', $\sigma$='+str.format('{0:.2e}', sigma))

    fig.tight_layout() # Tweak spacing to prevent clipping of ylabel
    plt.savefig(str(score_dir)+'/test_set_spread.png') #save

    plt.close()

    with open(str(score_dir)+'/outputted_data.txt','w') as f:
        for line in write:
            f.write(str(line))
            f.write('\n')

    print('Done')

    return score
+
def show_result(study):
    """Print trial counts for an Optuna *study* and the best trial's details."""
    pruned_trials = study.get_trials(deepcopy=False, states=[TrialState.PRUNED])
    complete_trials = study.get_trials(deepcopy=False, states=[TrialState.COMPLETE])

    print("Study statistics: ")
    print(" Number of finished trials: ", len(study.trials))
    print(" Number of pruned trials: ", len(pruned_trials))
    print(" Number of complete trials: ", len(complete_trials))

    print("Best trial:")
    best = study.best_trial

    print(" Value: ", best.value)

    print(" Params: ")
    for param_name, param_value in best.params.items():
        print(f" {param_name}: {param_value}")
+
def main():
    """Create (or resume) the Optuna study and run a single optimisation trial."""
    print(f"Start time: {datetime.datetime.now()}")

    # Let TensorFlow grow GPU memory on demand instead of reserving it all.
    os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'

    study_name = 'contour_test2'
    storage_name = f"sqlite:///{study_name}.db"
    study = optuna.create_study(
        study_name=study_name,
        storage=storage_name,
        direction="minimize",
        pruner=optuna.pruners.MedianPruner(n_startup_trials=2),
        load_if_exists=True,
    )

    # Timeout of 345600 s = 4 days (2700 s would be 45 min).
    study.optimize(objective, n_trials=1, timeout=345600)

    show_result(study)
+
# Run the optimisation only when executed as a script, not on import.
if __name__ == "__main__":
    main()
diff --git a/modules/ML_model/training_history.png b/modules/ML_model/training_history.png
new file mode 100644
index 0000000..8c8cf17
Binary files /dev/null and b/modules/ML_model/training_history.png differ
diff --git a/modules/ML_model/training_history_all.png b/modules/ML_model/training_history_all.png
new file mode 100644
index 0000000..029f182
Binary files /dev/null and b/modules/ML_model/training_history_all.png differ
diff --git a/modules/ML_model/variables/variables.data-00000-of-00001 b/modules/ML_model/variables/variables.data-00000-of-00001
new file mode 100644
index 0000000..685e4f9
Binary files /dev/null and b/modules/ML_model/variables/variables.data-00000-of-00001 differ
diff --git a/modules/ML_model/variables/variables.index b/modules/ML_model/variables/variables.index
new file mode 100644
index 0000000..a02b136
Binary files /dev/null and b/modules/ML_model/variables/variables.index differ
diff --git a/modules/__init__.py b/modules/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/modules/__init__.pyc b/modules/__init__.pyc
new file mode 100644
index 0000000..af7b6a9
Binary files /dev/null and b/modules/__init__.pyc differ
diff --git a/modules/__pycache__/BA_fit.cpython-36.pyc b/modules/__pycache__/BA_fit.cpython-36.pyc
new file mode 100644
index 0000000..bd8940c
Binary files /dev/null and b/modules/__pycache__/BA_fit.cpython-36.pyc differ
diff --git a/modules/__pycache__/BA_fit.cpython-37.pyc b/modules/__pycache__/BA_fit.cpython-37.pyc
new file mode 100644
index 0000000..f377686
Binary files /dev/null and b/modules/__pycache__/BA_fit.cpython-37.pyc differ
diff --git a/modules/__pycache__/ExtractData.cpython-36.pyc b/modules/__pycache__/ExtractData.cpython-36.pyc
new file mode 100644
index 0000000..314d59d
Binary files /dev/null and b/modules/__pycache__/ExtractData.cpython-36.pyc differ
diff --git a/modules/__pycache__/ExtractData.cpython-37.pyc b/modules/__pycache__/ExtractData.cpython-37.pyc
new file mode 100644
index 0000000..b1566d7
Binary files /dev/null and b/modules/__pycache__/ExtractData.cpython-37.pyc differ
diff --git a/modules/__pycache__/ExtractData.cpython-39.pyc b/modules/__pycache__/ExtractData.cpython-39.pyc
new file mode 100644
index 0000000..e9f426f
Binary files /dev/null and b/modules/__pycache__/ExtractData.cpython-39.pyc differ
diff --git a/modules/__pycache__/FittingPlots.cpython-36.pyc b/modules/__pycache__/FittingPlots.cpython-36.pyc
new file mode 100644
index 0000000..4d283a8
Binary files /dev/null and b/modules/__pycache__/FittingPlots.cpython-36.pyc differ
diff --git a/modules/__pycache__/FittingPlots.cpython-39.pyc b/modules/__pycache__/FittingPlots.cpython-39.pyc
new file mode 100644
index 0000000..23d8f0d
Binary files /dev/null and b/modules/__pycache__/FittingPlots.cpython-39.pyc differ
diff --git a/modules/__pycache__/PlotManager.cpython-36.pyc b/modules/__pycache__/PlotManager.cpython-36.pyc
new file mode 100644
index 0000000..9eb74a5
Binary files /dev/null and b/modules/__pycache__/PlotManager.cpython-36.pyc differ
diff --git a/modules/__pycache__/PlotManager.cpython-39.pyc b/modules/__pycache__/PlotManager.cpython-39.pyc
new file mode 100644
index 0000000..a0e1dad
Binary files /dev/null and b/modules/__pycache__/PlotManager.cpython-39.pyc differ
diff --git a/modules/__pycache__/__init__.cpython-36.pyc b/modules/__pycache__/__init__.cpython-36.pyc
new file mode 100644
index 0000000..af0fcb4
Binary files /dev/null and b/modules/__pycache__/__init__.cpython-36.pyc differ
diff --git a/modules/__pycache__/__init__.cpython-37.pyc b/modules/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000..da900ef
Binary files /dev/null and b/modules/__pycache__/__init__.cpython-37.pyc differ
diff --git a/modules/__pycache__/__init__.cpython-39.pyc b/modules/__pycache__/__init__.cpython-39.pyc
new file mode 100644
index 0000000..9c4d169
Binary files /dev/null and b/modules/__pycache__/__init__.cpython-39.pyc differ
diff --git a/modules/__pycache__/analyse_needle.cpython-36.pyc b/modules/__pycache__/analyse_needle.cpython-36.pyc
new file mode 100644
index 0000000..3704a51
Binary files /dev/null and b/modules/__pycache__/analyse_needle.cpython-36.pyc differ
diff --git a/modules/__pycache__/analyse_needle.cpython-39.pyc b/modules/__pycache__/analyse_needle.cpython-39.pyc
new file mode 100644
index 0000000..38b0efb
Binary files /dev/null and b/modules/__pycache__/analyse_needle.cpython-39.pyc differ
diff --git a/modules/__pycache__/circular_fit.cpython-36.pyc b/modules/__pycache__/circular_fit.cpython-36.pyc
new file mode 100644
index 0000000..34f5179
Binary files /dev/null and b/modules/__pycache__/circular_fit.cpython-36.pyc differ
diff --git a/modules/__pycache__/circular_fit.cpython-37.pyc b/modules/__pycache__/circular_fit.cpython-37.pyc
new file mode 100644
index 0000000..e3b2a54
Binary files /dev/null and b/modules/__pycache__/circular_fit.cpython-37.pyc differ
diff --git a/modules/__pycache__/classes.cpython-36.pyc b/modules/__pycache__/classes.cpython-36.pyc
new file mode 100644
index 0000000..5f91dbb
Binary files /dev/null and b/modules/__pycache__/classes.cpython-36.pyc differ
diff --git a/modules/__pycache__/classes.cpython-37.pyc b/modules/__pycache__/classes.cpython-37.pyc
new file mode 100644
index 0000000..aac4b0d
Binary files /dev/null and b/modules/__pycache__/classes.cpython-37.pyc differ
diff --git a/modules/__pycache__/classes.cpython-39.pyc b/modules/__pycache__/classes.cpython-39.pyc
new file mode 100644
index 0000000..da3542f
Binary files /dev/null and b/modules/__pycache__/classes.cpython-39.pyc differ
diff --git a/modules/__pycache__/de_YoungLaplace.cpython-36.pyc b/modules/__pycache__/de_YoungLaplace.cpython-36.pyc
new file mode 100644
index 0000000..b18daf9
Binary files /dev/null and b/modules/__pycache__/de_YoungLaplace.cpython-36.pyc differ
diff --git a/modules/__pycache__/de_YoungLaplace.cpython-37.pyc b/modules/__pycache__/de_YoungLaplace.cpython-37.pyc
new file mode 100644
index 0000000..21a5238
Binary files /dev/null and b/modules/__pycache__/de_YoungLaplace.cpython-37.pyc differ
diff --git a/modules/__pycache__/de_YoungLaplace.cpython-39.pyc b/modules/__pycache__/de_YoungLaplace.cpython-39.pyc
new file mode 100644
index 0000000..6c50411
Binary files /dev/null and b/modules/__pycache__/de_YoungLaplace.cpython-39.pyc differ
diff --git a/modules/__pycache__/ellipse_fit.cpython-36.pyc b/modules/__pycache__/ellipse_fit.cpython-36.pyc
new file mode 100644
index 0000000..f88c294
Binary files /dev/null and b/modules/__pycache__/ellipse_fit.cpython-36.pyc differ
diff --git a/modules/__pycache__/ellipse_fit.cpython-37.pyc b/modules/__pycache__/ellipse_fit.cpython-37.pyc
new file mode 100644
index 0000000..9e44d76
Binary files /dev/null and b/modules/__pycache__/ellipse_fit.cpython-37.pyc differ
diff --git a/modules/__pycache__/extract_profile.cpython-36.pyc b/modules/__pycache__/extract_profile.cpython-36.pyc
new file mode 100644
index 0000000..7afcc78
Binary files /dev/null and b/modules/__pycache__/extract_profile.cpython-36.pyc differ
diff --git a/modules/__pycache__/extract_profile.cpython-37.pyc b/modules/__pycache__/extract_profile.cpython-37.pyc
new file mode 100644
index 0000000..c164f78
Binary files /dev/null and b/modules/__pycache__/extract_profile.cpython-37.pyc differ
diff --git a/modules/__pycache__/extract_profile.cpython-39.pyc b/modules/__pycache__/extract_profile.cpython-39.pyc
new file mode 100644
index 0000000..20d1396
Binary files /dev/null and b/modules/__pycache__/extract_profile.cpython-39.pyc differ
diff --git a/modules/__pycache__/fit_data.cpython-36.pyc b/modules/__pycache__/fit_data.cpython-36.pyc
new file mode 100644
index 0000000..02ec087
Binary files /dev/null and b/modules/__pycache__/fit_data.cpython-36.pyc differ
diff --git a/modules/__pycache__/fit_data.cpython-39.pyc b/modules/__pycache__/fit_data.cpython-39.pyc
new file mode 100644
index 0000000..0501747
Binary files /dev/null and b/modules/__pycache__/fit_data.cpython-39.pyc differ
diff --git a/modules/__pycache__/fits.cpython-36.pyc b/modules/__pycache__/fits.cpython-36.pyc
new file mode 100644
index 0000000..8c693df
Binary files /dev/null and b/modules/__pycache__/fits.cpython-36.pyc differ
diff --git a/modules/__pycache__/fits.cpython-37.pyc b/modules/__pycache__/fits.cpython-37.pyc
new file mode 100644
index 0000000..240419e
Binary files /dev/null and b/modules/__pycache__/fits.cpython-37.pyc differ
diff --git a/modules/__pycache__/generate_data.cpython-36.pyc b/modules/__pycache__/generate_data.cpython-36.pyc
new file mode 100644
index 0000000..b07aa48
Binary files /dev/null and b/modules/__pycache__/generate_data.cpython-36.pyc differ
diff --git a/modules/__pycache__/generate_data.cpython-39.pyc b/modules/__pycache__/generate_data.cpython-39.pyc
new file mode 100644
index 0000000..0c055c4
Binary files /dev/null and b/modules/__pycache__/generate_data.cpython-39.pyc differ
diff --git a/modules/__pycache__/initialise_parameters.cpython-36.pyc b/modules/__pycache__/initialise_parameters.cpython-36.pyc
new file mode 100644
index 0000000..e03c0f7
Binary files /dev/null and b/modules/__pycache__/initialise_parameters.cpython-36.pyc differ
diff --git a/modules/__pycache__/initialise_parameters.cpython-37.pyc b/modules/__pycache__/initialise_parameters.cpython-37.pyc
new file mode 100644
index 0000000..78f4a50
Binary files /dev/null and b/modules/__pycache__/initialise_parameters.cpython-37.pyc differ
diff --git a/modules/__pycache__/initialise_parameters.cpython-39.pyc b/modules/__pycache__/initialise_parameters.cpython-39.pyc
new file mode 100644
index 0000000..232bfce
Binary files /dev/null and b/modules/__pycache__/initialise_parameters.cpython-39.pyc differ
diff --git a/modules/__pycache__/interpolation_function.cpython-36.pyc b/modules/__pycache__/interpolation_function.cpython-36.pyc
new file mode 100644
index 0000000..0fb0195
Binary files /dev/null and b/modules/__pycache__/interpolation_function.cpython-36.pyc differ
diff --git a/modules/__pycache__/interpolation_function.cpython-39.pyc b/modules/__pycache__/interpolation_function.cpython-39.pyc
new file mode 100644
index 0000000..bbb60f4
Binary files /dev/null and b/modules/__pycache__/interpolation_function.cpython-39.pyc differ
diff --git a/modules/__pycache__/jacobian.cpython-36.pyc b/modules/__pycache__/jacobian.cpython-36.pyc
new file mode 100644
index 0000000..cffc85e
Binary files /dev/null and b/modules/__pycache__/jacobian.cpython-36.pyc differ
diff --git a/modules/__pycache__/jacobian.cpython-39.pyc b/modules/__pycache__/jacobian.cpython-39.pyc
new file mode 100644
index 0000000..5ddae41
Binary files /dev/null and b/modules/__pycache__/jacobian.cpython-39.pyc differ
diff --git a/modules/__pycache__/polynomial_fit.cpython-36.pyc b/modules/__pycache__/polynomial_fit.cpython-36.pyc
new file mode 100644
index 0000000..4058739
Binary files /dev/null and b/modules/__pycache__/polynomial_fit.cpython-36.pyc differ
diff --git a/modules/__pycache__/polynomial_fit.cpython-37.pyc b/modules/__pycache__/polynomial_fit.cpython-37.pyc
new file mode 100644
index 0000000..071a41c
Binary files /dev/null and b/modules/__pycache__/polynomial_fit.cpython-37.pyc differ
diff --git a/modules/__pycache__/preprocessing.cpython-36.pyc b/modules/__pycache__/preprocessing.cpython-36.pyc
new file mode 100644
index 0000000..0077ac9
Binary files /dev/null and b/modules/__pycache__/preprocessing.cpython-36.pyc differ
diff --git a/modules/__pycache__/preprocessing.cpython-37.pyc b/modules/__pycache__/preprocessing.cpython-37.pyc
new file mode 100644
index 0000000..8d312c9
Binary files /dev/null and b/modules/__pycache__/preprocessing.cpython-37.pyc differ
diff --git a/modules/__pycache__/read_image.cpython-36.pyc b/modules/__pycache__/read_image.cpython-36.pyc
new file mode 100644
index 0000000..d6877c8
Binary files /dev/null and b/modules/__pycache__/read_image.cpython-36.pyc differ
diff --git a/modules/__pycache__/read_image.cpython-37.pyc b/modules/__pycache__/read_image.cpython-37.pyc
new file mode 100644
index 0000000..9994717
Binary files /dev/null and b/modules/__pycache__/read_image.cpython-37.pyc differ
diff --git a/modules/__pycache__/read_image.cpython-39.pyc b/modules/__pycache__/read_image.cpython-39.pyc
new file mode 100644
index 0000000..53ce537
Binary files /dev/null and b/modules/__pycache__/read_image.cpython-39.pyc differ
diff --git a/modules/__pycache__/select_regions.cpython-36.pyc b/modules/__pycache__/select_regions.cpython-36.pyc
new file mode 100644
index 0000000..3f49900
Binary files /dev/null and b/modules/__pycache__/select_regions.cpython-36.pyc differ
diff --git a/modules/__pycache__/select_regions.cpython-37.pyc b/modules/__pycache__/select_regions.cpython-37.pyc
new file mode 100644
index 0000000..1c5b592
Binary files /dev/null and b/modules/__pycache__/select_regions.cpython-37.pyc differ
diff --git a/modules/__pycache__/select_regions.cpython-39.pyc b/modules/__pycache__/select_regions.cpython-39.pyc
new file mode 100644
index 0000000..691a2f8
Binary files /dev/null and b/modules/__pycache__/select_regions.cpython-39.pyc differ
diff --git a/modules/__pycache__/user_interface.cpython-36.pyc b/modules/__pycache__/user_interface.cpython-36.pyc
new file mode 100644
index 0000000..5289277
Binary files /dev/null and b/modules/__pycache__/user_interface.cpython-36.pyc differ
diff --git a/modules/__pycache__/user_interface.cpython-37.pyc b/modules/__pycache__/user_interface.cpython-37.pyc
new file mode 100644
index 0000000..cf3fd0a
Binary files /dev/null and b/modules/__pycache__/user_interface.cpython-37.pyc differ
diff --git a/modules/__pycache__/user_interface.cpython-39.pyc b/modules/__pycache__/user_interface.cpython-39.pyc
new file mode 100644
index 0000000..c7af29a
Binary files /dev/null and b/modules/__pycache__/user_interface.cpython-39.pyc differ
diff --git a/modules/circular_fit.py b/modules/circular_fit.py
new file mode 100644
index 0000000..a00a881
--- /dev/null
+++ b/modules/circular_fit.py
@@ -0,0 +1,1158 @@
+#!/usr/bin/env python
+#coding=utf-8
+
+"""This code serves as a discrete instance of the circular fit method of
+contact angle analysis.
+
+Circular fit code taken from the most recent version of conan -
+conan-ML_cv1.1/modules/select_regions.py"""
+
+# Circular fit from the most recent version of conan - conan-ML_v1.1/modules/select_regions.py
+
+from sklearn.cluster import OPTICS # for clustering algorithm
+import scipy.optimize as opt
+import numba
+from scipy.spatial import distance
+from scipy.integrate import solve_ivp
+import numpy as np
+import matplotlib.pyplot as plt
+import matplotlib
+import cv2
+import math
+import time
+
def cluster_OPTICS(sample, out_style='coords', xi=None, eps=None, verbose=0):
    """Cluster an array (or list) of points of the form [[x1,y1],[x2,y2],...,[xn,yn]].

    Clusters are returned as a dictionary.

    If out_style='coords' each dictionary entry is a group, and points are
    returned in coordinate form.
    If out_style='xy' there are two dictionary entries for each group, one
    labelled as nx and one as ny (where n is the label of the group).

    If xi (float between 0 and 1) is not None and eps is None, the xi
    clustering method is used. The OPTICS algorithm defines clusters based on
    the minimum steepness on the reachability plot. For example, an upwards
    point in the reachability plot is defined by the ratio from one point to
    its successor being at most 1-xi.

    If eps (float) is not None and xi is None, the dbscan clustering method is
    used, where eps is the maximum distance between two samples for one to be
    considered as in the neighborhood of the other.

    Raises ValueError unless exactly one of eps and xi is given.

    https://stackoverflow.com/questions/47974874/algorithm-for-grouping-points-in-given-distance
    https://scikit-learn.org/stable/modules/generated/sklearn.cluster.OPTICS.html
    """
    if eps is not None and xi is None:
        # cluster_method 'dbscan' so that an explicit eps can be honoured
        clustering = OPTICS(min_samples=2, cluster_method='dbscan', eps=eps).fit(sample)
    elif xi is not None and eps is None:
        # original had xi = 0.05; xi defaults are supplied by callers
        clustering = OPTICS(min_samples=2, xi=xi).fit(sample)
    else:
        # BUG FIX: raising a plain string is a TypeError in Python 3;
        # raise a proper exception instead.
        raise ValueError('exactly one of eps and xi must be given, not neither nor both')

    groups = list(set(clustering.labels_))

    if verbose == 2:
        print(clustering.labels_)
    elif verbose == 1:
        print(groups)

    # Build the label -> points mapping in a single pass over the sample
    # (previously one full pass per group).
    dic = {label: [] for label in groups}
    for label, point in zip(clustering.labels_, sample):
        dic[label].append(point)

    # Separate x and y coordinates per group for the 'xy' output style.
    dic2 = {}
    for k, points in dic.items():
        dic2[str(k) + 'x'] = [p[0] for p in points]
        dic2[str(k) + 'y'] = [p[1] for p in points]

    if out_style == 'coords':
        return dic
    elif out_style == 'xy':
        return dic2
+
def distance1(P1, P2):
    """Return the Euclidean distance between P1 = (x1, y1) and P2 = (x2, y2)."""
    dx = P1[0] - P2[0]
    dy = P1[1] - P2[1]
    return (dx ** 2 + dy ** 2) ** 0.5
+
def optimized_path(coords, start=None):
    """Order scattered contour points into a continuous path.

    Repeatedly steps from the current point to its nearest unvisited
    neighbour (greedy nearest-neighbour ordering).
    https://stackoverflow.com/questions/45829155/sort-points-in-order-to-have-a-continuous-curve-using-python

    Parameters:
        coords: list or array of points, [[x1, y1], [x2, y2], ...].
        start: optional starting point; defaults to the first coordinate.

    Returns:
        np.ndarray: the ordered points. If two consecutive ordered points end
        up more than 5 units apart, the ordering has likely jumped to the
        wrong part of the contour, so the path is truncated just before the
        first such jump.
    """
    if not isinstance(coords, list):
        coords = coords.tolist()
    if start is None:
        start = coords[0]

    # Work on a copy so the caller's list is not emptied as a side effect.
    remaining = list(coords)
    path = [start]
    remaining.remove(start)
    while remaining:
        last = path[-1]
        # Squared distance is monotonic in distance, so the argmin (and
        # first-occurrence tie-breaking of min) is unchanged.
        nearest = min(remaining,
                      key=lambda p: (p[0] - last[0]) ** 2 + (p[1] - last[1]) ** 2)
        path.append(nearest)
        remaining.remove(nearest)
    path = np.array(path)

    # If there is any large jump in distance between consecutive points there
    # is likely a mistake, so points from the first jump onwards are dropped.
    for i in range(len(path) - 1):
        step = ((path[i][0] - path[i + 1][0]) ** 2 +
                (path[i][1] - path[i + 1][1]) ** 2) ** 0.5
        if step > 5:
            return path[:i]
    return path
+
def prepare_hydrophobic(coords, xi=0.8, cluster=False, display=False):
    """takes an array (n,2) of coordinate points, and returns the left and right halfdrops of the contour.
    xi determines the minimum steepness on the reachability plot that constitutes a cluster boundary of the
    clustering algorithm.
    deg is the degree of the polynomial used to describe the shape of the droplet.

    This code is adapted from the prepare module, but this version differs in that it assumes that the drop
    is hydrophobic.

    Returns:
        profile: (n, 2) array of the combined, ordered left+right contour
            (image-indexed orientation, x shifted back by the apex offset).
        CPs: dict {0: [x, y], 1: [x, y]} of the left and right contact points.
    """
    # NOTE(review): this hunk of the patch appears corrupted — several spans
    # below are syntactically invalid where diff lines were lost (flagged
    # inline). Recover the function from the upstream conan source before use.

    # NOTE(review): np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # use float or np.float64 — TODO confirm the targeted NumPy version.
    coords = coords.astype(np.float)
    # scan for clusers to remove noise and circle from lensing effect
    ################ MAY NEED TO OPTIMIZE eps/xi TO FIND APPROPRIATE GROUPINGS ####################
    if cluster: # turn this off bc using synthetic drops without lensing effect
        input_contour = coords
        dic,dic2 = cluster_OPTICS(input_contour,xi=xi),cluster_OPTICS(input_contour,out_style='xy',xi=xi)

        #print("number of groups: ",len(list(dic.keys())))

        jet= plt.get_cmap('jet')
        colors = iter(jet(np.linspace(0,1,len(list(dic.keys())))))
        for k in dic.keys():
            plt.plot(dic2[str(k)+'x'],dic2[str(k)+'y'], 'o',color=next(colors))
        plt.title(str(len(dic.keys()))+" groups found by clustering")
        plt.show()
        plt.close()
        maxkey=max(dic, key=lambda k: len(dic[k]))

        #print('key to longest dictionary entry is: ',maxkey)

        # take the longest group
        longest = dic[maxkey]

    # flip contour so that min and max values are correct
    for coord in coords:
        coord[1] = -coord[1]

    # NOTE(review): this unconditionally overwrites `longest`, discarding the
    # clustered result computed in the `if cluster:` branch above — confirm
    # whether that is intended.
    longest = coords

    #print("first few coordinates of the longest contour: ",longest[:3])

    xlongest = []
    ylongest = []
    for i in range(len(longest)):
        xlongest.append(longest[i][0])
        ylongest.append(longest[i][1])

    #print("first few x coordinates of the longest contour: ",xlongest[:3])
    #print("first few y coordinates of the longest contour: ",ylongest[:3])


    # Find a appropriate epsilon value for cluster_OPTICS, this will remove noise in the bottom 10% of the drop
    #. most importantly noise is reduced at contact points.

    # variables in this process are how much and what part of the top of the droplet we use to be representative of
    # the full contour, and whether we use the max(distance) between points or the average between points, or
    # a scalar value of either.

    xtop = [] # isolate top 90% of drop
    ytop = []
    percent = 0.3
    #print('Isolate the top ',100-(percent*100),'% of the contour:')
    for n,y in enumerate(ylongest):
        if y > min(ylongest) + (max(ylongest) - min(ylongest))*percent:
            xtop.append(xlongest[n])
            ytop.append(y)
    xtop = np.array(xtop)
    ytop = np.array(ytop)

    top = []
    for n,x in enumerate(xtop):
        top.append([xtop[n],ytop[n]])
    top = np.array(top)
    top_array = optimized_path(top)

    dists = [] # find the average distance between consecutive points
    for n,co in enumerate(top_array):
        # NOTE(review): the following line is garbled in the patch (the body
        # of this loop and the start of the next one were lost); it is not
        # valid Python as written.
        if 1 min(ylongest) + (max(ylongest) - min(ylongest))*percent:
            xtop.append(xlongest[n])
            ytop.append(y)
    # apex x is taken as the midpoint of the top section's x extent
    xapex = (max(xtop) + min(xtop))/2

    l_drop = []
    r_drop = []
    for n in longest:
        if n[0] <= xapex:
            l_drop.append(n)
        if n[0] >= xapex:
            r_drop.append(n)
    l_drop = np.array(l_drop)
    r_drop = np.array(r_drop)

    # transpose both half drops so that they both face right and the apex of both is at 0,0
    r_drop[:,0] = r_drop[:,0] - xapex
    l_drop[:,0] = -l_drop[:,0] + xapex

    if display:
        plt.plot(r_drop[:,[0]], r_drop[:,[1]], 'b,')
        #plt.show()
        #plt.close()
        plt.plot(l_drop[:,[0]], l_drop[:,[1]], 'r,')
        #plt.gca().set_aspect('equal', adjustable='box')
        #plt.xlim([470,530])
        #plt.ylim([-188,-190])
        plt.show()
        plt.close()

    #############################

    # the drop has been split in half

    # this system has a user input which gives a rough indication of the contact point and the surface line

    # isolate the bottom 5% of the contour near the contact point

    drops = {}
    counter = 0
    crop_drop = {}
    CPs = {}
    for halfdrop in [l_drop,r_drop]:
        new_halfdrop = sorted(halfdrop.tolist(), key=lambda x: (x[0],-x[1])) #top left to bottom right
        new_halfdrop = optimized_path(new_halfdrop)#[::-1]

        xnew_halfdrop = new_halfdrop[:,[0]].reshape(len(new_halfdrop[:,[0]]))
        ynew_halfdrop = new_halfdrop[:,[1]].reshape(len(new_halfdrop[:,[1]]))

        # isolate the bottom of the drop to help identify contact points (may not need to do this for all scenarios)
        bottom = []
        top = [] # will need this later
        #print('isolate the bottom ',percent*100,'% of the contour:') # percent defined above
        div_line_value = min(new_halfdrop[:,[1]]) + (max(new_halfdrop[:,[1]]) - min(new_halfdrop[:,[1]]))*percent
        for n in new_halfdrop:
            if n[1] < div_line_value:
                bottom.append(n)
            else:
                top.append(n)

        bottom = np.array(bottom)
        top = np.array(top)

        xbottom = bottom[:,[0]].reshape(len(bottom[:,[0]]))
        ybottom = bottom[:,[1]].reshape(len(bottom[:,[1]]))
        xtop = top[:,[0]].reshape(len(top[:,[0]]))
        ytop = top[:,[1]].reshape(len(top[:,[1]]))

        #print('max x value of halfdrop is: ',max(xhalfdrop))

        # NOTE(review): this plot is shown unconditionally (`if 1:`), even when
        # display=False — confirm whether it should be gated on `display`.
        if 1: # plot the bottom 10% of the contour
            plt.plot(xbottom, ybottom, 'b,')
            plt.title('bottom 10% of the contour')
            #plt.xlim([130,200])
            plt.show()
            plt.close()

        #### Continue here assuming that the drop is hydrophobic ####
        if 1:
            # order all halfdrop points using optimized_path (very quick but occasionally makes mistakes)

            xCP = min(xbottom)
            #yCP = min([coord[1] for coord in new_halfdrop if coord[0]==xCP])
            yCP = max([coord[1] for coord in bottom if coord[0]==xCP])
            CPs[counter] = [xCP, yCP]

            if display: #check
                plt.plot(new_halfdrop[:,0],new_halfdrop[:,1])
                plt.show()
                plt.close()

            # remove surface line past the contact point
            index = new_halfdrop.tolist().index(CPs[counter]) #?

            new_halfdrop = new_halfdrop[:index+1]

            # dead branch: alternative contact-point indexing kept for reference
            if 0:
                xCP_index = [i for i, j in enumerate(xnew_halfdrop) if j == xCP]
                #print('xCP_index is: ',xCP_index)
                yCP_index = [i for i, j in enumerate(ynew_halfdrop) if j == yCP]
                #print('yCP_index is: ',yCP_index)

                new_halfdrop = np.zeros((len(xnew_halfdrop),2))
                for n in range(len(xnew_halfdrop)):
                    new_halfdrop[n,[0]]=xnew_halfdrop[n]
                    new_halfdrop[n,[1]]=ynew_halfdrop[n]
                #print('first 3 points of new_halfdrop are: ',new_halfdrop[:3])
                #print('length of new_halfdrop is: ',len(new_halfdrop))

                if xCP_index == yCP_index:
                    if new_halfdrop[xCP_index[0]+1][1]>new_halfdrop[xCP_index[0]-1][1]:
                        new_halfdrop = new_halfdrop[xCP_index[0]:]
                    else:
                        new_halfdrop = new_halfdrop[:xCP_index[0]+1]
                else:
                    raise_error = True
                    for x in xCP_index:
                        for y in yCP_index:
                            if x==y:
                                raise_error = False
                                xCP_index = [x]
                                yCP_index = [y]
                                #print('indexes of the CP are: ',xCP_index[0],', ',yCP_index[0])
                                if new_halfdrop[xCP_index[0]+1][1]>new_halfdrop[xCP_index[0]-1][1]:
                                    new_halfdrop = new_halfdrop[xCP_index[0]:]
                                else:
                                    new_halfdrop = new_halfdrop[:xCP_index[0]+1]
                    if raise_error == True:
                        print('The index of the contact point x value is: ', new_halfdrop[xCP_index])
                        print('The index of the contact point y value is: ', new_halfdrop[yCP_index])
                        # NOTE(review): raising a string is a TypeError in Python 3
                        raise 'indexes of x and y values of the contact point are not the same'

        # dead branch: two-opt ordering kept for reference
        if 0:
            # order all halfdrop points using two-opt (the slower method)

            # before any ordering is done, chop off the surface line that is past the drop edge
            del_indexes = []
            for index,coord in enumerate(bottom):
                if coord[0]>max(xtop):
                    del_indexes.append(index)
                # NOTE(review): the patch is corrupted from here — the line
                # below is garbled (a comparison operator and several source
                # lines, including a `try:`, are missing) and does not parse.
                if coord[1] ybot[yCP_index-1]:
                    new_bot = np.zeros((len(xbot[yCP_index:]),2))
                    for n in range(len(new_bot)):
                        new_bot[n,[0]] = xbot[xCP_index+n]
                        new_bot[n,[1]] = ybot[yCP_index+n]
                else:
                    new_bot = np.zeros((len(xbot[:yCP_index]),2))
                    for n in range(len(new_bot)):
                        new_bot[n,[0]] = xbot[n]
                        new_bot[n,[1]] = ybot[n]
            except:
                try:
                    if ybot[yCP_index] > ybot[yCP_index-2]:
                        new_bot = np.zeros((len(xbot[yCP_index:]),2))
                        for n in range(len(new_bot)):
                            new_bot[n,[0]] = xbot[xCP_index+n]
                            new_bot[n,[1]] = ybot[yCP_index+n]
                    else:
                        new_bot = np.zeros((len(xbot[:yCP_index]),2))
                        for n in range(len(new_bot)):
                            new_bot[n,[0]] = xbot[n]
                            new_bot[n,[1]] = ybot[n]
                except:
                    print('xCP_indexs are: ', xCP_indexs)
                    print('yCP_indexs are: ', yCP_indexs)
                    # NOTE(review): raising a string is a TypeError in Python 3
                    raise 'indexes of x and y values of the contact point are not the same'
            new_halfdrop = np.concatenate((new_top,new_bot))

        # dead branch: sort_to_line based ordering kept for reference
        if 0: # order the points so that the baseline can be removed
            # before any ordering is done, chop off the surface line that is past the drop edge
            del_indexes = []
            for index,coord in enumerate(halfdrop):
                if coord[0]>max(xtop):
                    del_indexes.append(index)
            #halfdrop = np.delete(halfdrop,del_indexes)
            xhalfdrop = np.delete(xhalfdrop,del_indexes)
            yhalfdrop = np.delete(yhalfdrop,del_indexes)
            #print('shape of another_halfdrop is: '+ str(type(another_halfdrop)))
            #print('first few points of halfdrop are: ',halfdrop[:3])



            # order half contour points
            # NOTE(review): sort_to_line is not defined anywhere in this module
            xx,yy = sort_to_line(xhalfdrop,yhalfdrop)
            add_top = False
            #print('length of halfdrop is: ', len(halfdrop))
            #print('length of xbottom is: ', len(xbottom))

            #if xx[0]<1: # then graph starts at the top
            surface_past_drop_index = []
            for n,x in enumerate(xx):
                if x>max(xtop):
                    surface_past_drop_index.append(n)
            #xx = xx[:max(xtop)point]
            #print('Indexes of contour points past drop: ',surface_past_drop_index)


            # if the sort method will not work
            if len(xx) < len(xhalfdrop): # assumes that the error is on the surface somewhere, so uses bottom of contour
                add_top = True
                print()
                print('sort_to_line is not utilising the full contour, alternate ordering method being used')
                print('check bottom 10% of contour...')
                # this method is much slower than the above, so use as few points as possible
                bot_list = []
                for n in range(len(xbottom)):
                    # NOTE(review): the patch is corrupted here — the line
                    # below is garbled (intervening source lines were lost)
                    # and does not parse.
                    if xbottom[n] ybot[yCP_index[0]-1]:
                        new_bot = np.zeros((len(xbot[yCP_index[0]:]),2))
                        for n in range(len(new_bot)):
                            new_bot[n,[0]] = xbot[xCP_index[0]+n]
                            new_bot[n,[1]] = ybot[yCP_index[0]+n]
                    else:
                        new_bot = np.zeros((len(xbot[:yCP_index[0]]),2))
                        for n in range(len(new_bot)):
                            new_bot[n,[0]] = xbot[n]
                            new_bot[n,[1]] = ybot[n]
                else:
                    # NOTE(review): raising a string is a TypeError in Python 3
                    raise 'indexes of x and y values of the contact point are not the same'

            # combine new_bot with top_array to give the isolated drop contour without surface
            if 0:
                top_array = np.zeros((len(xtop),2))
                for n in range(len(xtop)):
                    top_array[n,[0]] = xtop[n]
                    top_array[n,[1]] = ytop[n]

            new_halfdrop = np.concatenate((top,new_bot))

            # re-order to check that the error was at the surface line
            xx,yy = sort_to_line(new_halfdrop[:,[0]],new_halfdrop[:,[1]])
            # NOTE(review): the patch is corrupted here — the line below is
            # garbled (a comparison and several lines are missing) and the
            # dangling `else:` that follows belongs to a lost `if`.
            if len(xx)new_halfdrop[xCP_index[0]-1][1]:
                new_halfdrop = new_halfdrop[xCP_index[0]:]
            else:
                new_halfdrop = new_halfdrop[:xCP_index[0]+1]
            else:
                raise_error = True
                for x in xCP_index:
                    for y in yCP_index:
                        if x==y:
                            raise_error = False
                            xCP_index = [x]
                            yCP_index = [y]
                            #print('indexes of the CP are: ',xCP_index[0],', ',yCP_index[0])
                            if new_halfdrop[xCP_index[0]+1][1]>new_halfdrop[xCP_index[0]-1][1]:
                                new_halfdrop = new_halfdrop[xCP_index[0]:]
                            else:
                                new_halfdrop = new_halfdrop[:xCP_index[0]+1]
                if raise_error == True:
                    print('The index of the contact point x value is: ', new_halfdrop[xCP_index])
                    print('The index of the contact point y value is: ', new_halfdrop[yCP_index])
                    # NOTE(review): raising a string is a TypeError in Python 3
                    raise 'indexes of x and y values of the contact point are not the same'

        # the left half is reversed so both halves run in the same direction
        if counter == 0:
            drops[counter] = new_halfdrop[::-1]
        else:
            drops[counter] = new_halfdrop

        if display: #display
            jet= plt.get_cmap('jet')
            colors = iter(jet(np.linspace(0,1,len(new_halfdrop))))
            for k in new_halfdrop:
                plt.plot(k[0],k[1], 'o',color=next(colors))
            plt.title('outputted halfdrop')
            plt.axis('equal')
            plt.show()
            plt.close()

        counter+=1

    # reflect the left drop and combine left and right

    profile = np.empty((len(drops[0])+len(drops[1]),2))
    for i,n in enumerate(drops[0]):
        flipped = n
        flipped[0] = -flipped[0]
        profile[i] = flipped
    for i,n in enumerate(drops[1]):
        profile[len(drops[0])+i] = n
    CPs[0][0] = -CPs[0][0]

    if display:
        jet= plt.get_cmap('jet')
        colors = iter(jet(np.linspace(0,1,len(profile))))
        for k in profile:
            plt.plot(k[0],k[1], 'o',color=next(colors))
        plt.title('final output')
        #plt.plot(profile[:,0],profile[:,1],'b')
        plt.show()
        plt.close()

        plt.title('final output')
        plt.plot(profile[:,0],profile[:,1],'b')
        plt.show()
        plt.close()

    # flip upside down again so that contour follows image indexing
    # and transform to the right so that x=0 is no longer in line with apex
    for coord in profile:
        coord[1] = -coord[1]
        coord[0] = coord[0] + xapex
    for n in [0,1]:
        CPs[n][1] = -CPs[n][1]
        CPs[n][0] = CPs[n][0] + xapex

    # flip original contour back to original orientation
    for coord in coords:
        coord[1] = -coord[1]

    return profile,CPs
+
def find_contours(image):
    """
    Calls cv2.findContours() on passed image in a way that is compatible with OpenCV 4.x, 3.x or 2.x
    versions. Passed image is a numpy.array.

    Note, cv2.findContours() will treat non-zero pixels as 1 and zero pixels as 0, so the edges detected will only
    be those on the boundary of pixels with non-zero and zero values.

    Returns a numpy array of the contours in descending arc length order.

    Raises:
        ValueError: if `image` is not a single channel image.
    """
    if len(image.shape) > 2:
        raise ValueError('`image` must be a single channel image')

    # BUG FIX: this previously compared against CV2_VERSION, which is never
    # defined in this module and raised NameError at runtime. Rather than
    # parsing cv2.__version__, rely on the return arity: OpenCV 3.2-3.4
    # returns (image, contours, hierarchy) while 2.x and 4.x+ return
    # (contours, hierarchy) — the contours are always second-to-last.
    result = cv2.findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_TC89_KCOS)
    contours = result[-2]

    # Each contour has shape (n, 1, 2) where 'n' is the number of points. Presumably this is so each
    # point is a size 2 column vector, we don't want this so reshape it to a (n, 2)
    contours = [contour.reshape(contour.shape[0], 2) for contour in contours]

    # Sort the contours by arc length, descending order
    contours.sort(key=lambda c: cv2.arcLength(c, False), reverse=True)

    return contours
+
def extract_edges_CV(img):
    """Detect the drop edge in an image and return its coordinates.

    Parameters:
        img (np.ndarray): colour (BGR) image containing the drop.

    Returns:
        np.ndarray: (n, 2) array of unique [x, y] edge coordinates in the order
        returned by cv2.findContours, with points within 1 px of the image
        border discarded.
    """
    IGNORE_EDGE_MARGIN = 1
    img = img.astype("uint8")
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Otsu's method chooses the binarisation threshold automatically
    #ret, thresh = cv2.threshold(gray,threshValue,255,cv2.THRESH_BINARY)
    ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # BUG FIX: unpacking two values breaks on OpenCV 3.2-3.4, which returns
    # (image, contours, hierarchy); contours are always second-to-last.
    result = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    contours = result[-2]
    # Each contour has shape (n, 1, 2) where 'n' is the number of points. Presumably this is so each
    # point is a size 2 column vector, we don't want this so reshape it to a (n, 2)
    contours = [contour.reshape(contour.shape[0], 2) for contour in contours]

    # Sort the contours by arc length, descending order
    contours.sort(key=lambda c: cv2.arcLength(c, False), reverse=True)

    # Assume that the drop is the largest contour
    drop_profile = contours[0]

    # Ignore points of the drop profile near the edges of the drop image
    width, height = img.shape[1::-1]
    if not (width < IGNORE_EDGE_MARGIN or height < IGNORE_EDGE_MARGIN):
        mask = ((IGNORE_EDGE_MARGIN < drop_profile[:, 0]) & (drop_profile[:, 0] < width - IGNORE_EDGE_MARGIN) &
                (IGNORE_EDGE_MARGIN < drop_profile[:, 1]) & (drop_profile[:, 1] < height - IGNORE_EDGE_MARGIN))
        drop_profile = drop_profile[mask]

    # De-duplicate while preserving order. The previous `list(coord) not in
    # output` membership test was O(n^2) over the whole contour.
    seen = set()
    output = []
    for coord in drop_profile:
        key = (coord[0], coord[1])
        if key not in seen:
            seen.add(key)
            output.append(list(coord))
    return np.array(output)
+
def intersection(center, radius, p1, p2):
    """Find where the (infinite) line through p1 and p2 crosses a circle.

    Returns a pair of (x, y) points for a secant, the same point twice for a
    tangent, or None (after printing a warning) when the line misses the
    circle entirely.
    """
    (x1, y1), (x2, y2) = p1, p2
    dx = x2 - x1
    dy = y2 - y1

    # coefficients of |p1 + t*(dx, dy) - center|^2 = radius^2, quadratic in t
    a = dx ** 2 + dy ** 2
    b = 2 * (dx * (x1 - center[0]) + dy * (y1 - center[1]))
    c = (x1 - center[0]) ** 2 + (y1 - center[1]) ** 2 - radius ** 2

    disc = b ** 2 - 4 * a * c
    if disc < 0:
        # line does not reach the circle
        print('Not a secant!')
        return None
    if disc == 0:
        # tangent: a single touching point, reported twice for a uniform shape
        t = -b / (2 * a)
        point = (x1 + t * dx, y1 + t * dy)
        return (point, point)
    # secant: two distinct crossing points
    roots = ((-b + np.sqrt(disc)) / (2 * a),
             (-b - np.sqrt(disc)) / (2 * a))
    return tuple((x1 + t * dx, y1 + t * dy) for t in roots)
+
def circle_closest_point(xp, yp, xc, yc, r, n=1000, display=False):
    """
    Find the point on a discretised circle closest to a reference point.

    xp (float): The x-coordinate of the reference point.
    yp (float): The y-coordinate of the reference point.
    xc (float): The x-coordinate of the circle's center.
    yc (float): The y-coordinate of the circle's center.
    r (float): The radius of the circle.
    n (int): The number of discrete points used to draw the circle.
    display (Boolean): Set to True to output figures and information.

    Returns:
        The distance between the reference point and the circle edge, and
        the coordinates of the closest point on the circle edge.

    """
    # sample the circle with n points over one full revolution
    t = np.linspace(0, 2 * np.pi, n)

    x = xc + r * np.cos(t)
    y = yc + r * np.sin(t)

    dist = np.sqrt((x - xp) ** 2 + (y - yp) ** 2)
    # np.argmin replaces the previous list(dist).index(min(dist)) round-trip;
    # both return the first index of the minimum, argmin avoids two O(n)
    # conversions per call.
    idx = int(np.argmin(dist))

    if display:
        plt.figure(1)
        plt.plot(x, y, '-', xp, yp, 'r+', x[idx], y[idx], 'r+')
        plt.xlabel('x')
        plt.ylabel('y')
        plt.title('Circle, Point, and Zeros')

        plt.figure(2)
        plt.plot(t, dist, 'm.', t[idx], dist[idx], 'cx')
        plt.xlabel('t')
        plt.ylabel('Distance')
        plt.title('Distance Function')

        print(f'xp: {xp}, x[idx]: {x[idx]}')
        print(f'yp: {yp}, y[idx]: {y[idx]}')
        print('Error is: ', dist[idx])

        plt.show()
        plt.close()

    return dist[idx], [x[idx], y[idx]]
+
def circle_fit_errors(contour, h, k, r, display=False):
    """
    Summarise how far a contour deviates from a fitted circle.

    Parameters:
        contour (array): The array of x, y coordinate points.
        h (float): The x-coordinate of the circle's center.
        k (float): The y-coordinate of the circle's center.
        r (float): The radius of the circle.
        display (boolean): Set to true to show figures.

    Returns:
        dictionary: The MAE, MSE, RMSE, and maximum error of the contour as
        compared against the fitted circle.
    """
    # per-point distance from the contour to the (discretised) circle edge
    errors = []
    for px, py in contour:
        gap, _closest = circle_closest_point(px, py, h, k, r, display=display)
        errors.append(gap)

    count = len(errors)
    mse = sum(e ** 2 for e in errors) / count

    return {
        'MAE': sum(abs(e) for e in errors) / count,
        'MSE': mse,
        'RMSE': np.sqrt(mse),
        'Maximum error': max(errors),
    }
+
def circular_fit_img(img, display=False):
    """Perform the circular fit on a drop image and compute contact angles.

    For best results, preprocessing (cropping etc.) must be done before
    calling this function.

    Parameters:
        img (np.ndarray): BGR image of the drop.
        display (bool): Set to True to show intermediate and final figures.

    Returns:
        CA (list): left and right contact angles in degrees.
        center_2 (list): fitted circle centre [x, y], rounded to whole pixels.
        R_2 (int): fitted circle radius, rounded to whole pixels.
        intercepts (list): the two baseline/circle intersection points, sorted by x.
        errors (dict): MAE/MSE/RMSE/maximum error of the contour against the fit.
        timings (dict): wall-clock timings of the analysis stages.
    """
    # begin with method specific preprocessing of img data
    start_time = time.time()

    edges_pts = extract_edges_CV(img)  # array of x,y coords where lines are detected

    if display:
        plt.imshow(img)
        plt.plot(edges_pts[:, 0], edges_pts[:, 1])
        plt.title('drop found by hough transform')
        plt.show()
        plt.close()

    # BUG FIX: display was previously passed positionally, so it was bound to
    # prepare_hydrophobic's `xi` clustering parameter instead of `display`.
    drop, CPs = prepare_hydrophobic(edges_pts, display=display)

    # define the baseline as the line between the two contact points
    x = drop[:, 0]
    y = drop[:, 1]
    rise = CPs[1][1] - CPs[0][1]
    run = CPs[1][0] - CPs[0][0]
    slope = rise / run
    c = CPs[0][1] - (slope * CPs[0][0])
    baseline_x = np.linspace(1, img.shape[1], 100)
    baseline_y = slope * baseline_x + c

    # timers
    fit_preprocessing_time = time.time() - start_time
    fit_start_time = time.time()

    # Centre estimates:
    # x estimate is halfway between the lowest and highest x of the contour
    x_m = min(x) + (max(x) - min(x)) / 2
    # for a full contour, y estimate is halfway between max y and min y
    y_m = min(y) + ((max(y) - min(y)) / 2)

    # perform circular fit
    def calc_R(xc, yc):
        """calculate the distance of each 2D point from the center (xc, yc)"""
        return np.sqrt((x - xc) ** 2 + (y - yc) ** 2)

    def f_2(c):
        """calculate the algebraic distance between the data points and the mean circle centered at c=(xc, yc)"""
        Ri = calc_R(*c)
        return Ri - Ri.mean()

    center_estimate = x_m, y_m
    center_2, ier = opt.leastsq(f_2, center_estimate)

    xc_2, yc_2 = center_2
    Ri_2 = np.sqrt((x - xc_2) ** 2 + (y - yc_2) ** 2)
    R_2 = Ri_2.mean()
    residu_2 = sum((Ri_2 - R_2) ** 2)  # sum of squared residuals (diagnostic only)

    # round circle parameters so that the fit works for pixelated points
    center_2 = [round(xc_2), round(yc_2)]
    R_2 = round(R_2)

    if display:
        print('center: ', center_2)
        print('radius: ', R_2)
        print('contact points: ', CPs)

    # where the fitted circle crosses the baseline through the contact points
    a, b = intersection(center_2, R_2, CPs[0], CPs[1])
    intercepts = sorted([a, b], key=lambda pt: pt[0])

    CA = []
    # BUG FIX: `first` used to be reset to True at the top of every loop
    # iteration, which made the right-contact-point branches unreachable.
    # It is now initialised once, before the loop.
    first = True
    for CP_2 in intercepts:
        # vector from the fitted centre to the intercept
        Vx = CP_2[0] - xc_2
        Vy = CP_2[1] - yc_2

        # slope of that radius vector
        mV = Vy / Vx
        # the tangent to the circle is normal to the radius
        norm = -1 / mV
        # tangent slope relative to the baseline
        m = norm - slope

        if CP_2[1] > center_2[1] and first:  # high CA angle, left contact point
            CA.append(180 - abs(math.degrees(np.arctan(m))))
        elif CP_2[1] > center_2[1] and not first:  # high CA angle, right contact point
            CA.append(abs(math.degrees(np.arctan(m))))
        elif CP_2[1] < center_2[1] and first:  # low CA angle, left contact point
            CA.append(abs(np.rad2deg(np.arctan(m))))
        elif CP_2[1] < center_2[1] and not first:  # low CA angle, right contact point
            CA.append(abs(np.rad2deg(np.pi + np.arctan(m))))
        else:
            # intercept exactly level with the circle centre
            CA.append(90)

        first = False

    fit_time = time.time() - fit_start_time

    errors = circle_fit_errors(drop, center_2[0], center_2[1], R_2)

    if display:  # show fitted circle
        circle1 = plt.Circle((center_2[0], center_2[1]), 2, color='r')
        # a circle with no fill, which is good for highlighting key results
        circle2 = plt.Circle((center_2[0], center_2[1]), R_2, color='r', fill=False)

        ax = plt.gca()
        ax.axis('equal')

        ax.add_patch(circle1)
        ax.add_patch(circle2)

        fig = plt.gcf()
        fig.set_size_inches(10, 10)

        plt.imshow(img)
        plt.title("fitted circle \nRMSE of " + str(errors['RMSE']))
        plt.plot(baseline_x, baseline_y, 'r')
        plt.plot(CPs[0][0], CPs[0][1], 'o', color='green', label='Left CP')
        plt.plot(CPs[1][0], CPs[1][1], 'o', color='orange', label='Right CP')
        plt.plot(intercepts[0][0], intercepts[0][1], 'o', color='magenta', label='Intercepts')
        plt.plot(intercepts[1][0], intercepts[1][1], 'o', color='magenta')
        plt.legend()
        plt.show()
        plt.close()

    analysis_time = time.time() - start_time

    timings = {
        'method specific preprocessing time': fit_preprocessing_time,
        'fit time': fit_time,
        'analysis time': analysis_time,
    }

    return CA, center_2, R_2, intercepts, errors, timings
+
def circular_fit(drop, display=False):
    """Perform the circular fit on an already-extracted drop contour.

    For best results, preprocessing must be done before calling this function.

    Parameters:
        drop (np.ndarray): (n, 2) array of ordered contour points; the first
            and last points are taken to be the left and right contact points.
        display (bool): Set to True to print fit details and show the fit.

    Returns:
        CA (list): left and right contact angles in degrees.
        center_2 (list): fitted circle centre [x, y], rounded to whole pixels.
        R_2 (int): fitted circle radius, rounded to whole pixels.
        intercepts (list): the two baseline/circle intersection points, sorted by x.
        errors (dict): MAE/MSE/RMSE/maximum error of the contour against the fit.
        timings (dict): wall-clock timings of the analysis stages.
    """
    start_time = time.time()

    # contact points are assumed to be the contour end points
    CPs = [drop[0], drop[-1]]

    # define the baseline as the line between the two contact points
    x, y = drop[:, 0], drop[:, 1]
    rise = CPs[1][1] - CPs[0][1]
    run = CPs[1][0] - CPs[0][0]
    slope = rise / run
    c = CPs[0][1] - (slope * CPs[0][0])
    baseline_x = np.linspace(1, max(drop[:, 0]), 100)
    baseline_y = slope * baseline_x + c

    # Centre estimates:
    # x estimate is halfway between the lowest and highest x of the contour
    x_m = min(x) + (max(x) - min(x)) / 2
    # for a full contour, y estimate is halfway between max y and min y
    y_m = min(y) + ((max(y) - min(y)) / 2)

    # perform circular fit
    def calc_R(xc, yc):
        """calculate the distance of each 2D point from the center (xc, yc)"""
        return np.sqrt((x - xc) ** 2 + (y - yc) ** 2)

    def f_2(c):
        """calculate the algebraic distance between the data points and the mean circle centered at c=(xc, yc)"""
        Ri = calc_R(*c)
        return Ri - Ri.mean()

    center_estimate = x_m, y_m
    center_2, ier = opt.leastsq(f_2, center_estimate)

    xc_2, yc_2 = center_2
    Ri_2 = np.sqrt((x - xc_2) ** 2 + (y - yc_2) ** 2)
    R_2 = Ri_2.mean()
    residu_2 = sum((Ri_2 - R_2) ** 2)  # sum of squared residuals (diagnostic only)

    # round circle parameters so that the fit works for pixelated points
    center_2 = [round(xc_2), round(yc_2)]
    R_2 = round(R_2)

    if display:
        print('center: ', center_2)
        print('radius: ', R_2)
        print('contact points: ', CPs)

    # where the fitted circle crosses the baseline through the contact points
    a, b = intersection(center_2, R_2, CPs[0], CPs[1])
    intercepts = sorted([a, b], key=lambda pt: pt[0])

    CA = []
    # BUG FIX: `first` used to be reset to True at the top of every loop
    # iteration, which made the right-contact-point branches unreachable.
    # It is now initialised once, before the loop.
    first = True
    for CP_2 in intercepts:
        # vector from the fitted centre to the intercept
        Vx = CP_2[0] - xc_2
        Vy = CP_2[1] - yc_2

        # slope of that radius vector
        mV = Vy / Vx
        # the tangent to the circle is normal to the radius
        norm = -1 / mV
        # tangent slope relative to the baseline
        m = norm - slope

        if CP_2[1] > center_2[1] and first:  # high CA angle, left contact point
            CA.append(180 - abs(math.degrees(np.arctan(m))))
        elif CP_2[1] > center_2[1] and not first:  # high CA angle, right contact point
            CA.append(abs(math.degrees(np.arctan(m))))
        elif CP_2[1] < center_2[1] and first:  # low CA angle, left contact point
            CA.append(abs(np.rad2deg(np.arctan(m))))
        elif CP_2[1] < center_2[1] and not first:  # low CA angle, right contact point
            CA.append(abs(np.rad2deg(np.pi + np.arctan(m))))
        else:
            # intercept exactly level with the circle centre
            CA.append(90)

        first = False

    fit_time = time.time() - start_time

    errors = circle_fit_errors(drop, center_2[0], center_2[1], R_2)

    if display:  # show fitted circle
        circle1 = plt.Circle((center_2[0], center_2[1]), 2, color='r')
        # a circle with no fill, which is good for highlighting key results
        circle2 = plt.Circle((center_2[0], center_2[1]), R_2, color='r', fill=False)

        ax = plt.gca()
        ax.axis('equal')

        ax.add_patch(circle1)
        ax.add_patch(circle2)

        fig = plt.gcf()
        fig.set_size_inches(10, 10)

        plt.title("fitted circle \nRMSE of " + str(errors['RMSE']))
        plt.plot(baseline_x, baseline_y, 'r')
        plt.plot(CPs[0][0], CPs[0][1], 'o', color='green', label='Left CP')
        plt.plot(CPs[1][0], CPs[1][1], 'o', color='orange', label='Right CP')
        plt.plot(intercepts[0][0], intercepts[0][1], 'o', color='magenta', label='Intercepts')
        plt.plot(intercepts[1][0], intercepts[1][1], 'o', color='magenta')
        plt.legend()
        plt.gca().invert_yaxis()
        plt.show()
        plt.close()

    analysis_time = time.time() - start_time

    timings = {
        'fit time': fit_time,
        'analysis time': analysis_time,
    }

    return CA, center_2, R_2, intercepts, errors, timings
+
+
# Disabled manual smoke test: change `if 0` to `if 1` (or, better, guard with
# `if __name__ == '__main__':`) to run the circular fit against a sample image.
if 0:
    IMG_PATH = '../RICOphobic_cropped.png'
    img = cv2.imread(IMG_PATH)
    angles, centre, R_2, intercepts, errors, timings = circular_fit_img(img,display=False)

    if 1:
        print('angles: ', angles)
        print('circle centre: ', centre)
        print('circle radius: ', R_2)
        print('intercept points: ', intercepts)
        print('errors: ', errors)
        print('timings: ', timings)

    print('done')
diff --git a/modules/classes.py b/modules/classes.py
new file mode 100644
index 0000000..7220161
--- /dev/null
+++ b/modules/classes.py
@@ -0,0 +1,238 @@
+#!/usr/bin/env python
+# coding=utf-8
+from .de_YoungLaplace import ylderiv
+from .de_YoungLaplace import dataderiv
+#from .interpolation_function import cubic_interpolation_function
+from scipy.integrate import odeint
+
+import numpy as np
+
class Tolerances(object):
    """Bundle of convergence tolerances and iteration limits for the
    Young--Laplace fitting routines.

    Attribute names are upper-case to signal that they are fixed settings
    once the object is constructed.
    """

    def __init__(self, delta_tol, gradient_tol, maximum_fitting_steps, objective_tol, arclength_tol, maximum_arclength_steps, needle_tol, needle_steps):
        # (attribute name, value) pairs, assigned in one pass
        settings = (
            ('DELTA_TOL', delta_tol),
            ('GRADIENT_TOL', gradient_tol),
            ('MAXIMUM_FITTING_STEPS', maximum_fitting_steps),
            ('OBJECTIVE_TOL', objective_tol),
            ('ARCLENGTH_TOL', arclength_tol),
            ('MAXIMUM_ARCLENGTH_STEPS', maximum_arclength_steps),
            ('NEEDLE_TOL', needle_tol),
            ('NEEDLE_STEPS', needle_steps),
        )
        for attr, value in settings:
            setattr(self, attr, value)
+
+
+# class ExperimentalSetup(object):
+# def __init__(self):
+# self.density_drop = None
+# self.density_outer = None
+# self.needle_diameter_m = None
+# self.plot_residuals = None
+# self.plot_profiles = None
+# self.plot_IFT = None
+# self.img_src = None
+# self.total_images = None
+# self.wait_time = None
+# self.save_images = None
+# self.filename = None
+
class ExperimentalSetup(object):
    """Holds the user-configured acquisition and analysis settings.

    Every field starts as ``None`` and is populated later by the UI /
    configuration code.
    """

    def __init__(self):
        field_names = (
            'screen_resolution', 'drop_ID_method', 'threshold_method',
            'threshold_val', 'baseline_method', 'edgefinder',
            'continuous_density', 'needle_diameter_mm', 'residuals_boole',
            'profiles_boole', 'interfacial_tension_boole', 'image_source',
            'number_of_frames', 'wait_time', 'save_images_boole',
            'filename', 'time_string', 'local_files', 'drop_region',
            'needle_region',
        )
        for name in field_names:
            setattr(self, name, None)
+
class ExperimentalDrop(object):
    """Per-frame measurement state for a single analysed drop image."""

    def __init__(self):
        # Unpopulated fields: image data, extracted contours, and results
        # filled in by the processing pipeline.
        for name in ('image', 'cropped_image', 'contour', 'drop_contour',
                     'contact_points'):
            setattr(self, name, None)
        # Fresh dict per instance: contact-angle results keyed by method.
        self.contact_angles = {}
        for name in ('needle_data', 'surface_data', 'ret', 'time',
                     'pixels_to_mm'):
            setattr(self, name, None)
+
+
class DropData(object):
    """State of the Young--Laplace fit for a single drop.

    The theoretical profile (``theoretical_data``) is regenerated
    automatically whenever ``params``, ``max_s`` or ``s_points`` changes,
    once all three have been set.
    """

    def __init__(self):
        self.previous_guess = None
        self.previous_params = None
        self._params = None            # fitted parameter vector; see bond()/apex_radius()
        self._max_s = None             # maximum arc length to integrate to
        self._s_points = 200           # number of integration intervals
        self.theoretical_data = None   # odeint output over s in [0, max_s]
        self.parameter_dimensions = 5  # required length of the params vector
        self.residuals = None
        self.arc_lengths = None
        self.needle_diameter_pixels = None
        self.s_left = None
        self.s_right = None

    # generates a new drop profile
    def generate_profile_data(self):
        """Integrate the Young--Laplace system to (re)build the profile.

        No-op until ``params``, ``max_s`` and ``s_points`` are all set.
        """
        if (self._max_s is None) or (self._s_points is None) or (self._params is None):
            return
        s_data_points = np.linspace(0, self.max_s, self.s_points + 1)
        # Start marginally off the apex: x = 0 is a singularity of the ODE.
        x_vec_initial = [.000001, 0., 0., 0., 0., 0.]
        bond_number = self.bond()
        self.theoretical_data = odeint(ylderiv, x_vec_initial, s_data_points, args=(bond_number,))

    @property
    def params(self):
        return self._params

    @params.setter
    def params(self, vector):
        if len(vector) != self.parameter_dimensions:
            raise ValueError("Parameter array incorrect dimensions")
        self._params = vector
        self.generate_profile_data()  # profile depends on the parameters

    @property
    def max_s(self):
        return self._max_s

    @max_s.setter
    def max_s(self, value):
        if value <= 0:
            raise ValueError("Maximum arc length must be positive")
        self._max_s = float(value)
        self.generate_profile_data()  # profile depends on the arc-length range

    @property
    def s_points(self):
        return self._s_points

    @s_points.setter
    def s_points(self, value):
        # Type check FIRST: the original compared `value <= 1` before the
        # isinstance check, so a non-numeric value died with a raw TypeError
        # instead of the intended ValueError.
        if not isinstance(value, int):
            raise ValueError("Number of points must be an integer")
        if value <= 1:
            # message now matches the actual `<= 1` condition
            raise ValueError("Number of points must be greater than one")
        self._s_points = value
        self.generate_profile_data()  # profile depends on the resolution

    # returns the Bond number
    def bond(self):
        """Return the Bond number (4th entry of the parameter vector)."""
        return self.params[3]

    # returns the apex radius
    def apex_radius(self):
        """Return the apex radius (3rd entry of the parameter vector)."""
        return self.params[2]
diff --git a/modules/classes.pyc b/modules/classes.pyc
new file mode 100644
index 0000000..a19d2a4
Binary files /dev/null and b/modules/classes.pyc differ
diff --git a/modules/classes.py~ b/modules/classes.py~
new file mode 100644
index 0000000..5392610
--- /dev/null
+++ b/modules/classes.py~
@@ -0,0 +1,236 @@
+#!/usr/bin/env python
+# coding=utf-8
+from .de_YoungLaplace import ylderiv
+from .de_YoungLaplace import dataderiv
+from interpolation_function import cubic_interpolation_function
+from scipy.integrate import odeint
+
+import numpy as np
+
+class Tolerances(object):
+ def __init__(self, delta_tol, gradient_tol, maximum_fitting_steps, objective_tol, arclength_tol, maximum_arclength_steps, needle_tol, needle_steps):
+ self.DELTA_TOL = delta_tol
+ self.GRADIENT_TOL = gradient_tol
+ self.MAXIMUM_FITTING_STEPS = maximum_fitting_steps
+ self.OBJECTIVE_TOL = objective_tol
+ self.ARCLENGTH_TOL = arclength_tol
+ self.MAXIMUM_ARCLENGTH_STEPS = maximum_arclength_steps
+ self.NEEDLE_TOL = needle_tol
+ self.NEEDLE_STEPS = needle_steps
+
+
+# class ExperimentalSetup(object):
+# def __init__(self):
+# self.density_drop = None
+# self.density_outer = None
+# self.needle_diameter_m = None
+# self.plot_residuals = None
+# self.plot_profiles = None
+# self.plot_IFT = None
+# self.img_src = None
+# self.total_images = None
+# self.wait_time = None
+# self.save_images = None
+# self.filename = None
+
+class ExperimentalSetup(object):
+ def __init__(self):
+ self.screen_resolution = None
+ self.threshold_val = None
+ self.continuous_density = None
+ self.needle_diameter_mm = None
+ self.residuals_boole = None
+ self.profiles_boole = None
+ self.interfacial_tension_boole = None
+ self.image_source = None
+ self.number_of_frames = None
+ self.wait_time = None
+ self.save_images_boole = None
+ self.filename = None
+ self.time_string = None
+ self.local_files = None
+ self.drop_region = None
+ self.needle_region = None
+
+class ExperimentalDrop(object):
+ def __init__(self):
+ self.image = None
+ self.drop_data = None
+ self.contact_angles = None
+ self.needle_data = None
+ self.surface_data = None
+ self.ret = None
+ self.time = None
+ self.pixels_to_mm = None
+ # self.time_full = None
+ # self.filename = None
+ # self.img_src = 2
+
+
+class DropData(object):
+ def __init__(self):
+ self.previous_guess = None
+ self.previous_params = None
+ self._params = None
+ self._max_s = None
+ self._s_points = 200
+ self.theoretical_data = None
+ self.parameter_dimensions = 5
+ self.residuals = None
+ self.arc_lengths = None
+ # self.fitted = False
+ self.needle_diameter_pixels = None
+ self.s_left = None
+ self.s_right = None
+ # self.s_0 = None
+ # self.start_time = None
+ # # self.rho_drop = None
+ # # self.rho_outer = None
+ # # # capPX = 206 # radius of the capillary in pixels
+ # # # capM = 1.651 / 1000. # radius of capillary in meters
+ # # # pix2m = capM / capPX
+ # self.rho_drop = 1000.
+ # self.rho_outer = 0.
+ # self.pix2m = 0.000008
+ # self.needle_diameter_pixels = None
+ # self.needle_diameter_m = None
+ # self.number_experiments = None
+ # self.wait_time = None
+
+
+ # interpolates the theoretical profile data
+ def profile(self, s):
+ if (s < 0):
+ raise ValueError("s value outside domain")
+ if (s > self.max_s):
+ # if the profile is called outside of the current region, expand
+ self.max_s = 1.2 * s # expand region to include s_max
+ Delta_s = self.max_s / self.s_points
+ n1 = int(s / Delta_s)
+ n2 = n1 + 1
+ t = s / Delta_s - n1
+ vec1 = np.array(self.theoretical_data[n1])
+ vec2 = np.array(self.theoretical_data[n2])
+ bond_number = self.bond()
+ Dvec1 = np.array(ylderiv(vec1, 0, bond_number))
+ Dvec2 = np.array(ylderiv(vec2, 0, bond_number))
+ value_at_s = cubic_interpolation_function(vec1, vec2, Dvec1, Dvec2, Delta_s, t)
+ return value_at_s
+
+ # generates a new drop profile
+ def generate_profile_data(self):
+ if (self._max_s is not None) and (self._s_points is not None) and (self._params is not None):
+ # if [self.max_s, self.s_points, self.params].all():
+ # self.fitted = False
+ # s_data_points = np.arange(0, self.max_s*(1+2/self.s_points), self.max_s/self.s_points)
+ s_data_points = np.linspace(0, self.max_s, self.s_points + 1)
+
+ x_vec_initial = [.000001, 0., 0., 0., 0., 0.]
+ bond_number = self.bond()
+ self.theoretical_data = odeint(ylderiv, x_vec_initial, s_data_points, args=(bond_number,))
+
+ # # generates a new drop profile
+ # def generate_profile_volume_area_data(self):
+ # s_data_points = np.linspace(0, self.max_s, self.s_points + 1)
+ # x_vec_initial = [.000001, 0., 0., 0., 0.]
+ # bond_number = self.bond()
+ # self.original_data = odeint(dataderiv, x_vec_initial, s_data_points, args=(bond_number,))[-1,-2:]
+
+ # # interpolates the theoretical profile data
+ # def profile(self):
+ # s_needle = self.determine
+ # Delta_s = self.s_needle() / self.s_points
+ # n1 = int(s / Delta_s)
+ # n2 = n1 + 1
+ # t = s / Delta_s - n1
+ # vec1 = np.array(self.theoretical_data[n1])
+ # vec2 = np.array(self.theoretical_data[n2])
+ # bond_number = self.bond()
+ # Dvec1 = np.array(ylderiv(vec1, 0, bond_number))
+ # Dvec2 = np.array(ylderiv(vec2, 0, bond_number))
+ # value_at_s = cubic_interpolation_function(vec1, vec2, Dvec1, Dvec2, Delta_s, t)
+ # return value_at_s
+
+ # def s_needle(self):
+ # return 100
+
+
+ # generate new profile when params are changed
+ @property
+ def params(self):
+ return self._params
+
+ @params.setter
+ def params(self, vector):
+ if len(vector) != self.parameter_dimensions:
+ raise ValueError("Parameter array incorrect dimensions")
+ self._params = vector
+ self.generate_profile_data() # generate new profile when the parameters are changed
+
+ # generate new profile when max_s is changed
+ @property
+ def max_s(self):
+ return self._max_s
+
+ @max_s.setter
+ def max_s(self, value):
+ if value <= 0:
+ raise ValueError("Maximum arc length must be positive")
+ self._max_s = float(value)
+ self.generate_profile_data() # generate new profile when the maximum arc length is changed
+
+ # test validity of variable s_points + generate new profile when s_points are
+ @property
+ def s_points(self):
+ return self._s_points
+
+ @s_points.setter
+ def s_points(self, value):
+ if value <= 1:
+ raise ValueError("Number of points must be positive")
+ if not isinstance(value, int):
+ raise ValueError("Number of points must be an integer")
+ self._s_points = value
+ self.generate_profile_data() # generate new profile when the maximum arc length is changed
+
+ # def calculate_interfacial_tension(self):
+ # if self.fitted:
+ # GRAVITY = 9.80035 # gravitational acceleration in Melbourne, Australia
+ # D_rho = self.rho_drop - self.rho_outer
+ # a_radius = self.apex_radius() * self.pix2m
+ # Bond = self.bond()
+ # gamma_IFT = D_rho * GRAVITY * a_radius**2 / Bond
+ # # return gamma_IFT
+ # gamma_IFT_mN = 1000 * gamma_IFT
+ # return gamma_IFT_mN
+ # else:
+ # print('ERROR: drop profile not yet fitted')
+ # return None
+
+ # returns the Bond number
+ def bond(self):
+ return self.params[3]
+
+ # returns the apex radius
+ def apex_radius(self):
+ return self.params[2]
+
+ # # returns the pixel to meter conversion
+ # def pixel_to_mm(self):
+ # pix2m = self.needle_diameter_mm / self.needle_diemeter_pixels
+ # return pix2m
+
+ # # returns the pixel to meter conversion
+ # def fitted_vol_area(self):
+ # s_needle = self.max_s
+ # s_data_points = np.linspace(0, s_needle, self.s_points + 1)
+ # # EPS = .000001 # need to use Bessel function Taylor expansion below
+ # bond_number = self.bond()
+ # x_vec_initial = [.000001, 0., 0., 0., 0.]
+ # vol_sur = odeint(dataderiv, x_vec_initial, s_data_points, args=(bond_number,))[-1,-2:]
+ # return vol_sur
+
+
+
+
+
diff --git a/modules/de_YoungLaplace.py b/modules/de_YoungLaplace.py
new file mode 100644
index 0000000..536f011
--- /dev/null
+++ b/modules/de_YoungLaplace.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+#coding=utf-8
+
+from math import pi
+from numpy import sin, cos
+
+# minimise calls to sin() and cos()
+# defines the Young--Laplace system of differential equations to be solved
def ylderiv(x_vec, t, bond_number):
    """Right-hand side of the Young--Laplace system, augmented with its
    Bond-number sensitivity equations, in the form expected by odeint.

    x_vec : [x, y, phi, x_Bond, y_Bond, phi_Bond]
    t : unused (odeint signature requirement)
    bond_number : Bond number of the drop
    """
    x, y, phi, x_Bond, y_Bond, phi_Bond = x_vec
    # evaluate each trig term exactly once
    cos_phi = cos(phi)
    sin_phi = sin(phi)
    return [
        cos_phi,
        sin_phi,
        # sign of the Bond term flipped to `2 +` for contact-angle work
        # (see DS 16/09 note in the original)
        2 + bond_number * y - sin_phi / x,
        -sin_phi * phi_Bond,
        cos_phi * phi_Bond,
        sin_phi * x_Bond / (x * x) - cos_phi * phi_Bond / x - y - bond_number * y_Bond,
    ]
+
+# defines the Young--Laplace system of differential equations to be solved
+def dataderiv(x_vec, t, bond_number):
+ x, y, phi, vol, sur = x_vec
+ x_s = cos(phi)
+ y_s = sin(phi)
+ phi_s = 2 - bond_number * y - sin(phi)/x
+ vol_s = pi * x**2 * y_s
+ sur_s = 2 * pi * x
+ return [x_s, y_s, phi_s, vol_s, sur_s]
+
+# # defines the Young--Laplace system of differential equations to be solved
+# def ylderiv(x_vec, t):
+# x, y, phi, x_Bond, y_Bond, phi_Bond = x_vec
+# x_s = cos(phi)
+# y_s = sin(phi)
+# phi_s = 2 - bond_number * y - sin(phi)/x
+# x_Bond_s = -sin(phi)*phi_Bond
+# y_Bond_s = cos(phi)*phi_Bond
+# phi_Bond_s = sin(phi) * x_Bond / (x*x) - cos(phi) * phi_Bond / x - y - bond_number * y_Bond
+# return [x_s, y_s, phi_s, x_Bond_s, y_Bond_s, phi_Bond_s]
diff --git a/modules/de_YoungLaplace.pyc b/modules/de_YoungLaplace.pyc
new file mode 100644
index 0000000..beea11b
Binary files /dev/null and b/modules/de_YoungLaplace.pyc differ
diff --git a/modules/ellipse_fit.py b/modules/ellipse_fit.py
new file mode 100644
index 0000000..591ec21
--- /dev/null
+++ b/modules/ellipse_fit.py
@@ -0,0 +1,1222 @@
+"""This code serves as a discrete instance of the elipse fit method of
+contact angle analysis.
+
+This is base on the circular fit code taken from the most recent version
+of conan - conan-ML_cv1.1/modules/select_regions.py"""
+
+from sklearn.cluster import OPTICS # for clustering algorithm
+import scipy.optimize as opt
+import numba
+from scipy.spatial import distance
+from scipy.integrate import solve_ivp
+import numpy as np
+import matplotlib.pyplot as plt
+import cv2
+from matplotlib.patches import Ellipse
+import math
+import time
+
def cluster_OPTICS(sample, out_style='coords', xi=None, eps=None, verbose=0):
    """Cluster 2-D points [[x1, y1], ..., [xn, yn]] with sklearn's OPTICS.

    Exactly one of ``xi`` and ``eps`` must be given:
      * ``xi``  -- xi extraction: clusters bounded by minimum steepness on the
        reachability plot (an upward point has successor ratio at most 1-xi).
      * ``eps`` -- dbscan extraction: ``eps`` is the maximum distance between
        two samples for one to be in the neighbourhood of the other.

    Returns:
      If ``out_style == 'coords'``: {label: [[x, y], ...]} per cluster.
      If ``out_style == 'xy'``: two entries per cluster, '<label>x' and
      '<label>y', holding the coordinate lists.
      (Any other ``out_style`` returns None, as before.)

    Raises:
      ValueError: if neither or both of ``xi`` and ``eps`` are supplied.

    https://stackoverflow.com/questions/47974874/algorithm-for-grouping-points-in-given-distance
    https://scikit-learn.org/stable/modules/generated/sklearn.cluster.OPTICS.html
    """
    if eps is not None and xi is None:
        # cluster_method 'dbscan' so that eps is honoured
        clustering = OPTICS(min_samples=2, cluster_method='dbscan', eps=eps).fit(sample)
    elif xi is not None and eps is None:
        clustering = OPTICS(min_samples=2, xi=xi).fit(sample)
    else:
        # BUG FIX: the original raised a plain string, which is itself a
        # TypeError in Python 3 and masked the real problem.
        raise ValueError('Error: only one of eps and xi can be chosen but not neither nor both')

    labels = clustering.labels_
    groups = list(set(labels))

    if verbose == 2:
        print(labels)
    elif verbose == 1:
        print(groups)

    # group points by cluster label in a single pass (the original rescanned
    # the whole sample once per group)
    dic = {n: [] for n in groups}
    for point, label in zip(sample, labels):
        dic[label].append(point)

    # flat x/y lists per cluster, keyed '<label>x' / '<label>y'
    dic2 = {}
    for k, points in dic.items():
        dic2[str(k) + 'x'] = [p[0] for p in points]
        dic2[str(k) + 'y'] = [p[1] for p in points]

    if out_style == 'coords':
        return dic
    elif out_style == 'xy':
        return dic2
+
def distance1(P1, P2):
    """Euclidean distance between the points P1 = (x1, y1) and P2 = (x2, y2)."""
    dx = P1[0] - P2[0]
    dy = P1[1] - P2[1]
    return (dx * dx + dy * dy) ** 0.5
+
def optimized_path(coords, start=None, max_jump=5):
    """Order 2-D points into a path by greedy nearest-neighbour walking.

    coords : list or array of [x, y] points.
    start : optional starting point; defaults to the first coordinate.
    max_jump : consecutive path points further apart than this are taken to
        be an ordering mistake, and the path is truncated just before the
        first such jump (generalizes the previously hard-coded 5).

    Returns an (n, 2) numpy array of the ordered (possibly truncated) path.

    https://stackoverflow.com/questions/45829155/sort-points-in-order-to-have-a-continuous-curve-using-python
    """
    if not isinstance(coords, list):
        coords = coords.tolist()
    if start is None:
        start = coords[0]

    # BUG FIX: the original did `pass_by = coords`, emptying the CALLER's
    # list as a side effect; work on a copy instead.
    remaining = list(coords)
    path = [start]
    remaining.remove(start)
    while remaining:
        last = path[-1]  # hoisted: invariant within one min() scan
        nearest = min(remaining, key=lambda p: math.dist(last, p))
        path.append(nearest)
        remaining.remove(nearest)
    path = np.array(path)

    # A large gap between consecutive points means the greedy ordering
    # failed somewhere; everything from the first such jump on is discarded.
    for i in range(len(path) - 1):
        if math.dist(path[i], path[i + 1]) > max_jump:
            return path[:i]
    return path
+
+def prepare_hydrophobic(coords,xi=0.8,cluster=False,display=False):
+ """takes an array (n,2) of coordinate points, and returns the left and right halfdrops of the contour.
+ xi determines the minimum steepness on the reachability plot that constitutes a cluster boundary of the
+ clustering algorithm.
+ deg is the degree of the polynomial used to describe the shape of the droplet.
+
+ This code is adapted from the prepare module, but this version differs in that it assumes that the drop
+ is hydrophobic."""
+ coords = coords.astype(np.float)
+ # scan for clusers to remove noise and circle from lensing effect
+ ################ MAY NEED TO OPTIMIZE eps/xi TO FIND APPROPRIATE GROUPINGS ####################
+ if cluster: # turn this off bc using synthetic drops without lensing effect
+ input_contour = coords
+ dic,dic2 = cluster_OPTICS(input_contour,xi=xi),cluster_OPTICS(input_contour,out_style='xy',xi=xi)
+
+ #print("number of groups: ",len(list(dic.keys())))
+
+ jet= plt.get_cmap('jet')
+ colors = iter(jet(np.linspace(0,1,len(list(dic.keys())))))
+ for k in dic.keys():
+ plt.plot(dic2[str(k)+'x'],dic2[str(k)+'y'], 'o',color=next(colors))
+ plt.title(str(len(dic.keys()))+" groups found by clustering")
+ plt.show()
+ plt.close()
+ maxkey=max(dic, key=lambda k: len(dic[k]))
+
+ #print('key to longest dictionary entry is: ',maxkey)
+
+ # take the longest group
+ longest = dic[maxkey]
+
+ # flip contour so that min and max values are correct
+ for coord in coords:
+ coord[1] = -coord[1]
+
+ longest = coords
+
+ #print("first few coordinates of the longest contour: ",longest[:3])
+
+ xlongest = []
+ ylongest = []
+ for i in range(len(longest)):
+ xlongest.append(longest[i][0])
+ ylongest.append(longest[i][1])
+
+ #print("first few x coordinates of the longest contour: ",xlongest[:3])
+ #print("first few y coordinates of the longest contour: ",ylongest[:3])
+
+
+ # Find a appropriate epsilon value for cluster_OPTICS, this will remove noise in the bottom 10% of the drop
+ #. most importantly noise is reduced at contact points.
+
+ # variables in this process are how much and what part of the top of the droplet we use to be representative of
+ # the full contour, and whether we use the max(distance) between points or the average between points, or
+ # a scalar value of either.
+
+ xtop = [] # isolate top 90% of drop
+ ytop = []
+ percent = 0.3
+ #print('Isolate the top ',100-(percent*100),'% of the contour:')
+ for n,y in enumerate(ylongest):
+ if y > min(ylongest) + (max(ylongest) - min(ylongest))*percent:
+ xtop.append(xlongest[n])
+ ytop.append(y)
+ xtop = np.array(xtop)
+ ytop = np.array(ytop)
+
+ top = []
+ for n,x in enumerate(xtop):
+ top.append([xtop[n],ytop[n]])
+ top = np.array(top)
+ top_array = optimized_path(top)
+
+ dists = [] # find the average distance between consecutive points
+ for n,co in enumerate(top_array):
+ if 1 min(ylongest) + (max(ylongest) - min(ylongest))*percent:
+ xtop.append(xlongest[n])
+ ytop.append(y)
+ xapex = (max(xtop) + min(xtop))/2
+
+ l_drop = []
+ r_drop = []
+ for n in longest:
+ if n[0] <= xapex:
+ l_drop.append(n)
+ if n[0] >= xapex:
+ r_drop.append(n)
+ l_drop = np.array(l_drop)
+ r_drop = np.array(r_drop)
+
+ # transpose both half drops so that they both face right and the apex of both is at 0,0
+ r_drop[:,0] = r_drop[:,0] - xapex
+ l_drop[:,0] = -l_drop[:,0] + xapex
+
+ if display:
+ plt.plot(r_drop[:,[0]], r_drop[:,[1]], 'b,')
+ #plt.show()
+ #plt.close()
+ plt.plot(l_drop[:,[0]], l_drop[:,[1]], 'r,')
+ #plt.gca().set_aspect('equal', adjustable='box')
+ #plt.xlim([470,530])
+ #plt.ylim([-188,-190])
+ plt.show()
+ plt.close()
+
+ #############################
+
+ # the drop has been split in half
+
+ # this system has a user input which gives a rough indication of the contact point and the surface line
+
+ # isolate the bottom 5% of the contour near the contact point
+
+ drops = {}
+ counter = 0
+ crop_drop = {}
+ CPs = {}
+ for halfdrop in [l_drop,r_drop]:
+ new_halfdrop = sorted(halfdrop.tolist(), key=lambda x: (x[0],-x[1])) #top left to bottom right
+ new_halfdrop = optimized_path(new_halfdrop)#[::-1]
+
+ xnew_halfdrop = new_halfdrop[:,[0]].reshape(len(new_halfdrop[:,[0]]))
+ ynew_halfdrop = new_halfdrop[:,[1]].reshape(len(new_halfdrop[:,[1]]))
+
+ # isolate the bottom of the drop to help identify contact points (may not need to do this for all scenarios)
+ bottom = []
+ top = [] # will need this later
+ #print('isolate the bottom ',percent*100,'% of the contour:') # percent defined above
+ div_line_value = min(new_halfdrop[:,[1]]) + (max(new_halfdrop[:,[1]]) - min(new_halfdrop[:,[1]]))*percent
+ for n in new_halfdrop:
+ if n[1] < div_line_value:
+ bottom.append(n)
+ else:
+ top.append(n)
+
+ bottom = np.array(bottom)
+ top = np.array(top)
+
+ xbottom = bottom[:,[0]].reshape(len(bottom[:,[0]]))
+ ybottom = bottom[:,[1]].reshape(len(bottom[:,[1]]))
+ xtop = top[:,[0]].reshape(len(top[:,[0]]))
+ ytop = top[:,[1]].reshape(len(top[:,[1]]))
+
+ #print('max x value of halfdrop is: ',max(xhalfdrop))
+
+ if 1: # plot the bottom 10% of the contour
+ plt.plot(xbottom, ybottom, 'b,')
+ plt.title('bottom 10% of the contour')
+ #plt.xlim([130,200])
+ plt.show()
+ plt.close()
+
+ #### Continue here assuming that the drop is hydrophobic ####
+ if 1:
+ # order all halfdrop points using optimized_path (very quick but occasionally makes mistakes)
+
+ xCP = min(xbottom)
+ #yCP = min([coord[1] for coord in new_halfdrop if coord[0]==xCP])
+ yCP = max([coord[1] for coord in bottom if coord[0]==xCP])
+ CPs[counter] = [xCP, yCP]
+
+ if display: #check
+ plt.plot(new_halfdrop[:,0],new_halfdrop[:,1])
+ plt.show()
+ plt.close()
+
+ # remove surface line past the contact point
+ index = new_halfdrop.tolist().index(CPs[counter]) #?
+
+ new_halfdrop = new_halfdrop[:index+1]
+
+ if 0:
+ xCP_index = [i for i, j in enumerate(xnew_halfdrop) if j == xCP]
+ #print('xCP_index is: ',xCP_index)
+ yCP_index = [i for i, j in enumerate(ynew_halfdrop) if j == yCP]
+ #print('yCP_index is: ',yCP_index)
+
+ new_halfdrop = np.zeros((len(xnew_halfdrop),2))
+ for n in range(len(xnew_halfdrop)):
+ new_halfdrop[n,[0]]=xnew_halfdrop[n]
+ new_halfdrop[n,[1]]=ynew_halfdrop[n]
+ #print('first 3 points of new_halfdrop are: ',new_halfdrop[:3])
+ #print('length of new_halfdrop is: ',len(new_halfdrop))
+
+ if xCP_index == yCP_index:
+ if new_halfdrop[xCP_index[0]+1][1]>new_halfdrop[xCP_index[0]-1][1]:
+ new_halfdrop = new_halfdrop[xCP_index[0]:]
+ else:
+ new_halfdrop = new_halfdrop[:xCP_index[0]+1]
+ else:
+ raise_error = True
+ for x in xCP_index:
+ for y in yCP_index:
+ if x==y:
+ raise_error = False
+ xCP_index = [x]
+ yCP_index = [y]
+ #print('indexes of the CP are: ',xCP_index[0],', ',yCP_index[0])
+ if new_halfdrop[xCP_index[0]+1][1]>new_halfdrop[xCP_index[0]-1][1]:
+ new_halfdrop = new_halfdrop[xCP_index[0]:]
+ else:
+ new_halfdrop = new_halfdrop[:xCP_index[0]+1]
+ if raise_error == True:
+ print('The index of the contact point x value is: ', new_halfdrop[xCP_index])
+ print('The index of the contact point y value is: ', new_halfdrop[yCP_index])
+ raise 'indexes of x and y values of the contact point are not the same'
+
+ if 0:
+ # order all halfdrop points using two-opt (the slower method)
+
+ # before any ordering is done, chop off the surface line that is past the drop edge
+ del_indexes = []
+ for index,coord in enumerate(bottom):
+ if coord[0]>max(xtop):
+ del_indexes.append(index)
+ if coord[1] ybot[yCP_index-1]:
+ new_bot = np.zeros((len(xbot[yCP_index:]),2))
+ for n in range(len(new_bot)):
+ new_bot[n,[0]] = xbot[xCP_index+n]
+ new_bot[n,[1]] = ybot[yCP_index+n]
+ else:
+ new_bot = np.zeros((len(xbot[:yCP_index]),2))
+ for n in range(len(new_bot)):
+ new_bot[n,[0]] = xbot[n]
+ new_bot[n,[1]] = ybot[n]
+ except:
+ try:
+ if ybot[yCP_index] > ybot[yCP_index-2]:
+ new_bot = np.zeros((len(xbot[yCP_index:]),2))
+ for n in range(len(new_bot)):
+ new_bot[n,[0]] = xbot[xCP_index+n]
+ new_bot[n,[1]] = ybot[yCP_index+n]
+ else:
+ new_bot = np.zeros((len(xbot[:yCP_index]),2))
+ for n in range(len(new_bot)):
+ new_bot[n,[0]] = xbot[n]
+ new_bot[n,[1]] = ybot[n]
+ except:
+ print('xCP_indexs are: ', xCP_indexs)
+ print('yCP_indexs are: ', yCP_indexs)
+ raise 'indexes of x and y values of the contact point are not the same'
+ new_halfdrop = np.concatenate((new_top,new_bot))
+
+ if 0: # order the points so that the baseline can be removed
+ # before any ordering is done, chop off the surface line that is past the drop edge
+ del_indexes = []
+ for index,coord in enumerate(halfdrop):
+ if coord[0]>max(xtop):
+ del_indexes.append(index)
+ #halfdrop = np.delete(halfdrop,del_indexes)
+ xhalfdrop = np.delete(xhalfdrop,del_indexes)
+ yhalfdrop = np.delete(yhalfdrop,del_indexes)
+ #print('shape of another_halfdrop is: '+ str(type(another_halfdrop)))
+ #print('first few points of halfdrop are: ',halfdrop[:3])
+
+
+
+ # order half contour points
+ xx,yy = sort_to_line(xhalfdrop,yhalfdrop)
+ add_top = False
+ #print('length of halfdrop is: ', len(halfdrop))
+ #print('length of xbottom is: ', len(xbottom))
+
+ #if xx[0]<1: # then graph starts at the top
+ surface_past_drop_index = []
+ for n,x in enumerate(xx):
+ if x>max(xtop):
+ surface_past_drop_index.append(n)
+ #xx = xx[:max(xtop)point]
+ #print('Indexes of contour points past drop: ',surface_past_drop_index)
+
+
+ # if the sort method will not work
+ if len(xx) < len(xhalfdrop): # assumes that the error is on the surface somewhere, so uses bottom of contour
+ add_top = True
+ print()
+ print('sort_to_line is not utilising the full contour, alternate ordering method being used')
+ print('check bottom 10% of contour...')
+ # this method is much slower than the above, so use as few points as possible
+ bot_list = []
+ for n in range(len(xbottom)):
+ if xbottom[n] ybot[yCP_index[0]-1]:
+ new_bot = np.zeros((len(xbot[yCP_index[0]:]),2))
+ for n in range(len(new_bot)):
+ new_bot[n,[0]] = xbot[xCP_index[0]+n]
+ new_bot[n,[1]] = ybot[yCP_index[0]+n]
+ else:
+ new_bot = np.zeros((len(xbot[:yCP_index[0]]),2))
+ for n in range(len(new_bot)):
+ new_bot[n,[0]] = xbot[n]
+ new_bot[n,[1]] = ybot[n]
+ else:
+ raise 'indexes of x and y values of the contact point are not the same'
+
+ # combine new_bot with top_array to give the isolated drop contour without surface
+ if 0:
+ top_array = np.zeros((len(xtop),2))
+ for n in range(len(xtop)):
+ top_array[n,[0]] = xtop[n]
+ top_array[n,[1]] = ytop[n]
+
+ new_halfdrop = np.concatenate((top,new_bot))
+
+ # re-order to check that the error was at the surface line
+ xx,yy = sort_to_line(new_halfdrop[:,[0]],new_halfdrop[:,[1]])
+ if len(xx)new_halfdrop[xCP_index[0]-1][1]:
+ new_halfdrop = new_halfdrop[xCP_index[0]:]
+ else:
+ new_halfdrop = new_halfdrop[:xCP_index[0]+1]
+ else:
+ raise_error = True
+ for x in xCP_index:
+ for y in yCP_index:
+ if x==y:
+ raise_error = False
+ xCP_index = [x]
+ yCP_index = [y]
+ #print('indexes of the CP are: ',xCP_index[0],', ',yCP_index[0])
+ if new_halfdrop[xCP_index[0]+1][1]>new_halfdrop[xCP_index[0]-1][1]:
+ new_halfdrop = new_halfdrop[xCP_index[0]:]
+ else:
+ new_halfdrop = new_halfdrop[:xCP_index[0]+1]
+ if raise_error == True:
+ print('The index of the contact point x value is: ', new_halfdrop[xCP_index])
+ print('The index of the contact point y value is: ', new_halfdrop[yCP_index])
+ raise 'indexes of x and y values of the contact point are not the same'
+
+ if counter == 0:
+ drops[counter] = new_halfdrop[::-1]
+ else:
+ drops[counter] = new_halfdrop
+
+ if display: #display
+ jet= plt.get_cmap('jet')
+ colors = iter(jet(np.linspace(0,1,len(new_halfdrop))))
+ for k in new_halfdrop:
+ plt.plot(k[0],k[1], 'o',color=next(colors))
+ plt.title('outputted halfdrop')
+ plt.axis('equal')
+ plt.show()
+ plt.close()
+
+ counter+=1
+
+ # reflect the left drop and combine left and right
+
+ profile = np.empty((len(drops[0])+len(drops[1]),2))
+ for i,n in enumerate(drops[0]):
+ flipped = n
+ flipped[0] = -flipped[0]
+ profile[i] = flipped
+ for i,n in enumerate(drops[1]):
+ profile[len(drops[0])+i] = n
+ CPs[0][0] = -CPs[0][0]
+
+ if display:
+ jet= plt.get_cmap('jet')
+ colors = iter(jet(np.linspace(0,1,len(profile))))
+ for k in profile:
+ plt.plot(k[0],k[1], 'o',color=next(colors))
+ plt.title('final output')
+ #plt.plot(profile[:,0],profile[:,1],'b')
+ plt.show()
+ plt.close()
+
+ plt.title('final output')
+ plt.plot(profile[:,0],profile[:,1],'b')
+ plt.show()
+ plt.close()
+
+ # flip upside down again so that contour follows image indexing
+ # and transform to the right so that x=0 is no longer in line with apex
+ for coord in profile:
+ coord[1] = -coord[1]
+ coord[0] = coord[0] + xapex
+ for n in [0,1]:
+ CPs[n][1] = -CPs[n][1]
+ CPs[n][0] = CPs[n][0] + xapex
+
+ # flip original contour back to original orientation
+ for coord in coords:
+ coord[1] = -coord[1]
+
+ return profile,CPs
+
def find_contours(image):
    """
    Version-agnostic wrapper around cv2.findContours(), compatible with
    OpenCV 2.x, 3.x and 4.x return signatures. ``image`` is a single-channel
    numpy array; non-zero pixels are treated as 1, so the detected edges lie
    on the boundary between zero and non-zero pixels.

    Returns the contours as (n, 2) arrays, sorted by descending arc length.
    """
    if len(image.shape) > 2:
        raise ValueError('`image` must be a single channel image')

    find_args = (image, cv2.RETR_LIST, cv2.CHAIN_APPROX_TC89_KCOS)
    if CV2_VERSION >= (4, 0, 0):
        # OpenCV 4.0 reverted to the pre-3.2 two-value return signature.
        contours, _ = cv2.findContours(*find_args)
    elif CV2_VERSION >= (3, 2, 0):
        # OpenCV 3.2+ returns (modified image, contours, hierarchy).
        _, contours, _ = cv2.findContours(*find_args)
    else:
        contours, _ = cv2.findContours(*find_args)

    # Each contour comes back with shape (n, 1, 2) (points as column
    # vectors); flatten each to a plain (n, 2) array before sorting.
    flattened = [c.reshape(c.shape[0], 2) for c in contours]
    return sorted(flattened, key=lambda c: cv2.arcLength(c, False), reverse=True)
+
def extract_edges_CV(img):
    '''
    Detect the drop edge in an image and return its coordinates.

    Parameters:
        img: BGR image of the drop as a numpy array.

    Returns:
        numpy.ndarray: (n, 2) array of unique [x, y] coordinates of the
        longest detected contour, with points within 1 px of the image
        border discarded.
    '''
    IGNORE_EDGE_MARGIN = 1
    img = img.astype("uint8")
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Otsu's method chooses the binarisation threshold automatically.
    ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # NOTE(review): assumes an OpenCV version whose findContours returns two
    # values (2.x or 4.x) — see find_contours for the version-aware variant.
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    # Each contour has shape (n, 1, 2) where 'n' is the number of points. Presumably this is so each
    # point is a size 2 column vector, we don't want this so reshape it to a (n, 2)
    contours = [contour.reshape(contour.shape[0], 2) for contour in contours]

    # Sort the contours by arc length, descending order
    contours.sort(key=lambda c: cv2.arcLength(c, False), reverse=True)

    # Assume that the drop is the largest (longest) contour
    drop_profile = contours[0]

    # Ignore points of the drop profile near the edges of the drop image
    width, height = img.shape[1::-1]
    if not (width < IGNORE_EDGE_MARGIN or height < IGNORE_EDGE_MARGIN):
        mask = ((IGNORE_EDGE_MARGIN < drop_profile[:, 0]) & (drop_profile[:, 0] < width - IGNORE_EDGE_MARGIN) &
                (IGNORE_EDGE_MARGIN < drop_profile[:, 1]) & (drop_profile[:, 1] < height - IGNORE_EDGE_MARGIN))
        drop_profile = drop_profile[mask]

    # Drop duplicate points while preserving order. BUGFIX(perf): the original
    # `list(coord) not in output` membership scan was O(n^2); a seen-set of
    # tuples gives the same order-preserving dedupe in O(n).
    seen = set()
    output = []
    for coord in drop_profile:
        key = (coord[0], coord[1])
        if key not in seen:
            seen.add(key)
            output.append([coord[0], coord[1]])
    return np.array(output)
+
def rot(a):
    """Return the 2x2 counter-clockwise rotation matrix for angle ``a`` (radians)."""
    cos_a = np.cos(a)
    sin_a = np.sin(a)
    return np.array([[cos_a, -sin_a],
                     [sin_a, cos_a]])
+
def fit_ellipse(x, y):
    """
    Direct least-squares conic fit, main fit from the original publication:
    http://nicky.vanforeest.com/misc/fitEllipse/fitEllipse.html

    ``x`` and ``y`` are 1-D numpy arrays of contour coordinates. Returns the
    six conic coefficients (A, B, C, D, E, F) of
    A*x^2 + B*x*y + C*y^2 + D*x + E*y + F = 0 as an eigenvector (arbitrary
    scale).
    """
    col_x = x[:, np.newaxis]
    col_y = y[:, np.newaxis]
    # Design matrix of the conic terms x^2, xy, y^2, x, y, 1.
    design = np.hstack((col_x * col_x, col_x * col_y, col_y * col_y,
                        col_x, col_y, np.ones_like(col_x)))
    scatter = np.dot(design.T, design)
    # Constraint matrix encoding the ellipse-specific constraint 4AC - B^2 = 1.
    constraint = np.zeros([6, 6])
    constraint[0, 2] = +2
    constraint[2, 0] = +2
    constraint[1, 1] = -1
    eigvals, eigvecs = np.linalg.eig(np.dot(np.linalg.inv(scatter), constraint))
    best = np.argmax(np.abs(eigvals))
    return eigvecs[:, best]
+
def ell_parameters( a ):
    """
    Convert the conic coefficient vector from fit_ellipse() into ellipse
    parameters (semi-axes, rotation angle, centre).

    New function substituting the original 3 functions for
    axis, centre and angle.
    We start by noting that the linear term is due to an offset.
    Getting rid of it is equivalent to find the offset.
    Starting with the Eq.
    xT A x + bT x + c = 0 and transforming x -> x - t
    we get a new linear term. By demanding that this term vanishes
    we get the Eq.
    b = (AT + A ) t.
    Hence, an easy way to write down how to get t

    Parameters:
        a: length-6 conic coefficient vector (A, B, C, D, E, F) of
           A*x^2 + B*x*y + C*y^2 + D*x + E*y + F = 0, as returned by
           fit_ellipse().

    Returns:
        tuple: (length-2 array of semi-axes, rotation angle in degrees,
        centre coordinates as -t).
    """
    RAD = 180. / np.pi   # radians -> degrees conversion factor
    DEGREE = 1. / RAD    # degrees -> radians (unused here, kept for reference)

    # Quadratic part as a symmetric 2x2 matrix and the linear part as a vector.
    A = np.array( [ [ a[0], a[1]/2. ], [ a[1]/2., a[2] ] ] )
    b = np.array( [ a[3], a[4] ] )
    # Solve b = (AT + A) t for the offset t that removes the linear term.
    t = np.dot( np.linalg.inv( np.transpose( A ) + A ), b )
    """
    the transformation changes the constant term, which we need
    for proper scaling
    """
    c = a[5]
    cnew = c - np.dot( t, b ) + np.dot( t, np.dot( A, t ) )
    # Normalise so the conic reads xT Anew x - 1 = 0.
    Anew = A / (-cnew)
    # ~cnew = cnew / (-cnew) ### debug only
    """
    now it is in the form xT A x - 1 = 0
    and we know that A is a rotation of the matrix
    ( 1 / a² 0 )
    B = ( )
    ( 0 1 / b² )
    where a and b are the semi axes of the ellipse
    it is hence A = ST B S
    We note that rotation does not change the eigenvalues, which are
    the diagonal elements of matrix B. Moreover, we note that
    the matrix of eigenvectors rotates B into A
    """
    E, V = np.linalg.eig( Anew )
    """
    so we have
    B = VT A V
    and consequently
    A = V B VT
    where V is of a form as given by the function rot() from above
    """
    # ~B = np.dot( np.transpose(V), np.dot( Anew, V ) ) ### debug only
    # Recover the rotation angle from the eigenvector matrix.
    phi = np.arccos( V[ 0, 0 ] )
    """
    checking the sin for changes in sign to detect angles above 180°
    """
    if V[ 0, 1 ] < 0:
        phi = 2 * np.pi - phi
    ### cw vs ccw and periodicity of pi
    phi = -phi % np.pi

    # NOTE(review): if the fitted conic is not an ellipse an eigenvalue can be
    # <= 0, making np.sqrt below emit NaN/warnings (see the disabled guard) —
    # confirm upstream always passes genuine ellipse coefficients.
#    for i in range(len(E)):
#        if E[i]<0:
#            E[i]=0.1

    return np.sqrt( 1. / E ), phi * RAD, -t
    """
    That's it. One might put some additional work/thought in the 180°
    and cw vs ccw thing, as it is a bit messy.
    """
+
def ellipse_line_intersection(xc, yc, a, b, theta, x0, y0, x1, y1, display=False):
    """
    Finds the intersection between an ellipse and a line defined by two points.

    Parameters:
        xc (float): X-coordinate of the center of the ellipse.
        yc (float): Y-coordinate of the center of the ellipse.
        a (float): Length of the semi-major axis of the ellipse.
        b (float): Length of the semi-minor axis of the ellipse.
        theta (float): Angle of rotation of the ellipse in radians.
        x0 (float): X-coordinate of the first point along the line.
        y0 (float): Y-coordinate of the first point along the line.
        x1 (float): X-coordinate of the second point along the line.
        y1 (float): Y-coordinate of the second point along the line.
        display (Boolean): Set to True to show figures.

    Returns:
        tuple of (x, y, gradient) triples — two for a secant line, one for a
        tangent — or None when the line misses the ellipse.

    NOTE(review): a vertical line (x0 == x1) makes the slope computation
    divide by zero — confirm callers never pass a vertical baseline.
    """

    def _gradient(xi, yi):
        # dy/dx of the ellipse at the point (xi, yi), by implicit
        # differentiation of the rotated-ellipse equation. BUGFIX: the
        # original inlined this expression twice and the second copy used
        # x_int_1 where x_int_2 belonged; a single helper removes the
        # copy-paste hazard.
        num = (2 * ((-xc + xi) * np.sin(theta) + (-yc + yi) * np.cos(theta)) * np.sin(theta) / b**2
               + 2 * ((-xc + xi) * np.cos(theta) + (-yc + yi) * np.sin(theta)) * np.cos(theta) / a**2)
        den = (2 * ((-xc + xi) * np.sin(theta) + (-yc + yi) * np.cos(theta)) * np.cos(theta) / b**2
               + 2 * ((-xc + xi) * np.cos(theta) + (-yc + yi) * np.sin(theta)) * np.sin(theta) / a**2)
        return -num / den

    # Plot the ellipse and the baseline
    if display:
        ell = Ellipse(
            [xc, yc], 2 * a, 2 * b, theta,
            facecolor=(1, 0, 0, 0), edgecolor=(1, 0, 0, 1))
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.add_patch(ell)
        ax.plot(x0, y0, 'yo')
        ax.plot(x1, y1, 'go')
        ax.plot()
        plt.title('Drawn ellipse with line points in green and yellow')
        plt.gca().invert_yaxis()
        plt.show()
        plt.close()

    # Calculate the slope and intercept of the line
    m = (y1 - y0) / (x1 - x0)
    c = y0 - m * x0

    # Equation of the rotated, translated ellipse in general conic form
    # A x^2 + B xy + C y^2 + D x + E y + F = 0.
    A = np.sin(theta)**2/b**2 + np.cos(theta)**2/a**2
    B = -2*np.sin(theta)*np.cos(theta)/b**2 + 2*np.sin(theta)*np.cos(theta)/a**2
    C = np.cos(theta)**2/b**2 + np.sin(theta)**2/a**2
    D = - 2*xc*np.sin(theta)**2/b**2 + 2*yc*np.sin(theta)*np.cos(theta)/b**2 - 2*xc*np.cos(theta)**2/a**2 - 2*yc*np.sin(theta)*np.cos(theta)/a**2
    E = 2*xc*np.sin(theta)*np.cos(theta)/b**2 - 2*yc*np.cos(theta)**2/b**2 - 2*xc*np.sin(theta)*np.cos(theta)/a**2 - 2*yc*np.sin(theta)**2/a**2
    F = -1 + xc**2*np.sin(theta)**2/b**2 - 2*xc*yc*np.sin(theta)*np.cos(theta)/b**2 + yc**2*np.cos(theta)**2/b**2 + xc**2*np.cos(theta)**2/a**2 + 2*xc*yc*np.sin(theta)*np.cos(theta)/a**2 + yc**2*np.sin(theta)**2/a**2

    # Substitute y = m x + c, expand and simplify to a quadratic in x.
    A_new = A + B*m + C*m**2
    B_new = B*c + 2*C*m*c + D + E*m
    C_new = C*c**2 + E*c + F

    # Calculate the discriminant of the quadratic equation
    disc = B_new**2 - 4 * A_new * C_new

    # If the discriminant is negative, there are no intercepts
    if disc < 0:
        print("The line does not intersect the ellipse.")
        return None

    # If the discriminant is zero, the line is tangent: one intercept.
    # BUGFIX: this branch previously used an ad-hoc rotated-coordinate
    # gradient inconsistent with the two-intercept branch; both now share
    # the same implicit-differentiation helper.
    elif disc == 0:
        x_int = -B_new / (2 * A_new)
        y_int = m * x_int + c
        return ((x_int, y_int, _gradient(x_int, y_int)),)

    # If the discriminant is positive, there are two intercepts
    else:
        x_int_1 = (-B_new + np.sqrt(disc)) / (2 * A_new)
        x_int_2 = (-B_new - np.sqrt(disc)) / (2 * A_new)
        y_int_1 = m * x_int_1 + c
        y_int_2 = m * x_int_2 + c

        return ((x_int_1, y_int_1, _gradient(x_int_1, y_int_1)),
                (x_int_2, y_int_2, _gradient(x_int_2, y_int_2)))
+
def ellipse_closest_point(xp, yp, xc, yc, a, b, th, n=1000, display=False):
    """
    Find the point on a rotated ellipse closest to a reference point.

    xp (float): The x-coordinate of the reference point.
    yp (float): The y-coordinate of the reference point.
    xc (float): The x-coordinate of the ellipse's center.
    yc (float): The y-coordinate of the ellipse's center.
    a (float): The semi-major axis length of the ellipse.
    b (float): The semi-minor axis length of the ellipse.
    th (float): The rotation angle of the ellipse in degrees.
    n (int): The number of discrete points used to draw the ellipse.
    display (Boolean): Set to True to output figures and information.

    Returns:
        The distance between the reference point and the (discretised)
        ellipse edge, and the coordinates of the closest sampled edge point.
        The answer is approximate; accuracy improves with larger n.
    """

    # Sample the rotated ellipse parametrically at n angles.
    t = np.linspace(0, 2 * np.pi, n)
    x = xc + a * np.cos(t) * np.cos(np.deg2rad(th)) - b * np.sin(t) * np.sin(np.deg2rad(th))
    y = yc + a * np.cos(t) * np.sin(np.deg2rad(th)) + b * np.sin(t) * np.cos(np.deg2rad(th))

    dist = np.sqrt((x - xp) ** 2 + (y - yp) ** 2)
    # BUGFIX(perf): `list(dist).index(min(dist))` copied the array into a list
    # and scanned it at Python speed; np.argmin returns the same first-minimum
    # index in one C-level pass.
    idx = int(np.argmin(dist))

    if display:
        plt.figure(1)
        plt.plot(x, y, '-', xp, yp, 'r+', x[idx], y[idx], 'r+')
        plt.xlabel('x')
        plt.ylabel('y')
        plt.title('Ellipse, Point, and Zeros')

        plt.figure(2)
        plt.plot(t, dist, 'm.', t[idx], dist[idx], 'cx')
        plt.xlabel('t')
        plt.ylabel('Distance')
        plt.title('Distance Function')

        print(f'xp: {xp}, x[idx]: {x[idx]}')
        print(f'yp: {yp}, y[idx]: {y[idx]}')
        print('Error is: ', dist[idx])

        plt.show()
        plt.close()

    return dist[idx], [x[idx], y[idx]]
+
def ellipse_fit_errors(contour, h, k, a, b, theta, display):
    """
    Measure how well a rotated, translated ellipse matches a contour.

    Parameters:
        contour (array): The array of x, y coordinate points.
        h (float): The x-coordinate of the ellipse's center.
        k (float): The y-coordinate of the ellipse's center.
        a (float): The semi-major axis length of the ellipse.
        b (float): The semi-minor axis length of the ellipse.
        theta (float): The rotation angle of the ellipse in degrees.
        display (boolean): Set to true to show figures (currently unused here).

    Returns:
        dictionary: The MAE, MSE, RMSE, and maximum error of the contour as
        compared against the fitted ellipse.
    """

    # Distance from every contour point to its nearest point on the ellipse.
    errors = [ellipse_closest_point(point[0], point[1], h, k, a, b, theta)[0]
              for point in contour]

    # Hoist the repeated sums (the original recomputed the squared sum for
    # both MSE and RMSE). Distances are non-negative, so abs() is a no-op
    # kept only for MAE's definition.
    n = len(errors)
    abs_sum = sum(abs(error) for error in errors)
    sq_sum = sum(error * error for error in errors)

    return {
        'MAE': abs_sum / n,
        'MSE': sq_sum / n,
        'RMSE': np.sqrt(sq_sum / n),
        'Maximum error': max(errors),
    }
+
def ellipse_fit_img(img, display=False):
    """Perform the ellipse fit on a drop image and measure contact angles.

    For best results, preprocessing must be done before calling this function.

    Parameters:
        img: BGR drop image as a numpy array.
        display (bool): Set to True to show intermediate figures and details.

    Returns:
        tuple: (CA, intercepts, centre, (a, b), phi_deg, errors, timings) —
        contact angles [left, right] in degrees, baseline/ellipse intercepts,
        fitted ellipse centre, semi-axes and rotation (degrees), fit error
        measures, and per-stage wall-clock timings.
    """
    # method specific preprocessing of the image data
    start_time = time.time()

    edges_pts = extract_edges_CV(img)  # (n, 2) array of detected edge coordinates

    if display:
        plt.imshow(img)
        plt.plot(edges_pts[:, 0], edges_pts[:, 1])
        plt.title('drop found by hough transform')
        plt.show()
        plt.close()

    drop, CPs = prepare_hydrophobic(edges_pts, display)

    # define the baseline as the line through the two contact points
    x = drop[:, 0]
    y = drop[:, 1]
    rise = CPs[1][1] - CPs[0][1]
    run = CPs[1][0] - CPs[0][0]
    slope = rise / run
    c = CPs[0][1] - (slope * CPs[0][0])
    baseline_x = np.linspace(1, img.shape[1], 100)
    baseline_y = slope * baseline_x + c

    # timers
    fit_preprocessing_time = time.time() - start_time
    fit_start_time = time.time()

    # fit
    avec = fit_ellipse(x, y)
    (a, b), phi_deg, t = ell_parameters(avec)

    if display:
        print('centre points: ' + str(t))
        print('a and b: ' + str(a) + ', ' + str(b))
        print('phi (°): ' + str(phi_deg))

        # plot the fitted ellipse over the image (patch built only when shown)
        ell = Ellipse(
            t, 2 * a, 2 * b, phi_deg,
            facecolor=(1, 0, 0, 0), edgecolor=(1, 0, 0, 1))
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.add_patch(ell)
        ax.scatter(x, y)
        ax.plot(baseline_x, baseline_y)
        ax.plot(CPs[0][0], CPs[0][1], 'yo')
        ax.plot(CPs[1][0], CPs[1][1], 'go')
        ax.plot()
        plt.title('Fitted ellipse')
        plt.imshow(img)
        plt.show()
        plt.close()

    # Find the baseline/ellipse intercepts and the ellipse gradients there.
    # NOTE(review): ellipse_line_intersection returns None when the baseline
    # misses the ellipse, which would raise below — confirm upstream
    # guarantees an intersecting baseline.
    outputs = ellipse_line_intersection(t[0], t[1], a, b, math.radians(phi_deg), CPs[0][0], CPs[0][1], CPs[1][0], CPs[1][1])
    left, right = sorted(outputs, key=lambda p: p[0])
    intercepts = [[left[0], left[1]], [right[0], right[1]]]

    # Contact angle at each intercept. BUGFIX: the original reset its
    # left/right flag inside the loop, so both intercepts always took the
    # "left" branches; iterate the x-sorted intercepts explicitly instead.
    CA = []
    for is_left, intercept in ((True, left), (False, right)):
        m = intercept[2] - slope  # ellipse gradient relative to the baseline

        if intercept[1] > t[1] and is_left:  # high CA angle, left
            CA.append(180 - abs(math.degrees(np.arctan(m))))
        elif intercept[1] > t[1] and not is_left:  # high CA angle, right
            CA.append(abs(math.degrees(np.arctan(m))))
        elif intercept[1] < t[1] and is_left:  # low CA angle, left
            CA.append(math.degrees(np.arctan(m)))
        elif intercept[1] < t[1] and not is_left:  # low CA angle, right
            CA.append(math.degrees(np.pi + np.arctan(m)))

    fit_time = time.time() - fit_start_time

    # using MAE, MSE, RMSE, max_error as error measures
    errors = ellipse_fit_errors(drop, t[0], t[1], a, b, phi_deg, display)

    analysis_time = time.time() - start_time

    timings = {}
    timings['method specific preprocessing time'] = fit_preprocessing_time
    timings['fit time'] = fit_time
    timings['analysis time'] = analysis_time

    return CA, intercepts, t, (a, b), phi_deg, errors, timings
+
def ellipse_fit(drop, display=False):
    """Perform the ellipse fit on an already-extracted drop contour.

    For best results, preprocessing must be done before calling this function.
    Make sure that the drop coordinate array consists of float values.

    Parameters:
        drop: (n, 2) float numpy array of contour coordinates, ordered so the
            first and last points are the two contact points.
        display (bool): Set to True to show figures and details.

    Returns:
        tuple: (CA, intercepts, centre, (a, b), phi_deg, errors, timings) —
        contact angles [left, right] in degrees, baseline/ellipse intercepts,
        fitted ellipse centre, semi-axes and rotation (degrees), fit error
        measures (a string message if the error analysis fails), and timings.
    """
    # method specific preprocessing of the contour data
    start_time = time.time()
    # the contour is assumed ordered, so its end points are the contact points
    CPs = [drop[0], drop[-1]]

    # define the baseline as the line through the two contact points
    x, y = drop[:, 0], drop[:, 1]
    rise = CPs[1][1] - CPs[0][1]
    run = CPs[1][0] - CPs[0][0]
    slope = rise / run
    c = CPs[0][1] - (slope * CPs[0][0])
    baseline_x = np.linspace(1, max(drop[:, 0]), 100)
    baseline_y = slope * baseline_x + c

    # fit (the original also computed unused centre estimates; removed)
    avec = fit_ellipse(x, y)
    (a, b), phi_deg, t = ell_parameters(avec)

    if display:
        print('drop contour: ', drop)
        print('centre points: ' + str(t))
        print('a and b: ' + str(a) + ', ' + str(b))
        print('phi (°): ' + str(phi_deg))

        # plot (patch built only when shown)
        ell = Ellipse(
            t, 2 * a, 2 * b, phi_deg,
            facecolor=(1, 0, 0, 0), edgecolor=(1, 0, 0, 1), label='Fitted ellipse')
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.add_patch(ell)
        ax.scatter(x, y, label='contour')
        ax.plot(baseline_x, baseline_y, label='baseline')
        ax.plot(CPs[0][0], CPs[0][1], 'yo', label='left contact point')
        ax.plot(CPs[1][0], CPs[1][1], 'go', label='right contact point')
        ax.plot()
        ax.legend()
        plt.title('Fitted ellipse')
        plt.axis('equal')
        plt.show()
        plt.close()

    # Find the baseline/ellipse intercepts and the ellipse gradients there.
    outputs = ellipse_line_intersection(t[0], t[1], a, b, math.radians(phi_deg), CPs[0][0], CPs[0][1], CPs[1][0], CPs[1][1])
    left, right = sorted(outputs, key=lambda p: p[0])
    intercepts = [[left[0], left[1]], [right[0], right[1]]]

    # Contact angle at each intercept. BUGFIX: the original reset its
    # left/right flag inside the loop, so both intercepts always took the
    # "left" branches; iterate the x-sorted intercepts explicitly instead.
    CA = []
    for is_left, intercept in ((True, left), (False, right)):
        m = intercept[2] - slope  # ellipse gradient relative to the baseline

        if intercept[1] > t[1] and is_left:  # high CA angle, left
            CA.append(180 - abs(math.degrees(np.arctan(m))))
        elif intercept[1] > t[1] and not is_left:  # high CA angle, right
            CA.append(abs(math.degrees(np.arctan(m))))
        elif intercept[1] < t[1] and is_left:  # low CA angle, left
            CA.append(math.degrees(np.arctan(m)))
        elif intercept[1] < t[1] and not is_left:  # low CA angle, right
            CA.append(math.degrees(np.pi + np.arctan(m)))

    fit_time = time.time() - start_time

    try:
        # using MAE, MSE, RMSE, max_error as error measures
        errors = ellipse_fit_errors(drop, t[0], t[1], a, b, phi_deg, display)
    except Exception as err:  # narrowed from a bare except; keep best-effort behaviour
        errors = 'something went wrong fitting the ellipse...'
        print(errors)
        print(err)
    analysis_time = time.time() - start_time

    timings = {}
    timings['fit time'] = fit_time
    timings['analysis time'] = analysis_time

    return CA, intercepts, t, (a, b), phi_deg, errors, timings
+
# Disabled manual smoke test for ellipse_fit_img. Flip `if 0` to `if 1`
# (and point IMG_PATH at a real image) to run the full pipeline on a sample
# image and print every output.
if 0:
    IMG_PATH = '../RICOphobic_cropped.png'
    img = cv2.imread(IMG_PATH)

    angles, intercepts, center, (a,b), theta, errors, timings = ellipse_fit_img(img, display=True)
    print('angles: ', angles)
    print('intercepts: ', intercepts)
    print('center coordinates: ', center)
    print('a: ',a)
    print('b: ',b)
    print('angle of ellipse rotation: ', theta)
    print('errors: ',errors)
    print('timings: ', timings)
diff --git a/modules/ellipse_fit_scikit.py b/modules/ellipse_fit_scikit.py
new file mode 100644
index 0000000..ef2ccb8
--- /dev/null
+++ b/modules/ellipse_fit_scikit.py
@@ -0,0 +1,1121 @@
+"""This code serves as a discrete instance of the elipse fit method of
+contact angle analysis.
+
+This is base on the circular fit code taken from the most recent version
+of conan - conan-ML_cv1.1/modules/select_regions.py"""
+
+from sklearn.cluster import OPTICS # for clustering algorithm
+import numpy as np
+from skimage.measure import EllipseModel
+import matplotlib.pyplot as plt
+import cv2
+from matplotlib.patches import Ellipse
+import math
+import time
+
def cluster_OPTICS(sample, out_style='coords', xi=None, eps=None, verbose=0):
    """Cluster 2-D points with OPTICS and return the groups as a dictionary.

    Takes an array (or list) of the form [[x1,y1],[x2,y2],...,[xn,yn]].

    If out_style='coords' each dictionary entry is a group, and points are
    outputted in coordinate form. If out_style='xy' there are two dictionary
    entries for each group, one labeled as nx and one as ny (where n is the
    label of the group).

    If xi (float between 0 and 1) is not None and eps is None, the xi
    clustering method is used: clusters are defined by the minimum steepness
    on the reachability plot (an upwards point is one whose ratio to its
    successor is at most 1-xi).

    If eps (float) is not None and xi is None, the dbscan clustering method is
    used, where eps is the maximum distance between two samples for one to be
    considered as in the neighborhood of the other.

    Raises:
        ValueError: unless exactly one of xi/eps is given.

    https://stackoverflow.com/questions/47974874/algorithm-for-grouping-points-in-given-distance
    https://scikit-learn.org/stable/modules/generated/sklearn.cluster.OPTICS.html
    """
    # BUGFIX: the original used `raise '<string>'`, which is itself a
    # TypeError in Python 3; raise a proper exception type instead. Identity
    # comparisons with None replace `!= None`.
    if eps is not None and xi is None:
        # cluster_method 'dbscan' so that eps is honoured
        clustering = OPTICS(min_samples=2, cluster_method='dbscan', eps=eps).fit(sample)
    elif xi is not None and eps is None:
        clustering = OPTICS(min_samples=2, xi=xi).fit(sample)
    else:
        raise ValueError('Error: only one of eps and xi can be chosen but not neither nor both')

    groups = list(set(clustering.labels_))

    if verbose == 2:
        print(clustering.labels_)
    elif verbose == 1:
        print(groups)

    # Group the points by cluster label in a single pass (the original
    # re-scanned the whole sample once per group: O(G*N)).
    dic = {label: [] for label in groups}
    for label, point in zip(clustering.labels_, sample):
        dic[label].append(point)

    # 'xy' layout: one x-list and one y-list per group.
    dic2 = {}
    for label, points in dic.items():
        dic2[str(label) + 'x'] = [p[0] for p in points]
        dic2[str(label) + 'y'] = [p[1] for p in points]

    if out_style == 'coords':
        return dic
    elif out_style == 'xy':
        return dic2
+
def distance1(P1, P2):
    """Euclidean distance between two points P1 = (x1, y1) and P2 = (x2, y2)."""
    dx = P1[0] - P2[0]
    dy = P1[1] - P2[1]
    return (dx * dx + dy * dy) ** 0.5
+
def optimized_path(coords, start=None):
    """Order points into a continuous path by greedy nearest-neighbour hops.

    coords should be a list (or array) in this format:
    coords = [[x1, y1], [x2, y2], ...]; start is the optional starting point
    (defaults to the first coordinate, and must be one of the coordinates).
    Returns an (n, 2) numpy array. BUGFIX: the input list is no longer
    emptied as a side effect — ordering works on a copy.

    If any hop between consecutive ordered points exceeds 5 units the path is
    truncated just before the first such jump, since a large jump indicates
    an ordering mistake.

    https://stackoverflow.com/questions/45829155/sort-points-in-order-to-have-a-continuous-curve-using-python
    """

    def _dist(p, q):
        # Euclidean distance; local copy of distance1 so the function is
        # self-contained.
        return ((p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2) ** 0.5

    if not isinstance(coords, list):
        coords = coords.tolist()
    if start is None:
        start = coords[0]

    remaining = list(coords)  # work on a copy: do not mutate the caller's list
    path = [start]
    remaining.remove(start)
    while remaining:
        nearest = min(remaining, key=lambda p: _dist(path[-1], p))
        path.append(nearest)
        remaining.remove(nearest)
    path = np.array(path)

    # Truncate at the first large jump (> 5 units) between consecutive points.
    for i, (p, q) in enumerate(zip(path, path[1:])):
        if _dist(p, q) > 5:
            return path[:i + 1][:i + 1] if False else path[:i]  # keep points before the jump

    return path
+
+def prepare_hydrophobic(coords,xi=0.8,cluster=False,display=False):
+ """takes an array (n,2) of coordinate points, and returns the left and right halfdrops of the contour.
+ xi determines the minimum steepness on the reachability plot that constitutes a cluster boundary of the
+ clustering algorithm.
+ deg is the degree of the polynomial used to describe the shape of the droplet.
+
+ This code is adapted from the prepare module, but this version differs in that it assumes that the drop
+ is hydrophobic."""
+ coords = coords.astype(np.float)
+ # scan for clusers to remove noise and circle from lensing effect
+ ################ MAY NEED TO OPTIMIZE eps/xi TO FIND APPROPRIATE GROUPINGS ####################
+ if cluster: # turn this off bc using synthetic drops without lensing effect
+ input_contour = coords
+ dic,dic2 = cluster_OPTICS(input_contour,xi=xi),cluster_OPTICS(input_contour,out_style='xy',xi=xi)
+
+ #print("number of groups: ",len(list(dic.keys())))
+
+ jet= plt.get_cmap('jet')
+ colors = iter(jet(np.linspace(0,1,len(list(dic.keys())))))
+ for k in dic.keys():
+ plt.plot(dic2[str(k)+'x'],dic2[str(k)+'y'], 'o',color=next(colors))
+ plt.title(str(len(dic.keys()))+" groups found by clustering")
+ plt.show()
+ plt.close()
+ maxkey=max(dic, key=lambda k: len(dic[k]))
+
+ #print('key to longest dictionary entry is: ',maxkey)
+
+ # take the longest group
+ longest = dic[maxkey]
+
+ # flip contour so that min and max values are correct
+ for coord in coords:
+ coord[1] = -coord[1]
+
+ longest = coords
+
+ #print("first few coordinates of the longest contour: ",longest[:3])
+
+ xlongest = []
+ ylongest = []
+ for i in range(len(longest)):
+ xlongest.append(longest[i][0])
+ ylongest.append(longest[i][1])
+
+ #print("first few x coordinates of the longest contour: ",xlongest[:3])
+ #print("first few y coordinates of the longest contour: ",ylongest[:3])
+
+
+ # Find a appropriate epsilon value for cluster_OPTICS, this will remove noise in the bottom 10% of the drop
+ #. most importantly noise is reduced at contact points.
+
+ # variables in this process are how much and what part of the top of the droplet we use to be representative of
+ # the full contour, and whether we use the max(distance) between points or the average between points, or
+ # a scalar value of either.
+
+ xtop = [] # isolate top 90% of drop
+ ytop = []
+ percent = 0.3
+ #print('Isolate the top ',100-(percent*100),'% of the contour:')
+ for n,y in enumerate(ylongest):
+ if y > min(ylongest) + (max(ylongest) - min(ylongest))*percent:
+ xtop.append(xlongest[n])
+ ytop.append(y)
+ xtop = np.array(xtop)
+ ytop = np.array(ytop)
+
+ top = []
+ for n,x in enumerate(xtop):
+ top.append([xtop[n],ytop[n]])
+ top = np.array(top)
+ top_array = optimized_path(top)
+
+ dists = [] # find the average distance between consecutive points
+ for n,co in enumerate(top_array):
+ if 1 min(ylongest) + (max(ylongest) - min(ylongest))*percent:
+ xtop.append(xlongest[n])
+ ytop.append(y)
+ xapex = (max(xtop) + min(xtop))/2
+
+ l_drop = []
+ r_drop = []
+ for n in longest:
+ if n[0] <= xapex:
+ l_drop.append(n)
+ if n[0] >= xapex:
+ r_drop.append(n)
+ l_drop = np.array(l_drop)
+ r_drop = np.array(r_drop)
+
+ # transpose both half drops so that they both face right and the apex of both is at 0,0
+ r_drop[:,0] = r_drop[:,0] - xapex
+ l_drop[:,0] = -l_drop[:,0] + xapex
+
+ if display:
+ plt.plot(r_drop[:,[0]], r_drop[:,[1]], 'b,')
+ #plt.show()
+ #plt.close()
+ plt.plot(l_drop[:,[0]], l_drop[:,[1]], 'r,')
+ #plt.gca().set_aspect('equal', adjustable='box')
+ #plt.xlim([470,530])
+ #plt.ylim([-188,-190])
+ plt.show()
+ plt.close()
+
+ #############################
+
+ # the drop has been split in half
+
+ # this system has a user input which gives a rough indication of the contact point and the surface line
+
+ # isolate the bottom 5% of the contour near the contact point
+
+ drops = {}
+ counter = 0
+ crop_drop = {}
+ CPs = {}
+ for halfdrop in [l_drop,r_drop]:
+ new_halfdrop = sorted(halfdrop.tolist(), key=lambda x: (x[0],-x[1])) #top left to bottom right
+ new_halfdrop = optimized_path(new_halfdrop)#[::-1]
+
+ xnew_halfdrop = new_halfdrop[:,[0]].reshape(len(new_halfdrop[:,[0]]))
+ ynew_halfdrop = new_halfdrop[:,[1]].reshape(len(new_halfdrop[:,[1]]))
+
+ # isolate the bottom of the drop to help identify contact points (may not need to do this for all scenarios)
+ bottom = []
+ top = [] # will need this later
+ #print('isolate the bottom ',percent*100,'% of the contour:') # percent defined above
+ div_line_value = min(new_halfdrop[:,[1]]) + (max(new_halfdrop[:,[1]]) - min(new_halfdrop[:,[1]]))*percent
+ for n in new_halfdrop:
+ if n[1] < div_line_value:
+ bottom.append(n)
+ else:
+ top.append(n)
+
+ bottom = np.array(bottom)
+ top = np.array(top)
+
+ xbottom = bottom[:,[0]].reshape(len(bottom[:,[0]]))
+ ybottom = bottom[:,[1]].reshape(len(bottom[:,[1]]))
+ xtop = top[:,[0]].reshape(len(top[:,[0]]))
+ ytop = top[:,[1]].reshape(len(top[:,[1]]))
+
+ #print('max x value of halfdrop is: ',max(xhalfdrop))
+
+ if 1: # plot the bottom 10% of the contour
+ plt.plot(xbottom, ybottom, 'b,')
+ plt.title('bottom 10% of the contour')
+ #plt.xlim([130,200])
+ plt.show()
+ plt.close()
+
+ #### Continue here assuming that the drop is hydrophobic ####
+ if 1:
+ # order all halfdrop points using optimized_path (very quick but occasionally makes mistakes)
+
+ xCP = min(xbottom)
+ #yCP = min([coord[1] for coord in new_halfdrop if coord[0]==xCP])
+ yCP = max([coord[1] for coord in bottom if coord[0]==xCP])
+ CPs[counter] = [xCP, yCP]
+
+ if display: #check
+ plt.plot(new_halfdrop[:,0],new_halfdrop[:,1])
+ plt.show()
+ plt.close()
+
+ # remove surface line past the contact point
+ index = new_halfdrop.tolist().index(CPs[counter]) #?
+
+ new_halfdrop = new_halfdrop[:index+1]
+
+ if 0:
+ xCP_index = [i for i, j in enumerate(xnew_halfdrop) if j == xCP]
+ #print('xCP_index is: ',xCP_index)
+ yCP_index = [i for i, j in enumerate(ynew_halfdrop) if j == yCP]
+ #print('yCP_index is: ',yCP_index)
+
+ new_halfdrop = np.zeros((len(xnew_halfdrop),2))
+ for n in range(len(xnew_halfdrop)):
+ new_halfdrop[n,[0]]=xnew_halfdrop[n]
+ new_halfdrop[n,[1]]=ynew_halfdrop[n]
+ #print('first 3 points of new_halfdrop are: ',new_halfdrop[:3])
+ #print('length of new_halfdrop is: ',len(new_halfdrop))
+
+ if xCP_index == yCP_index:
+ if new_halfdrop[xCP_index[0]+1][1]>new_halfdrop[xCP_index[0]-1][1]:
+ new_halfdrop = new_halfdrop[xCP_index[0]:]
+ else:
+ new_halfdrop = new_halfdrop[:xCP_index[0]+1]
+ else:
+ raise_error = True
+ for x in xCP_index:
+ for y in yCP_index:
+ if x==y:
+ raise_error = False
+ xCP_index = [x]
+ yCP_index = [y]
+ #print('indexes of the CP are: ',xCP_index[0],', ',yCP_index[0])
+ if new_halfdrop[xCP_index[0]+1][1]>new_halfdrop[xCP_index[0]-1][1]:
+ new_halfdrop = new_halfdrop[xCP_index[0]:]
+ else:
+ new_halfdrop = new_halfdrop[:xCP_index[0]+1]
+ if raise_error == True:
+ print('The index of the contact point x value is: ', new_halfdrop[xCP_index])
+ print('The index of the contact point y value is: ', new_halfdrop[yCP_index])
+ raise 'indexes of x and y values of the contact point are not the same'
+
+ if 0:
+ # order all halfdrop points using two-opt (the slower method)
+
+ # before any ordering is done, chop off the surface line that is past the drop edge
+ del_indexes = []
+ for index,coord in enumerate(bottom):
+ if coord[0]>max(xtop):
+ del_indexes.append(index)
+ if coord[1] ybot[yCP_index-1]:
+ new_bot = np.zeros((len(xbot[yCP_index:]),2))
+ for n in range(len(new_bot)):
+ new_bot[n,[0]] = xbot[xCP_index+n]
+ new_bot[n,[1]] = ybot[yCP_index+n]
+ else:
+ new_bot = np.zeros((len(xbot[:yCP_index]),2))
+ for n in range(len(new_bot)):
+ new_bot[n,[0]] = xbot[n]
+ new_bot[n,[1]] = ybot[n]
+ except:
+ try:
+ if ybot[yCP_index] > ybot[yCP_index-2]:
+ new_bot = np.zeros((len(xbot[yCP_index:]),2))
+ for n in range(len(new_bot)):
+ new_bot[n,[0]] = xbot[xCP_index+n]
+ new_bot[n,[1]] = ybot[yCP_index+n]
+ else:
+ new_bot = np.zeros((len(xbot[:yCP_index]),2))
+ for n in range(len(new_bot)):
+ new_bot[n,[0]] = xbot[n]
+ new_bot[n,[1]] = ybot[n]
+ except:
+ print('xCP_indexs are: ', xCP_indexs)
+ print('yCP_indexs are: ', yCP_indexs)
+ raise 'indexes of x and y values of the contact point are not the same'
+ new_halfdrop = np.concatenate((new_top,new_bot))
+
+ if 0: # order the points so that the baseline can be removed
+ # before any ordering is done, chop off the surface line that is past the drop edge
+ del_indexes = []
+ for index,coord in enumerate(halfdrop):
+ if coord[0]>max(xtop):
+ del_indexes.append(index)
+ #halfdrop = np.delete(halfdrop,del_indexes)
+ xhalfdrop = np.delete(xhalfdrop,del_indexes)
+ yhalfdrop = np.delete(yhalfdrop,del_indexes)
+ #print('shape of another_halfdrop is: '+ str(type(another_halfdrop)))
+ #print('first few points of halfdrop are: ',halfdrop[:3])
+
+
+
+ # order half contour points
+ xx,yy = sort_to_line(xhalfdrop,yhalfdrop)
+ add_top = False
+ #print('length of halfdrop is: ', len(halfdrop))
+ #print('length of xbottom is: ', len(xbottom))
+
+ #if xx[0]<1: # then graph starts at the top
+ surface_past_drop_index = []
+ for n,x in enumerate(xx):
+ if x>max(xtop):
+ surface_past_drop_index.append(n)
+ #xx = xx[:max(xtop)point]
+ #print('Indexes of contour points past drop: ',surface_past_drop_index)
+
+
+ # if the sort method will not work
+ if len(xx) < len(xhalfdrop): # assumes that the error is on the surface somewhere, so uses bottom of contour
+ add_top = True
+ print()
+ print('sort_to_line is not utilising the full contour, alternate ordering method being used')
+ print('check bottom 10% of contour...')
+ # this method is much slower than the above, so use as few points as possible
+ bot_list = []
+ for n in range(len(xbottom)):
+ if xbottom[n] ybot[yCP_index[0]-1]:
+ new_bot = np.zeros((len(xbot[yCP_index[0]:]),2))
+ for n in range(len(new_bot)):
+ new_bot[n,[0]] = xbot[xCP_index[0]+n]
+ new_bot[n,[1]] = ybot[yCP_index[0]+n]
+ else:
+ new_bot = np.zeros((len(xbot[:yCP_index[0]]),2))
+ for n in range(len(new_bot)):
+ new_bot[n,[0]] = xbot[n]
+ new_bot[n,[1]] = ybot[n]
+ else:
+ raise 'indexes of x and y values of the contact point are not the same'
+
+ # combine new_bot with top_array to give the isolated drop contour without surface
+ if 0:
+ top_array = np.zeros((len(xtop),2))
+ for n in range(len(xtop)):
+ top_array[n,[0]] = xtop[n]
+ top_array[n,[1]] = ytop[n]
+
+ new_halfdrop = np.concatenate((top,new_bot))
+
+ # re-order to check that the error was at the surface line
+ xx,yy = sort_to_line(new_halfdrop[:,[0]],new_halfdrop[:,[1]])
+ if len(xx)new_halfdrop[xCP_index[0]-1][1]:
+ new_halfdrop = new_halfdrop[xCP_index[0]:]
+ else:
+ new_halfdrop = new_halfdrop[:xCP_index[0]+1]
+ else:
+ raise_error = True
+ for x in xCP_index:
+ for y in yCP_index:
+ if x==y:
+ raise_error = False
+ xCP_index = [x]
+ yCP_index = [y]
+ #print('indexes of the CP are: ',xCP_index[0],', ',yCP_index[0])
+ if new_halfdrop[xCP_index[0]+1][1]>new_halfdrop[xCP_index[0]-1][1]:
+ new_halfdrop = new_halfdrop[xCP_index[0]:]
+ else:
+ new_halfdrop = new_halfdrop[:xCP_index[0]+1]
+ if raise_error == True:
+ print('The index of the contact point x value is: ', new_halfdrop[xCP_index])
+ print('The index of the contact point y value is: ', new_halfdrop[yCP_index])
+ raise 'indexes of x and y values of the contact point are not the same'
+
+ if counter == 0:
+ drops[counter] = new_halfdrop[::-1]
+ else:
+ drops[counter] = new_halfdrop
+
+ if display: #display
+ jet= plt.get_cmap('jet')
+ colors = iter(jet(np.linspace(0,1,len(new_halfdrop))))
+ for k in new_halfdrop:
+ plt.plot(k[0],k[1], 'o',color=next(colors))
+ plt.title('outputted halfdrop')
+ plt.axis('equal')
+ plt.show()
+ plt.close()
+
+ counter+=1
+
+ # reflect the left drop and combine left and right
+
+ profile = np.empty((len(drops[0])+len(drops[1]),2))
+ for i,n in enumerate(drops[0]):
+ flipped = n
+ flipped[0] = -flipped[0]
+ profile[i] = flipped
+ for i,n in enumerate(drops[1]):
+ profile[len(drops[0])+i] = n
+ CPs[0][0] = -CPs[0][0]
+
+ if display:
+ jet= plt.get_cmap('jet')
+ colors = iter(jet(np.linspace(0,1,len(profile))))
+ for k in profile:
+ plt.plot(k[0],k[1], 'o',color=next(colors))
+ plt.title('final output')
+ #plt.plot(profile[:,0],profile[:,1],'b')
+ plt.show()
+ plt.close()
+
+ plt.title('final output')
+ plt.plot(profile[:,0],profile[:,1],'b')
+ plt.show()
+ plt.close()
+
+ # flip upside down again so that contour follows image indexing
+ # and transform to the right so that x=0 is no longer in line with apex
+ for coord in profile:
+ coord[1] = -coord[1]
+ coord[0] = coord[0] + xapex
+ for n in [0,1]:
+ CPs[n][1] = -CPs[n][1]
+ CPs[n][0] = CPs[n][0] + xapex
+
+ # flip original contour back to original orientation
+ for coord in coords:
+ coord[1] = -coord[1]
+
+ return profile,CPs
+
def find_contours(image):
    """
    Version-tolerant wrapper around cv2.findContours() for OpenCV 2.x, 3.x and 4.x.

    cv2.findContours() treats non-zero pixels as 1 and zero pixels as 0, so the
    detected edges lie on the boundary between non-zero and zero pixel regions.
    `image` must be a single-channel numpy array.

    Returns a list of (n, 2) numpy arrays, sorted by descending arc length.
    """
    if len(image.shape) > 2:
        raise ValueError('`image` must be a single channel image')

    args = (image, cv2.RETR_LIST, cv2.CHAIN_APPROX_TC89_KCOS)
    if CV2_VERSION >= (4, 0, 0):
        # OpenCV 4.0 reverted to the pre-3.2 two-value return signature.
        contours, hierarchy = cv2.findContours(*args)
    elif CV2_VERSION >= (3, 2, 0):
        # OpenCV 3.2+ returns (modified image, contours, hierarchy).
        _, contours, hierarchy = cv2.findContours(*args)
    else:
        contours, hierarchy = cv2.findContours(*args)

    # Each contour has shape (n, 1, 2) — a column vector per point; flatten
    # that middle axis to get plain (n, 2) coordinate arrays.
    reshaped = [contour.reshape(-1, 2) for contour in contours]

    # Longest (by arc length) contour first.
    return sorted(reshaped, key=lambda c: cv2.arcLength(c, False), reverse=True)
+
def extract_edges_CV(img):
    '''
    Extract the drop-profile edge coordinates from a BGR image.

    Otsu-thresholds the grayscale image, takes the longest contour as the drop
    profile, drops points within IGNORE_EDGE_MARGIN of the image border, and
    returns the de-duplicated [x, y] coordinates (input order preserved) as an
    (n, 2) numpy array.
    '''
    IGNORE_EDGE_MARGIN = 1
    img = img.astype("uint8")
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    #ret, thresh = cv2.threshold(gray,threshValue,255,cv2.THRESH_BINARY)
    ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # NOTE(review): assumes the OpenCV 2.x/4.x two-value return signature of
    # cv2.findContours; under OpenCV 3.x this unpacking would fail (see
    # find_contours() for a version-tolerant wrapper).
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    # Each contour has shape (n, 1, 2) where 'n' is the number of points. Presumably this is so each
    # point is a size 2 column vector, we don't want this so reshape it to a (n, 2)
    contours = [contour.reshape(contour.shape[0], 2) for contour in contours]

    # Sort the contours by arc length, descending order
    contours.sort(key=lambda c: cv2.arcLength(c, False), reverse=True)

    # Assume that the drop is the largest (longest) contour
    drop_profile = contours[0]

    # Ignore points of the drop profile near the edges of the drop image
    width, height = img.shape[1::-1]
    if not (width < IGNORE_EDGE_MARGIN or height < IGNORE_EDGE_MARGIN):
        mask = ((IGNORE_EDGE_MARGIN < drop_profile[:, 0]) & (drop_profile[:, 0] < width - IGNORE_EDGE_MARGIN) &
                (IGNORE_EDGE_MARGIN < drop_profile[:, 1]) & (drop_profile[:, 1] < height - IGNORE_EDGE_MARGIN))
        drop_profile = drop_profile[mask]

    # De-duplicate while preserving order. A set gives O(1) membership tests;
    # the previous `list(coord) not in output` scan was O(n^2) in contour size.
    seen = set()
    output = []
    for coord in drop_profile:
        key = (coord[0], coord[1])
        if key not in seen:
            seen.add(key)
            output.append(list(coord))
    return np.array(output)
+
def ellipse_line_intersection(xc, yc, a, b, theta, x0, y0, x1, y1, display=False):
    """
    Finds the intersection between an ellipse and a line defined by two points.

    Parameters:
        xc (float): X-coordinate of the center of the ellipse.
        yc (float): Y-coordinate of the center of the ellipse.
        a (float): Length of the semi-major axis of the ellipse.
        b (float): Length of the semi-minor axis of the ellipse.
        theta (float): Angle of rotation of the ellipse in radians.
        x0 (float): X-coordinate of the first point along the line.
        y0 (float): Y-coordinate of the first point along the line.
        x1 (float): X-coordinate of the second point along the line.
        y1 (float): Y-coordinate of the second point along the line.
        display (Boolean): Set to True to show figures.

    Returns:
        None if the line misses the ellipse; a 1-tuple ((x, y, grad),) for a
        tangent line; otherwise a 2-tuple ((x, y, grad), (x, y, grad)) where
        grad is the ellipse slope dy/dx at the intercept.

    Note: a vertical line (x0 == x1) raises ZeroDivisionError — the
    slope/intercept form used here cannot represent it.
    """
    # Plot the ellipse and the baseline
    if display:
        ell = Ellipse(
            [xc, yc], 2 * a, 2 * b, theta,
            facecolor=(1, 0, 0, 0), edgecolor=(1, 0, 0, 1))
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.add_patch(ell)
        ax.plot(x0, y0, 'yo')
        ax.plot(x1, y1, 'go')
        ax.plot()
        plt.title('Drawn ellipse with line points in green and yellow')
        plt.gca().invert_yaxis()
        plt.show()
        plt.close()

    # Calculate the slope and intercept of the line
    m = (y1 - y0) / (x1 - x0)
    c = y0 - m * x0

    # equation for ellipse in quadratic coefficient form
    A = np.sin(theta)**2/b**2 + np.cos(theta)**2/a**2
    B = -2*np.sin(theta)*np.cos(theta)/b**2 + 2*np.sin(theta)*np.cos(theta)/a**2
    C = np.cos(theta)**2/b**2 + np.sin(theta)**2/a**2
    D = - 2*xc*np.sin(theta)**2/b**2 + 2*yc*np.sin(theta)*np.cos(theta)/b**2 - 2*xc*np.cos(theta)**2/a**2 - 2*yc*np.sin(theta)*np.cos(theta)/a**2
    E = 2*xc*np.sin(theta)*np.cos(theta)/b**2 - 2*yc*np.cos(theta)**2/b**2 - 2*xc*np.sin(theta)*np.cos(theta)/a**2 - 2*yc*np.sin(theta)**2/a**2
    F = -1 + xc**2*np.sin(theta)**2/b**2 - 2*xc*yc*np.sin(theta)*np.cos(theta)/b**2 + yc**2*np.cos(theta)**2/b**2 + xc**2*np.cos(theta)**2/a**2 + 2*xc*yc*np.sin(theta)*np.cos(theta)/a**2 + yc**2*np.sin(theta)**2/a**2

    def _gradient(x_int, y_int):
        # Ellipse slope dy/dx at (x_int, y_int) by implicit differentiation of
        # the rotated-ellipse equation. Shared by both branches below so the
        # tangent and two-intercept cases use one consistent formula.
        # BUG FIX: the original two-intercept code mixed x_int_1 into the
        # grad_2 expression, and the tangent branch used a different,
        # degenerate formula (0/0 at theta == 0).
        num = (2*((-xc + x_int)*np.sin(theta) + (-yc + y_int)*np.cos(theta))*np.sin(theta)/b**2
               + 2*((-xc + x_int)*np.cos(theta) + (-yc + y_int)*np.sin(theta))*np.cos(theta)/a**2)
        den = (2*((-xc + x_int)*np.sin(theta) + (-yc + y_int)*np.cos(theta))*np.cos(theta)/b**2
               + 2*((-xc + x_int)*np.cos(theta) + (-yc + y_int)*np.sin(theta))*np.sin(theta)/a**2)
        return -num / den

    # sub (mx + b) in for y, expand and simplify
    A_new = A + B*m + C*m**2
    B_new = B*c + 2*C*m*c + D + E*m
    C_new = C*c**2 + E*c + F

    # Calculate the discriminant of the quadratic equation
    disc = B_new**2 - 4 * A_new * C_new

    # If the discriminant is negative, there are no intercepts
    if disc < 0:
        print("The line does not intersect the ellipse.")
        return None

    # If the discriminant is zero, there is one (tangent) intercept
    elif disc == 0:
        x_int = -B_new / (2 * A_new)
        y_int = m * x_int + c
        return ((x_int, y_int, _gradient(x_int, y_int)),)

    # If the discriminant is positive, there are two intercepts
    else:
        x_int_1 = (-B_new + np.sqrt(disc)) / (2 * A_new)
        x_int_2 = (-B_new - np.sqrt(disc)) / (2 * A_new)
        y_int_1 = m * x_int_1 + c
        y_int_2 = m * x_int_2 + c

        return ((x_int_1, y_int_1, _gradient(x_int_1, y_int_1)),
                (x_int_2, y_int_2, _gradient(x_int_2, y_int_2)))
+
def ellipse_closest_point(xp, yp, xc, yc, a, b, th, n=1000, display=False):
    """
    Find the point on a rotated ellipse edge closest to a reference point by
    discretising the edge into `n` samples.

    Parameters:
        xp (float): The x-coordinate of the reference point.
        yp (float): The y-coordinate of the reference point.
        xc (float): The x-coordinate of the ellipse's center.
        yc (float): The y-coordinate of the ellipse's center.
        a (float): The semi-major axis length of the ellipse.
        b (float): The semi-minor axis length of the ellipse.
        th (float): The rotation angle of the ellipse in degrees.
        n (int): The number of discrete points used to draw the ellipse.
        display (Boolean): Set to True to output figures and information.

    Returns:
        (distance, [x, y]): the distance between the reference point and the
        nearest sampled edge point, and that point's coordinates. Accuracy is
        limited by the discretisation `n`.
    """
    t = np.linspace(0, 2 * np.pi, n)

    th_rad = np.deg2rad(th)  # hoisted: used in all four terms below
    x = xc + a * np.cos(t) * np.cos(th_rad) - b * np.sin(t) * np.sin(th_rad)
    y = yc + a * np.cos(t) * np.sin(th_rad) + b * np.sin(t) * np.cos(th_rad)

    dist = np.sqrt((x - xp) ** 2 + (y - yp) ** 2)
    # vectorized argmin; the previous list(dist).index(min(dist)) made two
    # O(n) Python-level passes plus a list copy
    idx = int(np.argmin(dist))

    if display:
        plt.figure(1)
        plt.plot(x, y, '-', xp, yp, 'r+', x[idx], y[idx], 'r+')
        plt.xlabel('x')
        plt.ylabel('y')
        plt.title('Ellipse, Point, and Zeros')

        plt.figure(2)
        plt.plot(t, dist, 'm.', t[idx], dist[idx], 'cx')
        plt.xlabel('t')
        plt.ylabel('Distance')
        plt.title('Distance Function')

        print(f'xp: {xp}, x[idx]: {x[idx]}')
        print(f'yp: {yp}, y[idx]: {y[idx]}')
        print('Error is: ', dist[idx])

        plt.show()
        plt.close()

    return dist[idx], [x[idx], y[idx]]
+
def ellipse_fit_errors(contour, h, k, a, b, theta, display):
    """
    Error measures of a contour against a fitted rotated/translated ellipse.

    Parameters:
        contour (array): The array of x, y coordinate points.
        h (float): The x-coordinate of the ellipse's center.
        k (float): The y-coordinate of the ellipse's center.
        a (float): The semi-major axis length of the ellipse.
        b (float): The semi-minor axis length of the ellipse.
        theta (float): The rotation angle of the ellipse in degrees.
        display (boolean): kept for interface compatibility (unused here).

    Returns:
        dictionary: The MAE, MSE, RMSE, and maximum error of the contour as
        compared against the fitted ellipse.

    Raises:
        ValueError: if `contour` is empty.
    """
    if len(contour) == 0:
        raise ValueError('contour must contain at least one point')

    # distance from each contour point to the nearest sampled ellipse-edge point
    errors = [ellipse_closest_point(point[0], point[1], h, k, a, b, theta)[0]
              for point in contour]

    n = len(errors)
    # distances are non-negative, so no abs() is needed for the MAE;
    # compute the MSE once and reuse it for the RMSE
    mse = sum(e * e for e in errors) / n

    return {
        'MAE': sum(errors) / n,
        'MSE': mse,
        'RMSE': np.sqrt(mse),
        'Maximum error': max(errors),
    }
+
def ellipse_fit_img(img, display=False):
    """Fit an ellipse to the drop contour detected in `img`.

    For best results, preprocessing must be done before calling this function.

    Parameters:
        img: BGR image (numpy array) containing a single sessile drop.
        display (bool): Set to True to show diagnostic figures.

    Returns:
        CA: list of the two contact angles in degrees (left, right).
        intercepts: [[x, y], [x, y]] baseline/ellipse intercepts (left, right).
        t: centre of the fitted ellipse.
        (a, b): semi-major and semi-minor axis lengths.
        phi_deg: rotation angle of the ellipse in degrees.
        errors: dict of MAE/MSE/RMSE/max error of the contour vs the ellipse.
        timings: dict of timing breakdowns.
    """
    # begin with method specific preprocessing of img data
    start_time = time.time()

    edges_pts = extract_edges_CV(img)  # array of x,y coords where lines are detected

    if display:
        plt.imshow(img)
        plt.plot(edges_pts[:, 0], edges_pts[:, 1])
        plt.title('drop found by hough transform')
        plt.show()
        plt.close()

    drop, CPs = prepare_hydrophobic(edges_pts, display)

    # define baseline as between the two contact points
    x = drop[:, 0]
    y = drop[:, 1]
    rise = CPs[1][1] - CPs[0][1]
    run = CPs[1][0] - CPs[0][0]
    slope = rise / run
    baseline = [(CPs[0][0], CPs[0][1]), slope]
    c = CPs[0][1] - (slope * CPs[0][0])
    baseline_x = np.linspace(1, img.shape[1], 100)
    baseline_y = slope * baseline_x + c

    # timers
    fit_preprocessing_time = time.time() - start_time
    fit_start_time = time.time()

    # Center estimates (not consumed by fit_ellipse; kept for reference)
    # x estimate is where between the lowest and highest points of the top section for a hydrophobic drop
    x_m = min(x) + (max(x) - min(x)) / 2
    # for full contour, y estimate is the halfway between max y and min y
    y_m = min(y) + ((max(y) - min(y)) / 2)

    # fit
    avec = fit_ellipse(x, y)
    (a, b), phi_deg, t = ell_parameters(avec)

    ell = Ellipse(
        t, 2 * a, 2 * b, phi_deg,
        facecolor=(1, 0, 0, 0), edgecolor=(1, 0, 0, 1))

    if display:
        print('centre points: ' + str(t))
        print('a and b: ' + str(a) + ', ' + str(b))
        print('phi (°): ' + str(phi_deg))

        # plot
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.add_patch(ell)
        ax.scatter(x, y)
        ax.plot(baseline_x, baseline_y)
        ax.plot(CPs[0][0], CPs[0][1], 'yo')
        ax.plot(CPs[1][0], CPs[1][1], 'go')
        ax.plot()
        plt.title('Fitted ellipse')
        plt.imshow(img)
        plt.show()
        plt.close()

    # Find intercepts, and gradients at intercepts
    outputs = ellipse_line_intersection(t[0], t[1], a, b, math.radians(phi_deg), CPs[0][0], CPs[0][1], CPs[1][0], CPs[1][1])
    left, right = list(sorted(outputs, key=lambda pt: pt[0]))
    m_left, m_right = left[2], right[2]
    intercepts = [[left[0], left[1]], [right[0], right[1]]]

    CA = []
    # BUG FIX: `first` was reset to True inside the loop, so the
    # `first == False` (right-hand) branches were unreachable and both
    # angles used the left-hand formula. It now distinguishes the first
    # (left) intercept from the second (right).
    first = True
    for output in outputs:
        # NOTE(review): `m` is the difference of the two slopes, not the
        # tangent of the angle between the lines — adequate only when the
        # baseline is near-horizontal; confirm for tilted baselines.
        m = output[2] - slope

        if output[1] > t[1] and first == True:  # high CA angle left
            #CA.append(math.degrees(np.pi+np.arctan(m)))
            CA.append(180 - abs(math.degrees(np.arctan(m))))
        elif output[1] > t[1] and first == False:  # high CA angle right
            CA.append(abs(math.degrees(np.arctan(m))))
        elif output[1] < t[1] and first == True:  # low CA angle left
            CA.append(math.degrees(np.arctan(m)))
        elif output[1] < t[1] and first == False:  # low CA angle right
            CA.append(math.degrees(np.pi + np.arctan(m)))

        first = False

    #if CA[0]!=CA[1]:
    #    CA[1] = 180 - CA[1]

    fit_time = time.time() - fit_start_time

    # using MAE, MSE, RMSE, max_error as error measure
    errors = ellipse_fit_errors(drop, t[0], t[1], a, b, phi_deg, display)

    analysis_time = time.time() - start_time

    timings = {}
    timings['method specific preprocessing time'] = fit_preprocessing_time
    timings['fit time'] = fit_time
    timings['analysis time'] = analysis_time

    return CA, intercepts, t, (a, b), phi_deg, errors, timings
+
def ellipse_fit(drop, display=False):
    """Fit an ellipse to an ordered drop contour and compute contact angles.

    For best results, preprocessing must be done before calling this function.

    Parameters:
        drop: (n, 2) array of contour points ordered from the left contact
            point to the right contact point (the two end points are taken
            as the contact points).
        display (bool): Set to True to print/show diagnostics.

    Returns:
        CA, intercepts, t, (a, b), phi_deg, errors, timings — same layout as
        ellipse_fit_img().
    """
    # begin with method specific preprocessing of drop data
    start_time = time.time()
    CPs = [drop[0], drop[-1]]  # contact points assumed to be the contour end points

    # define baseline as between the two contact points
    x, y = drop[:, 0], drop[:, 1]
    rise = CPs[1][1] - CPs[0][1]
    run = CPs[1][0] - CPs[0][0]
    slope = rise / run
    baseline = [(CPs[0][0], CPs[0][1]), slope]
    c = CPs[0][1] - (slope * CPs[0][0])
    baseline_x = np.linspace(1, max(drop[:, 0]), 100)
    baseline_y = slope * baseline_x + c

    # fit
    fit_ell = EllipseModel()
    fit_ell.estimate(drop)
    # NOTE(review): scikit-image's EllipseModel reports theta in radians,
    # but this value is treated as degrees below (math.radians conversion
    # before ellipse_line_intersection) — verify against skimage docs.
    xc, yc, a, b, phi_deg = fit_ell.params
    t = [xc, yc]

    ell = Ellipse(
        t, 2 * a, 2 * b, phi_deg,
        facecolor=(1, 0, 0, 0), edgecolor=(1, 0, 0, 1), label='Fitted ellipse')

    if display:
        print('drop contour: ', drop)
        print('centre points: ' + str(t))
        print('a and b: ' + str(a) + ', ' + str(b))
        print('phi (°): ' + str(phi_deg))

    if 0:  # plot
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.add_patch(ell)
        ax.scatter(x, y, label='contour')
        ax.plot(baseline_x, baseline_y, label='baseline')
        ax.plot(CPs[0][0], CPs[0][1], 'yo', label='left contact point')
        ax.plot(CPs[1][0], CPs[1][1], 'go', label='right contact point')
        ax.plot()
        ax.legend()
        plt.title('Fitted ellipse')
        plt.axis('equal')
        plt.show()
        plt.close()

    # Find intercepts, and gradients at intercepts
    outputs = ellipse_line_intersection(t[0], t[1], a, b, math.radians(phi_deg), CPs[0][0], CPs[0][1], CPs[1][0], CPs[1][1])
    left, right = list(sorted(outputs, key=lambda pt: pt[0]))
    m_left, m_right = left[2], right[2]
    intercepts = [[left[0], left[1]], [right[0], right[1]]]

    CA = []
    # BUG FIX: `first` was reset to True inside the loop, so the
    # `first == False` (right-hand) branches were unreachable and both
    # angles used the left-hand formula.
    first = True
    for output in outputs:
        m = output[2] - slope

        if output[1] > t[1] and first == True:  # high CA angle left
            #CA.append(math.degrees(np.pi+np.arctan(m)))
            CA.append(180 - abs(math.degrees(np.arctan(m))))
        elif output[1] > t[1] and first == False:  # high CA angle right
            CA.append(abs(math.degrees(np.arctan(m))))
        elif output[1] < t[1] and first == True:  # low CA angle left
            CA.append(math.degrees(np.arctan(m)))
        elif output[1] < t[1] and first == False:  # low CA angle right
            CA.append(math.degrees(np.pi + np.arctan(m)))

        first = False

    #if CA[0]!=CA[1]:
    #    CA[1] = 180 - CA[1]

    fit_time = time.time() - start_time

    # using MAE, MSE, RMSE, max_error as error measure
    errors = ellipse_fit_errors(drop, t[0], t[1], a, b, phi_deg, display)

    analysis_time = time.time() - start_time

    timings = {}
    timings['fit time'] = fit_time
    timings['analysis time'] = analysis_time

    return CA, intercepts, t, (a, b), phi_deg, errors, timings
+
# Disabled manual smoke test for ellipse_fit_img(): flip the guard to 1 and
# point IMG_PATH at a local sample image to exercise the full pipeline from
# the command line. Intentionally dead by default.
if 0:
    IMG_PATH = '../RICOphobic_cropped.png'
    img = cv2.imread(IMG_PATH)

    angles, intercepts, center, (a,b), theta, errors, timings = ellipse_fit_img(img, display=True)
    print('angles: ', angles)
    print('intercepts: ', intercepts)
    print('center coordinates: ', center)
    print('a: ',a)
    print('b: ',b)
    print('angle of ellipse rotation: ', theta)
    print('errors: ',errors)
    print('timings: ', timings)
diff --git a/modules/extract_profile.py b/modules/extract_profile.py
new file mode 100644
index 0000000..67bbedc
--- /dev/null
+++ b/modules/extract_profile.py
@@ -0,0 +1,867 @@
+#!/usr/bin/env python
+#coding=utf-8
+from __future__ import print_function
+import numpy as np
+import cv2
+import matplotlib.pyplot as plt
+from sklearn.cluster import OPTICS # DS 7/6/21 - for clustering algorithm
+# import time
+# import datetime
+from .preprocessing import extract_edges_CV
+
+BLUR_SIZE = 3
+VERSION_CV2 = cv2.__version__
+
def extract_drop_profile(raw_experiment, user_inputs):
    """Extract the drop contour from raw_experiment.cropped_image.

    Results are stored on the experiment object:
        raw_experiment.contour — (n, 2) array of contour coordinates
        raw_experiment.ret     — threshold value used

    user_inputs.threshold_method selects manual ("User-selected") or
    Otsu-based ("Automated") thresholding.
    """
    if user_inputs.threshold_method == "User-selected":
        # profile_edges = detect_edges(raw_experiment.cropped_image, raw_experiment, user_inputs.drop_region)
        raw_experiment.contour, raw_experiment.ret = detect_edges(
            raw_experiment.cropped_image, raw_experiment,
            user_inputs.drop_region, 1, user_inputs.threshold_val)

        if 1:  # debug display of the extracted contour
            _show_extracted_profile(raw_experiment)

    elif user_inputs.threshold_method == "Automated":
        if raw_experiment.ret is None:  # no threshold yet: let Otsu pick one
            # (sic: `return_thresholed_value` is the parameter name declared
            # in the preprocessing module)
            raw_experiment.contour, raw_experiment.ret = extract_edges_CV(
                raw_experiment.cropped_image, return_thresholed_value=True)

            if 1:  # debug display of the extracted contour
                _show_extracted_profile(raw_experiment)
        else:
            # if a threshold value has been selected then use this
            raw_experiment.contour = extract_edges_CV(
                raw_experiment.cropped_image, threshold_val=raw_experiment.ret,
                return_thresholed_value=False)

    #needle_crop = image_crop(raw_experiment.image, user_inputs.needle_region)
    #raw_experiment.needle_data, ret = detect_edges(needle_crop, raw_experiment, user_inputs.needle_region, raw_experiment.ret, 2)

    # # detect needle edges
    # needle_crop = image_crop(raw_experiment.image, user_inputs.needle_region)
    # raw_experiment.needle_data = detect_edges(needle_crop, user_inputs.needle_region)


def _show_extracted_profile(raw_experiment):
    # Shared debug plot (was duplicated in both threshold branches).
    plt.imshow(raw_experiment.cropped_image)
    plt.plot(raw_experiment.contour[:, 0], raw_experiment.contour[:, 1], 'r,')
    # typo fix: was 'Theshold'
    plt.title('Extracted drop profile\nThreshold value of : ' + str(raw_experiment.ret))
    plt.axis('equal')
    plt.show()
    plt.close()
+
def image_crop(image, points): # loaded in conan.py
    """Crop `image` to the rectangle spanned by two corner points.

    points = [(x0, y0), (x1, y1)]; rows y0:y1 and columns x0:x1 are kept
    (coordinates are truncated to int).
    """
    (x0, y0), (x1, y1) = points
    return image[int(y0):int(y1), int(x0):int(x1)]
+
def detect_edges(image, raw_experiment, points, n_contours, threshValue):
    """
    Detect the drop contour in `image` by binary thresholding.

    Parameters:
        image: cropped drop image (BGR is converted to grayscale).
        raw_experiment: experiment object; only .image.shape is read here.
        points: crop-region points. NOTE(review): immediately shadowed by a
            local list below, so the argument value is never used.
        n_contours (int): how many of the longest contours to process.
        threshValue: threshold passed to cv2.threshold.

    Returns:
        (contour_trimmed, ret): the border-trimmed contour as an (n, 2)
        float array, and the threshold value used.
    """
    if len(image.shape) != 2:
        # convert BGR to single-channel grayscale before thresholding
        image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)

    if 1:
        blur = cv2.GaussianBlur(image,(BLUR_SIZE,BLUR_SIZE),0) # apply Gaussian blur to drop edge

        # if ret == -1:
        ret, thresh = cv2.threshold(blur,threshValue,255,cv2.THRESH_BINARY)#+cv2.THRESH_OTSU) # calculate thresholding
        # else:
        # ret, thresh = cv2.threshold(blur,ret,255,cv2.THRESH_BINARY) # calculate thresholding
        # these values seem to agree with
        # - http://www.academypublisher.com/proc/isip09/papers/isip09p109.pdf
        # - http://stackoverflow.com/questions/4292249/automatic-calculation-of-low-and-high-thresholds-for-the-canny-operation-in-open
        # edges = cv2.Canny(thresh,0.5*ret,ret) # detect edges using Canny edge detection

        # error in PDT code - shouldn't threshold before Canny - otherwise Canny is useless
        #edges = cv2.Canny(blur,0.5*ret,ret) # detect edges using Canny edge detection

        # NOTE(review): assumes the two-value cv2.findContours return
        # signature (OpenCV 2.x / 4.x); would fail under OpenCV 3.x.
        contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)

        #contour_lengths = [length for length in cv2.arcLength(contours,0)] #list to hold all areas
        contour_lengths = [cv2.arcLength(contour,0) for contour in contours] #list to hold all areas

        # contour indexes sorted by arc length, longest first
        indexed_contour_lengths = np.array(contour_lengths).argsort()[::-1]
        indexed_contours_to_return = indexed_contour_lengths[:n_contours]

        image_height = raw_experiment.image.shape[0]  # NOTE(review): unused below

        offset = [0,0]#[0, image.shape[1]]
        points = []
        for index in indexed_contours_to_return:
            current_contour = contours[index][:,0]
            for i in range(current_contour.shape[0]):
                # no-op plus a zero offset — remnants of an earlier
                # coordinate transform, kept for behaviour parity
                current_contour[i,1] = current_contour[i,1]
                current_contour[i,:] = current_contour[i,:] + offset
            #points.append(current_contour[current_contour[:,1].argsort()])

        # NOTE(review): from here on only `current_contour` from the final
        # loop iteration is used — effectively n_contours is treated as 1.
        # First pass: count the points that do NOT lie on the image border.
        size = 0
        cropped_image_height = image.shape[0]
        cropped_image_width = image.shape[1]
        for i in range(current_contour.shape[0]): #Trim edges from contour #current_contour error
            if current_contour[i,0] != 0:
                if current_contour[i,0] != cropped_image_width-1:
                    if current_contour[i,1] != 0:
                        if current_contour[i,1] != (cropped_image_height-1):
                            size = size+1
#        print(current_contour.shape[0])
#        print(size)
        contour_trimmed = np.zeros((size,2))

        # Second pass: copy the surviving (non-border) points.
        index = 0
        for i in range(current_contour.shape[0]):
            if current_contour[i,0] != 0:
                if current_contour[i,0] != cropped_image_width-1:
                    if current_contour[i,1] != 0:
                        if current_contour[i,1] != (cropped_image_height-1):
                            contour_trimmed[index,:] = current_contour[i,:]
                            index = index+1

        contour_x = contour_trimmed[:,0]
        contour_y = contour_trimmed[:,1]

        if 0:  # debug display of the trimmed contour
            plt.axis('equal')
            plt.imshow(image)
            plt.plot(contour_x,contour_y,'rs',markerfacecolor='none')
            plt.show()
            #cv2.imshow('img',image)
            #cv2.imshow('img',thresh)
            #cv2.drawContours(image,contour_trimmed)
            #cv2.drawContours(image,contours,0,(0,255,0),10)
            #cv2.drawContours(image,contours,1,(255,255,0),10)
            #cv2.waitKey(0)

            #plt.imshow(image, origin='upper', cmap = 'gray')

        # find line between first and last contour points to estimate surface line
        if 0:
            contour_pts = contour_trimmed
            N = np.shape(contour_pts)[0]
            A = 1 #50 # maybe lower this?
            #xx = np.concatenate((contour_x[0:A],contour_x[N-A:N+1]))
            #yy = np.concatenate((contour_y[0:A],contour_y[N-A:N+1]))
            xx = np.concatenate((contour_pts[0:A,0],contour_pts[N-A:N+1,0]))
            yy = np.concatenate((contour_pts[0:A,1],contour_pts[N-A:N+1,1]))

            coefficients = np.polyfit(xx, yy, 1)
            line = np.poly1d(coefficients)
            prepared_contour, CPs = prepare_hydrophobic(contour_pts)
            return prepared_contour, ret # returns no surface line
            #return contour_pts, line, ret
        else:
            N = np.shape(contour_trimmed)[0]
            A = 1 #50 # maybe lower this?
            #xx = np.concatenate((contour_x[0:A],contour_x[N-A:N+1]))
            #yy = np.concatenate((contour_y[0:A],contour_y[N-A:N+1]))
            xx = np.concatenate((contour_trimmed[0:A,0],contour_trimmed[N-A:N+1,0]))
            yy = np.concatenate((contour_trimmed[0:A,1],contour_trimmed[N-A:N+1,1]))

            # NOTE(review): `line` is fitted but never used or returned.
            coefficients = np.polyfit(xx, yy, 1)
            line = np.poly1d(coefficients)
            return contour_trimmed.astype(float), ret
    #




    # points = largest_contour[largest_contour[:,1].argsort()]


    # # determines the largest contour.
    # # hierarchy describes parent-child relationship
    # # this routine determines the length of each contour
    # # and returns the largest
    # drop_index = 0
    # maxLength = 0.0
    # for i in range(np.max(hierarchy+1)):
    # length = cv2.arcLength(contours[i],0)
    # # print(i, length)
    # if length > maxLength:
    # maxLength = length
    # drop_index = i


    # # the largest contour
    # largest_contour = contours[drop_index][:,0]

    # # converts the data to (x, y) data where (0, 0) is the lower-left pixel
    # image_height = raw_experiment.image.shape[0]
    # offset = [points[0][0], image_height - points[0][1]]
    # for i in range(largest_contour.shape[0]):
    # largest_contour[i,1] = - largest_contour[i,1]
    # largest_contour[i,:] = largest_contour[i,:] + offset
    # points = largest_contour[largest_contour[:,1].argsort()]

    # return points, ret

# def calculate_needle_diameter(raw_experiment, fitted_drop_data, tolerances):
+
+# def calculate_needle_diameter(raw_experiment, fitted_drop_data, tolerances):
+
def distance(P1, P2):
    """Euclidean distance between two 2D points P1 = (x1, y1) and P2 = (x2, y2)."""
    dx = P1[0] - P2[0]
    dy = P1[1] - P2[1]
    return (dx * dx + dy * dy) ** 0.5
+
def optimized_path(coords, start=None):
    """Greedy nearest-neighbour ordering of 2D points into a continuous path.

    coords: list or array of [x, y] points; start: optional starting point
    (defaults to the first coordinate; must be an element of coords).
    Returns the ordered points as an (n, 2) numpy array.

    BUG FIX: previously, when `coords` was already a list, it was aliased
    (not copied) and the loop emptied the caller's list; a copy is now made.
    https://stackoverflow.com/questions/45829155/sort-points-in-order-to-have-a-continuous-curve-using-python
    """
    if not isinstance(coords, list):
        coords = coords.tolist()
    if start is None:
        start = coords[0]
    pass_by = list(coords)  # work on a copy so the caller's list is untouched
    path = [start]
    pass_by.remove(start)
    while pass_by:
        last = path[-1]
        # squared distance has the same argmin as the true distance,
        # so the sqrt can be skipped
        nearest = min(pass_by,
                      key=lambda p: (p[0] - last[0]) ** 2 + (p[1] - last[1]) ** 2)
        path.append(nearest)
        pass_by.remove(nearest)
    return np.array(path)
+
+from sklearn.cluster import OPTICS # for clustering algorithm
+
+def prepare_hydrophobic(coords,xi=0.8):
+ """takes an array (n,2) of coordinate points, and returns the left and right halfdrops of the contour.
+ xi determines the minimum steepness on the reachability plot that constitutes a cluster boundary of the
+ clustering algorithm.
+ deg is the degree of the polynomial used to describe the shape of the droplet.
+
+ This code is adapted from the prepare module, but this version differs in that it assumes that the drop
+ is hydrophobic."""
+ # scan for clusers to remove noise and circle from lensing effect
+ ################ MAY NEED TO OPTIMIZE eps/xi TO FIND APPROPRIATE GROUPINGS ####################
+ if 0: # turn this off bc using synthetic drops without lensing effect
+ input_contour = coords
+ dic,dic2 = cluster_OPTICS(input_contour,xi=xi),cluster_OPTICS(input_contour,out_style='xy',xi=xi)
+
+ #print("number of groups: ",len(list(dic.keys())))
+
+ jet= plt.get_cmap('jet')
+ colors = iter(jet(np.linspace(0,1,len(list(dic.keys())))))
+ for k in dic.keys():
+ plt.plot(dic2[str(k)+'x'],dic2[str(k)+'y'], 'o',color=next(colors))
+ plt.title(str(len(dic.keys()))+" groups found by clustering")
+ plt.show()
+ plt.close()
+ maxkey=max(dic, key=lambda k: len(dic[k]))
+
+ #print('key to longest dictionary entry is: ',maxkey)
+
+ # take the longest group
+ longest = dic[maxkey]
+
+ # flip contour so that min and max values are correct
+ for coord in coords:
+ coord[1] = -coord[1]
+
+ longest = coords
+
+ #print("first few coordinates of the longest contour: ",longest[:3])
+
+ xlongest = []
+ ylongest = []
+ for i in range(len(longest)):
+ xlongest.append(longest[i][0])
+ ylongest.append(longest[i][1])
+
+ #print("first few x coordinates of the longest contour: ",xlongest[:3])
+ #print("first few y coordinates of the longest contour: ",ylongest[:3])
+
+
+ # Find a appropriate epsilon value for cluster_OPTICS, this will remove noise in the bottom 10% of the drop
+ #. most importantly noise is reduced at contact points.
+
+ # variables in this process are how much and what part of the top of the droplet we use to be representative of
+ # the full contour, and whether we use the max(distance) between points or the average between points, or
+ # a scalar value of either.
+
+ xtop = [] # isolate top 90% of drop
+ ytop = []
+ percent = 0.05
+ #print('Isolate the top ',100-(percent*100),'% of the contour:')
+ for n,y in enumerate(ylongest):
+ if y > min(ylongest) + (max(ylongest) - min(ylongest))*percent:
+ xtop.append(xlongest[n])
+ ytop.append(y)
+ xtop = np.array(xtop)
+ ytop = np.array(ytop)
+
+ top = []
+ for n,x in enumerate(xtop):
+ top.append([xtop[n],ytop[n]])
+ top = np.array(top)
+ top_array = optimized_path(top)
+
+ dists = [] # find the average distance between points
+ for n,co in enumerate(top_array):
+ if 1 min(ylongest) + (max(ylongest) - min(ylongest))*percent:
+ xtop.append(xlongest[n])
+ ytop.append(y)
+ xapex = (max(xtop) + min(xtop))/2
+ #print('The x value of the apex is: ',xapex)
+
+ l_drop = []
+ r_drop = []
+ for n in longest:
+ if n[0] < xapex:
+ l_drop.append(n)
+ if n[0] > xapex:
+ r_drop.append(n)
+ l_drop = np.array(l_drop)
+ r_drop = np.array(r_drop)
+
+
+
+ # transpose both half drops so that they both face right and the apex of both is at 0,0
+ r_drop[:,[0]] = r_drop[:,[0]] - xapex
+ l_drop[:,[0]] = -l_drop[:,[0]] + xapex
+
+ if 0:
+ plt.plot(r_drop[:,[0]], r_drop[:,[1]], 'b,')
+ #plt.show()
+ #plt.close()
+ plt.plot(l_drop[:,[0]], l_drop[:,[1]], 'r,')
+ #plt.gca().set_aspect('equal', adjustable='box')
+ #plt.xlim([470,530])
+ #plt.ylim([-188,-190])
+ plt.show()
+ plt.close()
+
+ #############################
+
+ # the drop has been split in half
+
+ # this system has a user input which gives a rough indication of the contact point and the surface line
+
+ # isolate the bottom 5% of the contour near the contact point
+
+ drops = {}
+ counter = 0
+ crop_drop = {}
+ CPs = {}
+ for halfdrop in [l_drop,r_drop]:
+ xhalfdrop = halfdrop[:,[0]].reshape(len(halfdrop[:,[0]]))
+ yhalfdrop = halfdrop[:,[1]].reshape(len(halfdrop[:,[1]]))
+
+ # isolate the bottom of the drop to help identify contact points (may not need to do this for all scenarios)
+ bottom = []
+ top = [] # will need this later
+ #print('isolate the bottom ',percent*100,'% of the contour:') # percent defined above
+ div_line_value = min(halfdrop[:,[1]]) + (max(halfdrop[:,[1]]) - min(halfdrop[:,[1]]))*percent
+ for n in halfdrop:
+ if n[1] < div_line_value:
+ bottom.append(n)
+ else:
+ top.append(n)
+
+ bottom = np.array(bottom)
+ top = np.array(top)
+
+ xbottom = bottom[:,[0]].reshape(len(bottom[:,[0]]))
+ ybottom = bottom[:,[1]].reshape(len(bottom[:,[1]]))
+ xtop = top[:,[0]].reshape(len(top[:,[0]]))
+ ytop = top[:,[1]].reshape(len(top[:,[1]]))
+
+ #print('max x value of halfdrop is: ',max(xhalfdrop))
+
+ if 0: # plot the bottom 10% of the contour
+ plt.plot(xbottom, ybottom, 'b,')
+ plt.title('bottom 10% of the contour')
+ #plt.xlim([130,200])
+ plt.show()
+ plt.close()
+
+ #### Continue here assuming that the drop is hydrophobic ####
+
+ xCP = min(xbottom)
+ yCP = []
+ for coord in halfdrop:
+ if coord[0]==xCP:
+ yCP.append(coord[1])
+ yCP =min(yCP)
+ #print('The first few coordinates of xhalfdrop are: ', xhalfdrop[:3])
+
+ #print('The coordinates of the contact point are (',xCP,',',yCP,')')
+
+ CPs[counter] = [xCP, yCP]
+ if 1:
+ # order all halfdrop points using optimized_path (very quick but occasionally makes mistakes)
+
+ new_halfdrop = sorted(halfdrop.tolist(), key=lambda x: (-x[0],x[1]))
+ new_halfdrop = optimized_path(new_halfdrop)[::-1]
+ xnew_halfdrop = new_halfdrop[:,[0]].reshape(len(new_halfdrop[:,[0]]))
+ ynew_halfdrop = new_halfdrop[:,[1]].reshape(len(new_halfdrop[:,[1]]))
+
+ # remove surface line past the contact point
+
+ xCP_index = [i for i, j in enumerate(xnew_halfdrop) if j == xCP]
+ #print('xCP_index is: ',xCP_index)
+ yCP_index = [i for i, j in enumerate(ynew_halfdrop) if j == yCP]
+ #print('yCP_index is: ',yCP_index)
+
+ new_halfdrop = np.zeros((len(xnew_halfdrop),2))
+ for n in range(len(xnew_halfdrop)):
+ new_halfdrop[n,[0]]=xnew_halfdrop[n]
+ new_halfdrop[n,[1]]=ynew_halfdrop[n]
+ #print('first 3 points of new_halfdrop are: ',new_halfdrop[:3])
+ #print('length of new_halfdrop is: ',len(new_halfdrop))
+
+ if xCP_index == yCP_index:
+ if new_halfdrop[xCP_index[0]+1][1]>new_halfdrop[xCP_index[0]-1][1]:
+ new_halfdrop = new_halfdrop[xCP_index[0]:]
+ else:
+ new_halfdrop = new_halfdrop[:xCP_index[0]+1]
+ else:
+ raise_error = True
+ for x in xCP_index:
+ for y in yCP_index:
+ if x==y:
+ raise_error = False
+ xCP_index = [x]
+ yCP_index = [y]
+ #print('indexes of the CP are: ',xCP_index[0],', ',yCP_index[0])
+ if new_halfdrop[xCP_index[0]+1][1]>new_halfdrop[xCP_index[0]-1][1]:
+ new_halfdrop = new_halfdrop[xCP_index[0]:]
+ else:
+ new_halfdrop = new_halfdrop[:xCP_index[0]+1]
+ if raise_error == True:
+ print('The index of the contact point x value is: ', new_halfdrop[xCP_index])
+ print('The index of the contact point y value is: ', new_halfdrop[yCP_index])
+ raise 'indexes of x and y values of the contact point are not the same'
+
+ if 0:
+ # order all halfdrop points using two-opt (the slower method)
+
+ # before any ordering is done, chop off the surface line that is past the drop edge
+ del_indexes = []
+ for index,coord in enumerate(bottom):
+ if coord[0]>max(xtop):
+ del_indexes.append(index)
+ if coord[1] ybot[yCP_index-1]:
+ new_bot = np.zeros((len(xbot[yCP_index:]),2))
+ for n in range(len(new_bot)):
+ new_bot[n,[0]] = xbot[xCP_index+n]
+ new_bot[n,[1]] = ybot[yCP_index+n]
+ else:
+ new_bot = np.zeros((len(xbot[:yCP_index]),2))
+ for n in range(len(new_bot)):
+ new_bot[n,[0]] = xbot[n]
+ new_bot[n,[1]] = ybot[n]
+ except:
+ try:
+ if ybot[yCP_index] > ybot[yCP_index-2]:
+ new_bot = np.zeros((len(xbot[yCP_index:]),2))
+ for n in range(len(new_bot)):
+ new_bot[n,[0]] = xbot[xCP_index+n]
+ new_bot[n,[1]] = ybot[yCP_index+n]
+ else:
+ new_bot = np.zeros((len(xbot[:yCP_index]),2))
+ for n in range(len(new_bot)):
+ new_bot[n,[0]] = xbot[n]
+ new_bot[n,[1]] = ybot[n]
+ except:
+ print('xCP_indexs are: ', xCP_indexs)
+ print('yCP_indexs are: ', yCP_indexs)
+ raise 'indexes of x and y values of the contact point are not the same'
+ new_halfdrop = np.concatenate((new_top,new_bot))
+
+ if 0: # order the points so that the baseline can be removed
+ # before any ordering is done, chop off the surface line that is past the drop edge
+ del_indexes = []
+ for index,coord in enumerate(halfdrop):
+ if coord[0]>max(xtop):
+ del_indexes.append(index)
+ #halfdrop = np.delete(halfdrop,del_indexes)
+ xhalfdrop = np.delete(xhalfdrop,del_indexes)
+ yhalfdrop = np.delete(yhalfdrop,del_indexes)
+ #print('shape of another_halfdrop is: '+ str(type(another_halfdrop)))
+ #print('first few points of halfdrop are: ',halfdrop[:3])
+
+
+
+ # order half contour points
+ xx,yy = sort_to_line(xhalfdrop,yhalfdrop)
+ add_top = False
+ #print('length of halfdrop is: ', len(halfdrop))
+ #print('length of xbottom is: ', len(xbottom))
+
+ #if xx[0]<1: # then graph starts at the top
+ surface_past_drop_index = []
+ for n,x in enumerate(xx):
+ if x>max(xtop):
+ surface_past_drop_index.append(n)
+ #xx = xx[:max(xtop)point]
+ #print('Indexes of contour points past drop: ',surface_past_drop_index)
+
+
+ # if the sort method will not work
+ if len(xx) < len(xhalfdrop): # assumes that the error is on the surface somewhere, so uses bottom of contour
+ add_top = True
+ print()
+ print('sort_to_line is not utilising the full contour, alternate ordering method being used')
+ print('check bottom 10% of contour...')
+ # this method is much slower than the above, so use as few points as possible
+ bot_list = []
+ for n in range(len(xbottom)):
+ if xbottom[n] ybot[yCP_index[0]-1]:
+ new_bot = np.zeros((len(xbot[yCP_index[0]:]),2))
+ for n in range(len(new_bot)):
+ new_bot[n,[0]] = xbot[xCP_index[0]+n]
+ new_bot[n,[1]] = ybot[yCP_index[0]+n]
+ else:
+ new_bot = np.zeros((len(xbot[:yCP_index[0]]),2))
+ for n in range(len(new_bot)):
+ new_bot[n,[0]] = xbot[n]
+ new_bot[n,[1]] = ybot[n]
+ else:
+ raise 'indexes of x and y values of the contact point are not the same'
+
+ # combine new_bot with top_array to give the isolated drop contour without surface
+ if 0:
+ top_array = np.zeros((len(xtop),2))
+ for n in range(len(xtop)):
+ top_array[n,[0]] = xtop[n]
+ top_array[n,[1]] = ytop[n]
+
+ new_halfdrop = np.concatenate((top,new_bot))
+
+ # re-order to check that the error was at the surface line
+ xx,yy = sort_to_line(new_halfdrop[:,[0]],new_halfdrop[:,[1]])
+ if len(xx)new_halfdrop[xCP_index[0]-1][1]:
+ new_halfdrop = new_halfdrop[xCP_index[0]:]
+ else:
+ new_halfdrop = new_halfdrop[:xCP_index[0]+1]
+ else:
+ raise_error = True
+ for x in xCP_index:
+ for y in yCP_index:
+ if x==y:
+ raise_error = False
+ xCP_index = [x]
+ yCP_index = [y]
+ #print('indexes of the CP are: ',xCP_index[0],', ',yCP_index[0])
+ if new_halfdrop[xCP_index[0]+1][1]>new_halfdrop[xCP_index[0]-1][1]:
+ new_halfdrop = new_halfdrop[xCP_index[0]:]
+ else:
+ new_halfdrop = new_halfdrop[:xCP_index[0]+1]
+ if raise_error == True:
+ print('The index of the contact point x value is: ', new_halfdrop[xCP_index])
+ print('The index of the contact point y value is: ', new_halfdrop[yCP_index])
+ raise 'indexes of x and y values of the contact point are not the same'
+
+ if counter == 0:
+ drops[counter] = new_halfdrop[::-1]
+ else:
+ drops[counter] = new_halfdrop
+
+ if 0: #display
+ plt.title('outputted halfdrop')
+ plt.plot(new_halfdrop[:,0],new_halfdrop[:,1])
+ plt.show()
+ plt.close()
+
+ counter+=1
+
+ # reflect the left drop and combine left and right
+
+ profile = np.empty((len(drops[0])+len(drops[1]),2))
+ for i,n in enumerate(drops[0]):
+ flipped = n
+ flipped[0] = -flipped[0]
+ profile[i] = flipped
+ for i,n in enumerate(drops[1]):
+ profile[len(drops[0])+i] = n
+ CPs[0][0] = -CPs[0][0]
+
+ if 0:
+ jet= plt.get_cmap('jet')
+ colors = iter(jet(np.linspace(0,1,len(profile))))
+ for k in profile:
+ plt.plot(k[0],k[1], 'o',color=next(colors))
+ plt.title('final output')
+ #plt.plot(profile[:,0],profile[:,1],'b')
+ plt.show()
+ plt.close()
+
+ plt.title('final output')
+ plt.plot(profile[:,0],profile[:,1],'b')
+ plt.show()
+ plt.close()
+
+ # flip upside down again so that contour follows image indexing
+ # and transform to the right so that x=0 is no longer in line with apex
+ for coord in profile:
+ coord[1] = -coord[1]
+ coord[0] = coord[0] + xapex
+ for n in [0,1]:
+ CPs[n][1] = -CPs[n][1]
+ CPs[n][0] = CPs[n][0] + xapex
+
+ # flip contour back to original orientation
+ for coord in coords:
+ coord[1] = -coord[1]
+
+ return profile,CPs
+
def distance(P1, P2):
    """Euclidean distance between the points P1 = (x1, y1) and P2 = (x2, y2)."""
    dx = P1[0] - P2[0]
    dy = P1[1] - P2[1]
    return (dx * dx + dy * dy) ** 0.5
+
def optimized_path(coords, start=None):
    """Greedy nearest-neighbour ordering of 2-D points into a continuous path.

    coords is a list (or numpy array) of [x, y] pairs; start, when given,
    must equal one of the points and becomes the first element of the path.
    Returns an (n, 2) numpy array.  The input sequence is left unmodified.

    https://stackoverflow.com/questions/45829155/sort-points-in-order-to-have-a-continuous-curve-using-python
    """
    # Copy the input: the old code removed points from the caller's list.
    if isinstance(coords, list):
        remaining = list(coords)
    else:
        remaining = coords.tolist()
    if start is None:
        start = remaining[0]
    path = [start]
    remaining.remove(start)  # ValueError if start is not one of the points, as before
    while remaining:
        hx, hy = path[-1][0], path[-1][1]
        # squared distance: same argmin as the true distance, no sqrt needed
        nearest = min(remaining, key=lambda p: (p[0] - hx) ** 2 + (p[1] - hy) ** 2)
        path.append(nearest)
        remaining.remove(nearest)
    return np.array(path)
+
def cluster_OPTICS(sample, out_style='coords', xi=None, eps=None, verbose=0):
    """Cluster an array/list of [[x1,y1],...,[xn,yn]] points with OPTICS.

    If out_style='coords' the returned dict maps each group label to its list
    of points.  If out_style='xy' the dict holds two entries per group, 'Nx'
    and 'Ny', with the group's x and y coordinate lists.

    If xi (float in (0, 1)) is given and eps is None, the xi clustering method
    is used: clusters are bounded where the reachability plot is steeper than
    1 - xi.  If eps (float) is given and xi is None, the dbscan method is
    used, eps being the maximum neighbourhood distance between two samples.
    Exactly one of xi and eps must be provided.

    https://stackoverflow.com/questions/47974874/algorithm-for-grouping-points-in-given-distance
    https://scikit-learn.org/stable/modules/generated/sklearn.cluster.OPTICS.html
    """
    if eps is not None and xi is None:
        # dbscan cluster method so that eps can be set explicitly
        clustering = OPTICS(min_samples=2, cluster_method='dbscan', eps=eps).fit(sample)
    elif xi is not None and eps is None:
        clustering = OPTICS(min_samples=2, xi=xi).fit(sample)
    else:
        # The old code raised a plain string, which is itself a TypeError in
        # Python 3; raise a real exception instead.
        raise ValueError('exactly one of eps and xi must be given, not neither nor both')

    labels = clustering.labels_
    groups = list(set(labels))

    if verbose == 2:
        print(labels)
    elif verbose == 1:
        print(groups)

    # Group the points by label in a single pass (the old version rescanned
    # the whole sample once per group).
    dic = {n: [] for n in groups}
    for label, point in zip(labels, sample):
        dic[label].append(point)

    # Per-group coordinate lists keyed '<label>x' / '<label>y'.
    dic2 = {}
    for k, pts in dic.items():
        dic2[str(k) + 'x'] = [p[0] for p in pts]
        dic2[str(k) + 'y'] = [p[1] for p in pts]

    if out_style == 'coords':
        return dic
    elif out_style == 'xy':
        return dic2
    # NOTE: any other out_style falls through to an implicit None, matching
    # the original behaviour.
diff --git a/modules/extract_profile.pyc b/modules/extract_profile.pyc
new file mode 100644
index 0000000..249047b
Binary files /dev/null and b/modules/extract_profile.pyc differ
diff --git a/modules/extract_profile.py~ b/modules/extract_profile.py~
new file mode 100644
index 0000000..0d621e3
--- /dev/null
+++ b/modules/extract_profile.py~
@@ -0,0 +1,232 @@
+#!/usr/bin/env python
+#coding=utf-8
+from __future__ import print_function
+import numpy as np
+import cv2
+import matplotlib.pyplot as plt
+from subpixel_edges import subpixel_edges
+# import time
+# import datetime
+
+BLUR_SIZE = 3
+VERSION_CV2 = cv2.__version__
+
def extract_drop_profile(raw_experiment, user_inputs):
    """Crop the experiment image to the user's drop region and store the
    detected drop profile, surface line and threshold on raw_experiment."""
    cropped = image_crop(raw_experiment.image, user_inputs.drop_region)
    profile, surface, raw_experiment.ret = detect_edges(
        cropped, raw_experiment, user_inputs.drop_region,
        -1, 1, user_inputs.threshold_val)
    raw_experiment.drop_data = profile
    raw_experiment.surface_data = surface
+# needle_crop = image_crop(raw_experiment.image, user_inputs.needle_region)
+# raw_experiment.needle_data, ret = detect_edges(needle_crop, raw_experiment, user_inputs.needle_region, raw_experiment.ret, 2)
+
+
+
+ # # detect needle edges
+ # needle_crop = image_crop(raw_experiment.image, user_inputs.needle_region)
+ # raw_experiment.needle_data = detect_edges(needle_crop, user_inputs.needle_region)
+
+
+
def image_crop(image, points):
    """Return the sub-image bounded by two corner points.

    points = [[x0, y0], [x1, y1]].  Pixels are referenced image[y][x]
    (row-major), so rows are sliced with the y coordinates and columns with
    the x coordinates; coordinates are truncated to int before slicing.
    """
    (x0, y0), (x1, y1) = points
    return image[int(y0):int(y1), int(x0):int(x1)]
+
def detect_edges(image, raw_experiment, points, ret, n_contours, threshValue):
    """Detect the drop profile contour in a cropped image.

    Thresholds a Gaussian-blurred copy of the image, extracts OpenCV
    contours, keeps the n_contours longest ones, trims off points lying on
    the crop border, and fits a straight line through the first and last A
    points of the trimmed contour (used as the surface baseline).

    Returns (contour_trimmed, line, ret):
      contour_trimmed -- (N, 2) array of (x, y) contour points
      line            -- np.poly1d linear baseline fit
      ret             -- threshold value actually used
    NOTE(review): shows debug plots via plt.show(), which blocks until the
    window is closed, and returns from inside the contour loop, so only the
    longest contour is processed regardless of n_contours — confirm intended.
    """
    # image = np.flipud(imageUD)
    if len(image.shape) != 2:
        # colour input: collapse to grayscale before thresholding
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    blur = cv2.GaussianBlur(image, (BLUR_SIZE, BLUR_SIZE), 0)  # apply Gaussian blur to drop edge
    # if ret == -1:
    ret, thresh = cv2.threshold(blur, threshValue, 255, cv2.THRESH_BINARY)  # +cv2.THRESH_OTSU) # calculate thresholding
    print(ret)
    # else:
    #     ret, thresh = cv2.threshold(blur,ret,255,cv2.THRESH_BINARY) # calculate thresholding
    # these values seem to agree with
    # - http://www.academypublisher.com/proc/isip09/papers/isip09p109.pdf
    # - http://stackoverflow.com/questions/4292249/automatic-calculation-of-low-and-high-thresholds-for-the-canny-operation-in-open
    # edges = cv2.Canny(thresh,0.5*ret,ret) # detect edges using Canny edge detection

    # error in PDT code - shouldn't threshold before Canny - otherwise Canny is useless
    # edges = cv2.Canny(blur,0.5*ret,ret) # detect edges using Canny edge detection

    # NOTE(review): both branches below are identical; OpenCV 3 returns
    # (image, contours, hierarchy), so the version split presumably intended
    # different unpacking — confirm against the cv2 version in use.
    if float(VERSION_CV2[0]) > 2:  # Version 3 of opencv returns an extra argument
        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    else:
        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

    # sub-pixel edge finder, added May 2021 JB
    # img_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    print(np.shape(image))
    edges = subpixel_edges(image.astype(float), threshValue, 0, 2)

    # order the sub-pixel edge points into a continuous path, starting from
    # the left-most point
    edges_list = list(zip(edges.x, edges.y))
    start = sorted(edges_list, key=lambda x: (x[0]))[0]
    path = optimized_path(edges_list, start)
    # cv2.imshow('img',image)
    # cv2.waitKey(0)

    ##plt.figure()
    # plt.imshow(image)
    # plt.plot(drop_profile[:,0],drop_profile[:,1],'r')
    # plt.plot(drop_profile_noise[:,0],drop_profile_noise[:,1],'bx')
    # plt.plot(*zip(*path), 'r')

    # plt.plot(edges.x,edges.y,'bx')
    # plt.axis('off')
    # plt.show()

    contour_lengths = []  # list to hold all areas

    # arc length of every contour, so the longest can be selected
    for contour in contours:
        length = cv2.arcLength(contour, 0)
        contour_lengths.append(length)

    # contour indices sorted longest-first; keep the top n_contours
    indexed_contour_lengths = np.array(contour_lengths).argsort()[::-1]
    indexed_contours_to_return = indexed_contour_lengths[:n_contours]

    image_height = raw_experiment.image.shape[0]

    offset = [0, 0]  # [0, image.shape[1]]
    points = []
    for index in indexed_contours_to_return:
        current_contour = contours[index][:, 0]
        # apply the (currently zero) offset to every contour point
        for i in range(current_contour.shape[0]):
            current_contour[i, 1] = current_contour[i, 1]
            current_contour[i, :] = current_contour[i, :] + offset
        # points.append(current_contour[current_contour[:,1].argsort()])

        # count the contour points that do NOT lie on the crop border
        size = 0
        cropped_image_height = image.shape[0]
        cropped_image_width = image.shape[1]
        for i in range(current_contour.shape[0]):  # Trim edges from contour
            if current_contour[i, 0] != 0:
                if current_contour[i, 0] != cropped_image_width - 1:
                    if current_contour[i, 1] != 0:
                        if current_contour[i, 1] != (cropped_image_height - 1):
                            size = size + 1
        # print(current_contour.shape[0])
        # print(size)
        current_contour_trimmed = np.zeros((size, 2))

        # copy across only the interior (non-border) points
        index = 0
        for i in range(current_contour.shape[0]):
            if current_contour[i, 0] != 0:
                if current_contour[i, 0] != cropped_image_width - 1:
                    if current_contour[i, 1] != 0:
                        if current_contour[i, 1] != (cropped_image_height - 1):
                            current_contour_trimmed[index, :] = current_contour[i, :]
                            index = index + 1

        contour_x = current_contour_trimmed[:, 0]
        contour_y = current_contour_trimmed[:, 1]
        N = np.shape(contour_x)[0]
        # print N
        # debug display of the trimmed contour and sub-pixel edges
        plt.axis('equal')
        plt.imshow(image)
        plt.plot(contour_x, contour_y, 'rs', markerfacecolor='none')
        plt.plot(edges.x, edges.y, 'bs', markerfacecolor='none')
        plt.quiver(edges.x, edges.y, edges.nx, -edges.ny, scale=20)
        # plt.plot(*zip(*path), 'b-x')
        # plt.plot(start[0],start[1], 'ms')
        plt.show()
        # cv2.imshow('img',image)
        # cv2.imshow('img',thresh)
        # cv2.drawContours(image,current_contour_trimmed)
        # cv2.drawContours(image,contours,0,(0,255,0),10)
        # cv2.drawContours(image,contours,1,(255,255,0),10)
        # cv2.waitKey(0)

        # baseline: straight-line fit through the first and last A contour points
        A = 50
        xx = np.concatenate((contour_x[0:A], contour_x[N - A:N + 1]))
        yy = np.concatenate((contour_y[0:A], contour_y[N - A:N + 1]))

        coefficients = np.polyfit(xx, yy, 1)
        line = np.poly1d(coefficients)
        # plt.plot(contour_x,line(contour_x),'r-',linewidth=2.0)
        # plt.plot(xx,yy,'o',markeredgecolor="hotpink",markerfacecolor="hotpink",markersize = 10.0)

        # plt.imshow(image, origin='upper', cmap = 'gray')
        # plt.plot(contour_x,contour_y,"--",color="white",linewidth = 2.0)
        # plt.show()

        return current_contour_trimmed, line, ret
+ # points = largest_contour[largest_contour[:,1].argsort()]
+
+
+ # # determines the largest contour.
+ # # hierarchy describes parent-child relationship
+ # # this routine determines the length of each contour
+ # # and returns the largest
+ # drop_index = 0
+ # maxLength = 0.0
+ # for i in range(np.max(hierarchy+1)):
+ # length = cv2.arcLength(contours[i],0)
+ # # print(i, length)
+ # if length > maxLength:
+ # maxLength = length
+ # drop_index = i
+
+
+ # # the largest contour
+ # largest_contour = contours[drop_index][:,0]
+
+ # # converts the data to (x, y) data where (0, 0) is the lower-left pixel
+ # image_height = raw_experiment.image.shape[0]
+ # offset = [points[0][0], image_height - points[0][1]]
+ # for i in range(largest_contour.shape[0]):
+ # largest_contour[i,1] = - largest_contour[i,1]
+ # largest_contour[i,:] = largest_contour[i,:] + offset
+ # points = largest_contour[largest_contour[:,1].argsort()]
+
+ # return points, ret
+
+# def calculate_needle_diameter(raw_experiment, fitted_drop_data, tolerances):
+
def distance(P1, P2):
    """Euclidean distance between P1 = (x1, y1) and P2 = (x2, y2)."""
    dx = P1[0] - P2[0]
    dy = P1[1] - P2[1]
    return (dx * dx + dy * dy) ** 0.5
+
def optimized_path(coords, start=None):
    """Order 2-D points into a continuous path by greedy nearest-neighbour.

    coords is a list (or numpy array) of [x, y] pairs; start, when given,
    must equal one of the points and becomes the first element of the path.
    Returns an (n, 2) numpy array.  The caller's sequence is not mutated.

    https://stackoverflow.com/questions/45829155/sort-points-in-order-to-have-a-continuous-curve-using-python
    """
    # Private copy: the previous version removed points from the caller's
    # list in place, emptying it as a side effect.
    if isinstance(coords, list):
        remaining = list(coords)
    else:
        remaining = coords.tolist()
    if start is None:
        start = remaining[0]
    path = [start]
    remaining.remove(start)  # ValueError if start is not a point, as before
    while remaining:
        hx, hy = path[-1][0], path[-1][1]
        # squared distance has the same argmin as the true distance
        nearest = min(remaining, key=lambda p: (p[0] - hx) ** 2 + (p[1] - hy) ** 2)
        path.append(nearest)
        remaining.remove(nearest)
    return np.array(path)
diff --git a/modules/fits.py b/modules/fits.py
new file mode 100644
index 0000000..e60fa1c
--- /dev/null
+++ b/modules/fits.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+#coding=utf-8
+from __future__ import print_function
+import numpy as np
+import cv2
+import matplotlib.pyplot as plt
+from sklearn.cluster import OPTICS # DS 7/6/21 - for clustering algorithm
+import time
+import datetime
+
def perform_fits(experimental_drop, tangent=False, polynomial=False, circle=False, ellipse=False, YL=False):
    """Run the selected contact-angle fitting methods on the drop contour.

    Each enabled method stores its results as a dict under
    experimental_drop.contact_angles['<method> fit'].
    """
    if tangent == True:
        from .polynomial_fit import polynomial_fit
        angles, CPs, lines, errors, timings = polynomial_fit(
            experimental_drop.drop_contour, polynomial_degree=1)
        experimental_drop.contact_angles['tangent fit'] = {
            'left angle': angles[0],
            'right angle': angles[1],
            'contact points': CPs,
            'tangent lines': lines,
            'errors': errors,
            'timings': timings,
        }
    if polynomial == True:
        from .polynomial_fit import polynomial_fit
        angles, CPs, lines, errors, timings = polynomial_fit(
            experimental_drop.drop_contour, polynomial_degree=2)
        experimental_drop.contact_angles['polynomial fit'] = {
            'left angle': angles[0],
            'right angle': angles[1],
            'contact points': CPs,
            'tangent lines': lines,
            'errors': errors,
            'timings': timings,
        }
    if circle == True:
        from .circular_fit import circular_fit
        angles, center, radius, intercepts, errors, timings = circular_fit(
            experimental_drop.drop_contour)
        experimental_drop.contact_angles['circle fit'] = {
            'left angle': angles[0],
            'right angle': angles[1],
            'baseline intercepts': intercepts,
            'circle center': center,
            'circle radius': radius,
            'errors': errors,
            'timings': timings,
        }
    if ellipse == True:
        from .ellipse_fit import ellipse_fit
        angles, intercepts, center, ab, rotation, errors, timings = ellipse_fit(
            experimental_drop.drop_contour)
        experimental_drop.contact_angles['ellipse fit'] = {
            'left angle': angles[0],
            'right angle': angles[1],
            'baseline intercepts': intercepts,
            'ellipse center': center,
            'ellipse a and b': ab,
            'ellipse rotation': rotation,
            'errors': errors,
            'timings': timings,
        }
    if YL == True:
        from .BA_fit import YL_fit
        angles, Bo, baselinewidth, volume, shape, baseline, errors, sym_errors, timings = YL_fit(
            experimental_drop.drop_contour)
        experimental_drop.contact_angles['YL fit'] = {
            'left angle': angles[0],
            'right angle': angles[1],
            'bond number': Bo,
            'baseline width': baselinewidth,
            'volume': volume,
            'fit shape': shape,
            'baseline': baseline,
            'errors': errors,
            'symmetry errors': sym_errors,
            'timings': timings,
        }
diff --git a/modules/initialise_parameters.py b/modules/initialise_parameters.py
new file mode 100644
index 0000000..35864d0
--- /dev/null
+++ b/modules/initialise_parameters.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python
+#coding=utf-8
+import math
+import sys
+import numpy as np
+
+
def initialise_parameters(experimental_drop, drop_data):
    """Seed drop_data with initial fit parameters estimated from the
    experimental drop profile (apex circle fit + Bond number guess)."""
    x_apex, y_apex, radius_apex = fit_circle(experimental_drop.drop_data)
    bond_number = calculate_Bond_number(experimental_drop.drop_data, x_apex, y_apex, radius_apex)
    omega_rotation = 0.0  # initial rotation angle (should revisit this)
    drop_data.params = [x_apex, y_apex, radius_apex, bond_number, omega_rotation]
    # maybe calculate_max_s() to determine initial max_s - although the
    # current version can handle max_s being too small
    drop_data.max_s = 4.0
+
+
+# fits a circle to the drop apex to calculate the (x, y) coordinate and apex radius R_0
def fit_circle(xypoints):
    """Fit a circle to the leading (apex) points of the drop profile.

    Uses the closed-form algebraic least-squares circle fit on the first
    max(10, 10% of the points), capped at the number of points available.
    Returns [x_center, y_center - R, R]: the apex x coordinate, the apex
    (bottom-of-circle) y coordinate, and the apex radius R_0.
    """
    total = len(xypoints)
    n = min(total, max(10, int(0.1 * total)))  # at least 10 points, or all of them
    pts = np.asarray(xypoints[:n], dtype=np.float64)
    xs = pts[:, 0]
    ys = pts[:, 1]
    # moment sums of the selected points
    sumX = xs.sum()
    sumY = ys.sum()
    sumX2 = (xs ** 2).sum()
    sumY2 = (ys ** 2).sum()
    sumXY = (xs * ys).sum()
    sumX3 = (xs ** 3).sum()
    sumY3 = (ys ** 3).sum()
    sumX2Y = (xs ** 2 * ys).sum()
    sumXY2 = (xs * ys ** 2).sum()
    # central second/third moments used by the closed-form solution
    d11 = n * sumXY - sumX * sumY
    d20 = n * sumX2 - sumX ** 2
    d02 = n * sumY2 - sumY ** 2
    d30 = n * sumX3 - sumX2 * sumX
    d03 = n * sumY3 - sumY2 * sumY
    d21 = n * sumX2Y - sumX2 * sumY
    d12 = n * sumXY2 - sumX * sumY2
    denom = 2 * (d20 * d02 - d11 ** 2)
    x = ((d30 + d12) * d02 - (d03 + d21) * d11) / denom
    y = ((d03 + d21) * d20 - (d30 + d12) * d11) / denom
    # mean squared distance term, from which the radius follows
    c = (sumX2 + sumY2 - 2 * x * sumX - 2 * y * sumY) / n
    R = math.sqrt(c + x ** 2 + y ** 2)
    return [x, y - R, R]
+
+# calculates the initial guess for the Bond number using method
+# from Neeson et al. (see Mathematica Notebook - CalculatingBond.nb)
def calculate_Bond_number(xypoints, x_apex, y_apex, radius_apex):
    """Initial Bond number guess, using the method from Neeson et al.
    (see Mathematica Notebook - CalculatingBond.nb)."""
    r_z2 = scaled_radius_at_scaled_height(xypoints, x_apex, y_apex, radius_apex, 2)
    if r_z2 > 0:  # JB edit 26/3/15
        # polynomial interpolated from numerical data
        return 0.1756 * r_z2**2 + 0.5234 * r_z2**3 - 0.2563 * r_z2**4
    r_z1 = scaled_radius_at_scaled_height(xypoints, x_apex, y_apex, radius_apex, 1)
    # if r_z1 > 0:  # JB edit 26/3/15
    #     return 5.819 * (r_z1 - 1)  # interpolated from numerical data
    # finally, if neither estimate is available, fall back to a naive guess
    return 0.15  # JB edit 26/3/2015
+
+# calculates the radius of the pendant drop at z = height * R_0
def scaled_radius_at_scaled_height(xypoints, x_apex, y_apex, radius_apex, height):
    """Mean scaled drop radius at z = y_apex + height * radius_apex.

    Averages |x - x_apex| over the `window` points on either side of the
    first profile point at or above the target height, scaled by radius_apex.
    Returns -1 if the profile never reaches the target height and -2 if
    there are too few neighbouring points to average over.
    """
    window = 5  # number of data points averaged on each side
    z_value = y_apex + height * radius_apex
    if xypoints[-1][1] < z_value:
        # profile never reaches the requested height
        return -1
    index = 0
    while xypoints[index][1] < z_value:
        index += 1
    if index < window or (len(xypoints) - index) < window:
        # not enough points around the crossing to average over
        return -2
    radii = [abs(xypoints[k][0] - x_apex) for k in range(index - window, index + window)]
    return sum(radii) / (2 * window * radius_apex)
+
+# # determines the maximum required arc length, max_s
+# def calculate_max_s(xypoints):
+# lenpoints = len(xypoints)
+# global s_left
+# global s_right
+# global s_max
+# s_max = 0.5 * smax
+# s_left = 0.
+# s_right = 0.
+# ptsConsider = 10
+# for i in range(lenpoints-ptsConsider,lenpoints):
+# x, y = xypoints[i]
+# minimumDistance(x, y, s_max)
+# s_max = max(s_max, s_left, s_right)
diff --git a/modules/initialise_parameters.pyc b/modules/initialise_parameters.pyc
new file mode 100644
index 0000000..e4b9f44
Binary files /dev/null and b/modules/initialise_parameters.pyc differ
diff --git a/modules/initialise_parameters.py~ b/modules/initialise_parameters.py~
new file mode 100644
index 0000000..400b4ff
--- /dev/null
+++ b/modules/initialise_parameters.py~
@@ -0,0 +1,104 @@
+#!/usr/bin/env python
+#coding=utf-8
+import math
+import sys
+
+
+def initialise_parameters(experimental_drop, drop_data):
+ omega_rotation = 0.0 # initial rotation angle (should revisit this)
+ [x_apex, y_apex, radius_apex] = fit_circle(experimental_drop.drop_data)
+ bond_number = calculate_Bond_number(experimental_drop.drop_data, x_apex, y_apex, radius_apex)
+ drop_data.params = [x_apex, y_apex, radius_apex, bond_number, omega_rotation]
+ # maybe calculate_max_s() to determine initial max_s - although current version can handle max_s being too small
+ drop_data.max_s = 4.0
+
+
+# fits a circle to the drop apex to calculate the (x, y) coordinate and apex radius R_0
+def fit_circle(xypoints):
+ lenpoints = len(xypoints)
+ sumX = 0.0
+ sumY = 0.0
+ sumX2 = 0.0
+ sumY2 = 0.0
+ sumXY = 0.0
+ sumX3 = 0.0
+ sumY3 = 0.0
+ sumX2Y = 0.0
+ sumXY2 = 0.0
+ n = max(10, int(0.1 * lenpoints)) # ensure at least 10 points are used...
+ if n > lenpoints:
+        n = lenpoints # if there are not enough points, take all points
+ for k in range(0, n):
+ xk, yk = xypoints[k]
+ sumX += xk
+ sumY += yk
+ sumX2 += xk**2
+ sumY2 += yk**2
+ sumXY += xk * yk
+ sumX3 += xk**3
+ sumY3 += yk**3
+ sumX2Y += (xk**2)*yk
+ sumXY2 += xk*(yk**2)
+ d11 = n * sumXY - sumX * sumY
+ d20 = n * sumX2 - sumX**2
+ d02 = n * sumY2 - sumY**2
+ d30 = n * sumX3 - sumX2 * sumX
+ d03 = n * sumY3 - sumY2 * sumY
+ d21 = n * sumX2Y - sumX2 * sumY
+ d12 = n * sumXY2 - sumX * sumY2
+ x = ((d30 + d12) * d02 - (d03 + d21) * d11) / (2 * (d20 * d02 - d11**2))
+ y = ((d03 + d21) * d20 - (d30 + d12) * d11) / (2 * (d20 * d02 - d11**2))
+ c = (sumX2 + sumY2 - 2 * x *sumX - 2 * y * sumY) / n
+ R = math.sqrt(c + x**2 + y**2)
+ return [x, y - R, R]
+
+# calculates the initial guess for the Bond number using method
+# from Neeson et al. (see Mathematica Notebook - CalculatingBond.nb)
+def calculate_Bond_number(xypoints, x_apex, y_apex, radius_apex):
+ r_z2 = scaled_radius_at_scaled_height(xypoints, x_apex, y_apex, radius_apex, 2)
+ if r_z2 > 0: #JB edit 26/3/15
+ bond = 0.1756 * r_z2**2 + 0.5234 * r_z2**3 - 0.2563 * r_z2**4 # interpolated from numerical data
+ return bond
+ r_z1 = scaled_radius_at_scaled_height(xypoints, x_apex, y_apex, radius_apex, 1)
+# if r_z1 > 0: #JB edit 26/3/15
+# bond = 5.819 * (r_z1 - 1) # interpolated from numerical data
+# return bond
+    # finally, if neither of these works, just use a naive guess
+ return 0.15 #JB edit 26/3/2015
+
+# calculates the radius of the pendant drop at z = height * R_0
+def scaled_radius_at_scaled_height(xypoints, x_apex, y_apex, radius_apex, height):
+ lenpoints = len(xypoints)
+ points_to_return = 5 # number of data points to average over
+ z_value = y_apex + height * radius_apex
+ if xypoints[-1][1] < z_value:
+ # print('ERROR: not enough data points to accurately guess the Bond number')
+ # sys.exit(1)
+ return -1
+ index = 0
+ while xypoints[index][1] < z_value:
+ index += 1
+ if (index < points_to_return) or ((lenpoints-index) < points_to_return):
+# print('ERROR: not enough data points to average over')
+# sys.exit(1)
+ return -2
+ sum_radius = 0.0
+ for k in range(index-points_to_return,index+points_to_return):
+ sum_radius += abs(xypoints[k][0] - x_apex)
+ scaled_radius = sum_radius / (2 * points_to_return * radius_apex)
+ return scaled_radius
+
+# # determines the maximum required arc length, max_s
+# def calculate_max_s(xypoints):
+# lenpoints = len(xypoints)
+# global s_left
+# global s_right
+# global s_max
+# s_max = 0.5 * smax
+# s_left = 0.
+# s_right = 0.
+# ptsConsider = 10
+# for i in range(lenpoints-ptsConsider,lenpoints):
+# x, y = xypoints[i]
+# minimumDistance(x, y, s_max)
+# s_max = max(s_max, s_left, s_right)
diff --git a/modules/parameters.csv b/modules/parameters.csv
new file mode 100644
index 0000000..f358efc
--- /dev/null
+++ b/modules/parameters.csv
@@ -0,0 +1,23 @@
+Drop ID method,Automated
+Image thresholding method,Automated
+Image thresholding value,50.0
+Baseline method,Automated
+Edge finder,OpenCV
+Continuous density,827.0
+Needle diameter,1.27
+Plot residuals,1
+Plot profiles,1
+Plot IFT,1
+Perform tangent fit,1
+Perform polynomial fit,1
+Perform circle fit,1
+Perform ellipse fit,1
+Perform YL fit,1
+Perform ML model prediction,1
+Image source,Local images
+Number of frames,1
+Wait time,1
+Save images,1
+Create new data folder,0
+Filename,temp
+Directory,/Users/dgshaw/Library/CloudStorage/OneDrive-TheUniversityofMelbourne/files/conan/conan-paper
diff --git a/modules/parameters.csv~ b/modules/parameters.csv~
new file mode 100644
index 0000000..8461f19
--- /dev/null
+++ b/modules/parameters.csv~
@@ -0,0 +1,13 @@
+Drop density,1000.0
+Continuous density,827.0
+Needle diameter,1.27
+Plot residuals,1
+Plot profiles,1
+Plot IFT,1
+Image source,Local Images
+Number of frames,600
+Wait time,10
+Save images,0
+Create new data folder,0
+Filename,test
+Directory,/home/smaclab/Desktop/sumit/SMO
diff --git a/modules/polynomial_fit.py b/modules/polynomial_fit.py
new file mode 100644
index 0000000..f49b5ec
--- /dev/null
+++ b/modules/polynomial_fit.py
@@ -0,0 +1,1097 @@
+#!/usr/bin/env python
+#coding=utf-8
+
+"""This code serves as a discrete instance of the polynomial fit method of
+contact angle analysis.
+
+Polynomial fit code taken from the most recent version of conan -
+conan-ML_cv1.1/modules/select_regions.py"""
+
+# Polynomial fit from the most recent version of conan - conan-ML_v1.1/modules/select_regions.py
+
+from sklearn.cluster import OPTICS # for clustering algorithm
+import scipy.optimize as opt
+import numba
+import math
+from scipy.spatial import distance
+from scipy.integrate import solve_ivp
+import numpy as np
+import matplotlib.pyplot as plt
+import cv2
+import time
+
+def cluster_OPTICS(sample, out_style='coords',xi=None,eps=None,verbose=0):
+ """Takes an array (or list) of the form [[x1,y1],[x2,y2],...,[xn,yn]].
+ Clusters are outputted in the form of a dictionary.
+
+ If out_style='coords' each dictionary entry is a group, and points are outputted in coordinate form.
+ If out_xy='xy' there are two dictionary entries for each group, one labeled as nx and one as ny
+ (where n is the label of the group)
+
+ If xi (float between 0 and 1) is not None and eps is None, then the xi clustering method is used.
+ The optics algorithm defines clusters based on the minimum steepness on the reachability plot.
+ For example, an upwards point in the reachability plot is defined by the ratio from one point to
+ its successor being at most 1-xi.
+
+ If eps (float) is not None and xi is None, then the dbscan clustering method is used. Where eps is the
+ maximum distance between two samples for one to be considered as in the neighborhood of the other.
+
+ https://stackoverflow.com/questions/47974874/algorithm-for-grouping-points-in-given-distance
+ https://scikit-learn.org/stable/modules/generated/sklearn.cluster.OPTICS.html
+ """
+ if eps != None and xi==None:
+ clustering = OPTICS(min_samples=2,cluster_method = 'dbscan',eps = eps).fit(sample) # cluster_method changed to dbscan (so eps can be set)
+ elif xi != None and eps==None:
+ clustering = OPTICS(min_samples=2,xi=xi).fit(sample) # original had xi = 0.05, xi as 0.1 in function input
+ else:
+ raise 'Error: only one of eps and xi can be chosen but not neither nor both'
+ groups = list(set(clustering.labels_))
+
+ if verbose==2:
+ print(clustering.labels_)
+ elif verbose==1:
+ print(groups)
+ elif verbose==0:
+ pass
+
+ dic = {}
+ for n in groups:
+ if n not in dic:
+ dic[n] = []
+ for i in range(len(sample)):
+ if clustering.labels_[i] == n:
+ dic[n].append(sample[i])
+
+ # separate points and graph
+ dic2={}
+ for k in dic.keys():
+ x = []
+ y = []
+ for i in range(len(dic[k])):
+ x.append(dic[k][i][0])
+ dic2[str(k)+'x'] = x
+ for i in range(len(dic[k])):
+ y.append(dic[k][i][1])
+ dic2[str(k)+'y'] = y
+
+
+ if out_style=='coords':
+ return dic
+ elif out_style=='xy':
+ return dic2
+
+def distance1(P1, P2):
+ """This function computes the distance between 2 points defined by
+ P1 = (x1,y1) and P2 = (x2,y2) """
+ return ((P1[0] - P2[0])**2 + (P1[1] - P2[1])**2) ** 0.5
+
+def optimized_path(coords, start=None):
+ """This function finds the nearest point to a point
+ coords should be a list in this format coords = [ [x1, y1], [x2, y2] , ...]
+ https://stackoverflow.com/questions/45829155/sort-points-in-order-to-have-a-continuous-curve-using-python"""
+ if isinstance(coords,list) == False:
+ coords = coords.tolist()
+ if 0 :
+ if isinstance(start,list) == False:
+ try:
+ start = start.tolist()
+ except:
+ start = list(start)
+ if start is None:
+ start = coords[0]
+ pass_by = coords
+ path = [start]
+ pass_by.remove(start)
+ while pass_by:
+ nearest = min(pass_by, key=lambda x: distance1(path[-1], x))
+ path.append(nearest)
+ pass_by.remove(nearest)
+ path = np.array(path)
+
+ # if there are any large jumps in distance, there is likely a mistake
+ # therefore, the points after this jump should be ignored
+ if 1:
+ dists = []
+ for i, point in enumerate(path):
+ if i < len(path)-1:
+ dists.append(distance1(path[i], path[i+1]))
+ jump_idx = []
+ for i, dist in enumerate(dists):
+ if dist > 5:
+ jump_idx.append(i)
+ if len(jump_idx)>0:
+ path = path[:jump_idx[0]]
+
+ return path
+
+def prepare_hydrophobic(coords,xi=0.8,cluster=False,display=False):
+ """takes an array (n,2) of coordinate points, and returns the left and right halfdrops of the contour.
+ xi determines the minimum steepness on the reachability plot that constitutes a cluster boundary of the
+ clustering algorithm.
+ deg is the degree of the polynomial used to describe the shape of the droplet.
+
+ This code is adapted from the prepare module, but this version differs in that it assumes that the drop
+ is hydrophobic."""
+ coords = coords.astype(np.float)
+    # scan for clusters to remove noise and circle from lensing effect
+ ################ MAY NEED TO OPTIMIZE eps/xi TO FIND APPROPRIATE GROUPINGS ####################
+ if cluster: # turn this off bc using synthetic drops without lensing effect
+ input_contour = coords
+ dic,dic2 = cluster_OPTICS(input_contour,xi=xi),cluster_OPTICS(input_contour,out_style='xy',xi=xi)
+
+ #print("number of groups: ",len(list(dic.keys())))
+
+ jet= plt.get_cmap('jet')
+ colors = iter(jet(np.linspace(0,1,len(list(dic.keys())))))
+ for k in dic.keys():
+ plt.plot(dic2[str(k)+'x'],dic2[str(k)+'y'], 'o',color=next(colors))
+ plt.title(str(len(dic.keys()))+" groups found by clustering")
+ plt.show()
+ plt.close()
+ maxkey=max(dic, key=lambda k: len(dic[k]))
+
+ #print('key to longest dictionary entry is: ',maxkey)
+
+ # take the longest group
+ longest = dic[maxkey]
+
+ # flip contour so that min and max values are correct
+ for coord in coords:
+ coord[1] = -coord[1]
+
+ longest = coords
+
+ #print("first few coordinates of the longest contour: ",longest[:3])
+
+ xlongest = []
+ ylongest = []
+ for i in range(len(longest)):
+ xlongest.append(longest[i][0])
+ ylongest.append(longest[i][1])
+
+ #print("first few x coordinates of the longest contour: ",xlongest[:3])
+ #print("first few y coordinates of the longest contour: ",ylongest[:3])
+
+
+    # Find an appropriate epsilon value for cluster_OPTICS, this will remove noise in the bottom 10% of the drop
+ #. most importantly noise is reduced at contact points.
+
+ # variables in this process are how much and what part of the top of the droplet we use to be representative of
+ # the full contour, and whether we use the max(distance) between points or the average between points, or
+ # a scalar value of either.
+
+ xtop = [] # isolate top 90% of drop
+ ytop = []
+ percent = 0.3
+ #print('Isolate the top ',100-(percent*100),'% of the contour:')
+ for n,y in enumerate(ylongest):
+ if y > min(ylongest) + (max(ylongest) - min(ylongest))*percent:
+ xtop.append(xlongest[n])
+ ytop.append(y)
+ xtop = np.array(xtop)
+ ytop = np.array(ytop)
+
+ top = []
+ for n,x in enumerate(xtop):
+ top.append([xtop[n],ytop[n]])
+ top = np.array(top)
+ top_array = optimized_path(top)
+
+ dists = [] # find the average distance between consecutive points
+ for n,co in enumerate(top_array):
+ if 1 min(ylongest) + (max(ylongest) - min(ylongest))*percent:
+ xtop.append(xlongest[n])
+ ytop.append(y)
+ xapex = (max(xtop) + min(xtop))/2
+
+ l_drop = []
+ r_drop = []
+ for n in longest:
+ if n[0] <= xapex:
+ l_drop.append(n)
+ if n[0] >= xapex:
+ r_drop.append(n)
+ l_drop = np.array(l_drop)
+ r_drop = np.array(r_drop)
+
+ # transpose both half drops so that they both face right and the apex of both is at 0,0
+ r_drop[:,0] = r_drop[:,0] - xapex
+ l_drop[:,0] = -l_drop[:,0] + xapex
+
+ if display:
+ plt.plot(r_drop[:,[0]], r_drop[:,[1]], 'b,')
+ #plt.show()
+ #plt.close()
+ plt.plot(l_drop[:,[0]], l_drop[:,[1]], 'r,')
+ #plt.gca().set_aspect('equal', adjustable='box')
+ #plt.xlim([470,530])
+ #plt.ylim([-188,-190])
+ plt.show()
+ plt.close()
+
+ #############################
+
+ # the drop has been split in half
+
+ # this system has a user input which gives a rough indication of the contact point and the surface line
+
+ # isolate the bottom 5% of the contour near the contact point
+
+ drops = {}
+ counter = 0
+ crop_drop = {}
+ CPs = {}
+ for halfdrop in [l_drop,r_drop]:
+ new_halfdrop = sorted(halfdrop.tolist(), key=lambda x: (x[0],-x[1])) #top left to bottom right
+ new_halfdrop = optimized_path(new_halfdrop)#[::-1]
+
+ xnew_halfdrop = new_halfdrop[:,[0]].reshape(len(new_halfdrop[:,[0]]))
+ ynew_halfdrop = new_halfdrop[:,[1]].reshape(len(new_halfdrop[:,[1]]))
+
+ # isolate the bottom of the drop to help identify contact points (may not need to do this for all scenarios)
+ bottom = []
+ top = [] # will need this later
+ #print('isolate the bottom ',percent*100,'% of the contour:') # percent defined above
+ div_line_value = min(new_halfdrop[:,[1]]) + (max(new_halfdrop[:,[1]]) - min(new_halfdrop[:,[1]]))*percent
+ for n in new_halfdrop:
+ if n[1] < div_line_value:
+ bottom.append(n)
+ else:
+ top.append(n)
+
+ bottom = np.array(bottom)
+ top = np.array(top)
+
+ xbottom = bottom[:,[0]].reshape(len(bottom[:,[0]]))
+ ybottom = bottom[:,[1]].reshape(len(bottom[:,[1]]))
+ xtop = top[:,[0]].reshape(len(top[:,[0]]))
+ ytop = top[:,[1]].reshape(len(top[:,[1]]))
+
+ #print('max x value of halfdrop is: ',max(xhalfdrop))
+
+ if 1: # plot the bottom 10% of the contour
+ plt.plot(xbottom, ybottom, 'b,')
+ plt.title('bottom 10% of the contour')
+ #plt.xlim([130,200])
+ plt.show()
+ plt.close()
+
+ #### Continue here assuming that the drop is hydrophobic ####
+ if 1:
+ # order all halfdrop points using optimized_path (very quick but occasionally makes mistakes)
+
+ xCP = min(xbottom)
+ #yCP = min([coord[1] for coord in new_halfdrop if coord[0]==xCP])
+ yCP = max([coord[1] for coord in bottom if coord[0]==xCP])
+ CPs[counter] = [xCP, yCP]
+
+ if display: #check
+ plt.plot(new_halfdrop[:,0],new_halfdrop[:,1])
+ plt.show()
+ plt.close()
+
+ # remove surface line past the contact point
+ index = new_halfdrop.tolist().index(CPs[counter]) #?
+
+ new_halfdrop = new_halfdrop[:index+1]
+
+ if 0:
+ xCP_index = [i for i, j in enumerate(xnew_halfdrop) if j == xCP]
+ #print('xCP_index is: ',xCP_index)
+ yCP_index = [i for i, j in enumerate(ynew_halfdrop) if j == yCP]
+ #print('yCP_index is: ',yCP_index)
+
+ new_halfdrop = np.zeros((len(xnew_halfdrop),2))
+ for n in range(len(xnew_halfdrop)):
+ new_halfdrop[n,[0]]=xnew_halfdrop[n]
+ new_halfdrop[n,[1]]=ynew_halfdrop[n]
+ #print('first 3 points of new_halfdrop are: ',new_halfdrop[:3])
+ #print('length of new_halfdrop is: ',len(new_halfdrop))
+
+ if xCP_index == yCP_index:
+ if new_halfdrop[xCP_index[0]+1][1]>new_halfdrop[xCP_index[0]-1][1]:
+ new_halfdrop = new_halfdrop[xCP_index[0]:]
+ else:
+ new_halfdrop = new_halfdrop[:xCP_index[0]+1]
+ else:
+ raise_error = True
+ for x in xCP_index:
+ for y in yCP_index:
+ if x==y:
+ raise_error = False
+ xCP_index = [x]
+ yCP_index = [y]
+ #print('indexes of the CP are: ',xCP_index[0],', ',yCP_index[0])
+ if new_halfdrop[xCP_index[0]+1][1]>new_halfdrop[xCP_index[0]-1][1]:
+ new_halfdrop = new_halfdrop[xCP_index[0]:]
+ else:
+ new_halfdrop = new_halfdrop[:xCP_index[0]+1]
+ if raise_error == True:
+ print('The index of the contact point x value is: ', new_halfdrop[xCP_index])
+ print('The index of the contact point y value is: ', new_halfdrop[yCP_index])
+ raise 'indexes of x and y values of the contact point are not the same'
+
+ if 0:
+ # order all halfdrop points using two-opt (the slower method)
+
+ # before any ordering is done, chop off the surface line that is past the drop edge
+ del_indexes = []
+ for index,coord in enumerate(bottom):
+ if coord[0]>max(xtop):
+ del_indexes.append(index)
+ if coord[1] ybot[yCP_index-1]:
+ new_bot = np.zeros((len(xbot[yCP_index:]),2))
+ for n in range(len(new_bot)):
+ new_bot[n,[0]] = xbot[xCP_index+n]
+ new_bot[n,[1]] = ybot[yCP_index+n]
+ else:
+ new_bot = np.zeros((len(xbot[:yCP_index]),2))
+ for n in range(len(new_bot)):
+ new_bot[n,[0]] = xbot[n]
+ new_bot[n,[1]] = ybot[n]
+ except:
+ try:
+ if ybot[yCP_index] > ybot[yCP_index-2]:
+ new_bot = np.zeros((len(xbot[yCP_index:]),2))
+ for n in range(len(new_bot)):
+ new_bot[n,[0]] = xbot[xCP_index+n]
+ new_bot[n,[1]] = ybot[yCP_index+n]
+ else:
+ new_bot = np.zeros((len(xbot[:yCP_index]),2))
+ for n in range(len(new_bot)):
+ new_bot[n,[0]] = xbot[n]
+ new_bot[n,[1]] = ybot[n]
+ except:
+ print('xCP_indexs are: ', xCP_indexs)
+ print('yCP_indexs are: ', yCP_indexs)
+ raise 'indexes of x and y values of the contact point are not the same'
+ new_halfdrop = np.concatenate((new_top,new_bot))
+
+ if 0: # order the points so that the baseline can be removed
+ # before any ordering is done, chop off the surface line that is past the drop edge
+ del_indexes = []
+ for index,coord in enumerate(halfdrop):
+ if coord[0]>max(xtop):
+ del_indexes.append(index)
+ #halfdrop = np.delete(halfdrop,del_indexes)
+ xhalfdrop = np.delete(xhalfdrop,del_indexes)
+ yhalfdrop = np.delete(yhalfdrop,del_indexes)
+ #print('shape of another_halfdrop is: '+ str(type(another_halfdrop)))
+ #print('first few points of halfdrop are: ',halfdrop[:3])
+
+
+
+ # order half contour points
+ xx,yy = sort_to_line(xhalfdrop,yhalfdrop)
+ add_top = False
+ #print('length of halfdrop is: ', len(halfdrop))
+ #print('length of xbottom is: ', len(xbottom))
+
+ #if xx[0]<1: # then graph starts at the top
+ surface_past_drop_index = []
+ for n,x in enumerate(xx):
+ if x>max(xtop):
+ surface_past_drop_index.append(n)
+ #xx = xx[:max(xtop)point]
+ #print('Indexes of contour points past drop: ',surface_past_drop_index)
+
+
+ # if the sort method will not work
+ if len(xx) < len(xhalfdrop): # assumes that the error is on the surface somewhere, so uses bottom of contour
+ add_top = True
+ print()
+ print('sort_to_line is not utilising the full contour, alternate ordering method being used')
+ print('check bottom 10% of contour...')
+ # this method is much slower than the above, so use as few points as possible
+ bot_list = []
+ for n in range(len(xbottom)):
+ if xbottom[n] ybot[yCP_index[0]-1]:
+ new_bot = np.zeros((len(xbot[yCP_index[0]:]),2))
+ for n in range(len(new_bot)):
+ new_bot[n,[0]] = xbot[xCP_index[0]+n]
+ new_bot[n,[1]] = ybot[yCP_index[0]+n]
+ else:
+ new_bot = np.zeros((len(xbot[:yCP_index[0]]),2))
+ for n in range(len(new_bot)):
+ new_bot[n,[0]] = xbot[n]
+ new_bot[n,[1]] = ybot[n]
+ else:
+ raise 'indexes of x and y values of the contact point are not the same'
+
+ # combine new_bot with top_array to give the isolated drop contour without surface
+ if 0:
+ top_array = np.zeros((len(xtop),2))
+ for n in range(len(xtop)):
+ top_array[n,[0]] = xtop[n]
+ top_array[n,[1]] = ytop[n]
+
+ new_halfdrop = np.concatenate((top,new_bot))
+
+ # re-order to check that the error was at the surface line
+ xx,yy = sort_to_line(new_halfdrop[:,[0]],new_halfdrop[:,[1]])
+ if len(xx)new_halfdrop[xCP_index[0]-1][1]:
+ new_halfdrop = new_halfdrop[xCP_index[0]:]
+ else:
+ new_halfdrop = new_halfdrop[:xCP_index[0]+1]
+ else:
+ raise_error = True
+ for x in xCP_index:
+ for y in yCP_index:
+ if x==y:
+ raise_error = False
+ xCP_index = [x]
+ yCP_index = [y]
+ #print('indexes of the CP are: ',xCP_index[0],', ',yCP_index[0])
+ if new_halfdrop[xCP_index[0]+1][1]>new_halfdrop[xCP_index[0]-1][1]:
+ new_halfdrop = new_halfdrop[xCP_index[0]:]
+ else:
+ new_halfdrop = new_halfdrop[:xCP_index[0]+1]
+ if raise_error == True:
+ print('The index of the contact point x value is: ', new_halfdrop[xCP_index])
+ print('The index of the contact point y value is: ', new_halfdrop[yCP_index])
+ raise 'indexes of x and y values of the contact point are not the same'
+
+ if counter == 0:
+ drops[counter] = new_halfdrop[::-1]
+ else:
+ drops[counter] = new_halfdrop
+
+ if display: #display
+ jet= plt.get_cmap('jet')
+ colors = iter(jet(np.linspace(0,1,len(new_halfdrop))))
+ for k in new_halfdrop:
+ plt.plot(k[0],k[1], 'o',color=next(colors))
+ plt.title('outputted halfdrop')
+ plt.axis('equal')
+ plt.show()
+ plt.close()
+
+ counter+=1
+
+ # reflect the left drop and combine left and right
+
+ profile = np.empty((len(drops[0])+len(drops[1]),2))
+ for i,n in enumerate(drops[0]):
+ flipped = n
+ flipped[0] = -flipped[0]
+ profile[i] = flipped
+ for i,n in enumerate(drops[1]):
+ profile[len(drops[0])+i] = n
+ CPs[0][0] = -CPs[0][0]
+
+ if display:
+ jet= plt.get_cmap('jet')
+ colors = iter(jet(np.linspace(0,1,len(profile))))
+ for k in profile:
+ plt.plot(k[0],k[1], 'o',color=next(colors))
+ plt.title('final output')
+ #plt.plot(profile[:,0],profile[:,1],'b')
+ plt.show()
+ plt.close()
+
+ plt.title('final output')
+ plt.plot(profile[:,0],profile[:,1],'b')
+ plt.show()
+ plt.close()
+
+ # flip upside down again so that contour follows image indexing
+ # and transform to the right so that x=0 is no longer in line with apex
+ for coord in profile:
+ coord[1] = -coord[1]
+ coord[0] = coord[0] + xapex
+ for n in [0,1]:
+ CPs[n][1] = -CPs[n][1]
+ CPs[n][0] = CPs[n][0] + xapex
+
+ # flip original contour back to original orientation
+ for coord in coords:
+ coord[1] = -coord[1]
+
+ return profile,CPs
+
+def find_contours(image):
+ """
+ Calls cv2.findContours() on passed image in a way that is compatible with OpenCV 4.x, 3.x or 2.x
+ versions. Passed image is a numpy.array.
+
+ Note, cv2.findContours() will treat non-zero pixels as 1 and zero pixels as 0, so the edges detected will only
+ be those on the boundary of pixels with non-zero and zero values.
+
+ Returns a numpy array of the contours in descending arc length order.
+ """
+ if len(image.shape) > 2:
+ raise ValueError('`image` must be a single channel image')
+
+ if CV2_VERSION >= (4, 0, 0):
+ # In OpenCV 4.0, cv2.findContours() no longer returns three arguments, it reverts to the same return signature
+ # as pre 3.2.0.
+ contours, hierarchy = cv2.findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_TC89_KCOS)
+ elif CV2_VERSION >= (3, 2, 0):
+ # In OpenCV 3.2, cv2.findContours() does not modify the passed image and instead returns the
+ # modified image as the first, of the three, return values.
+ _, contours, hierarchy = cv2.findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_TC89_KCOS)
+ else:
+ contours, hierarchy = cv2.findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_TC89_KCOS)
+
+ # Each contour has shape (n, 1, 2) where 'n' is the number of points. Presumably this is so each
+ # point is a size 2 column vector, we don't want this so reshape it to a (n, 2)
+ contours = [contour.reshape(contour.shape[0], 2) for contour in contours]
+
+ # Sort the contours by arc length, descending order
+ contours.sort(key=lambda c: cv2.arcLength(c, False), reverse=True)
+
+ return contours
+
+def extract_edges_CV(img):
+ '''
+    give the image and return a list of [x,y] coordinates for the detected edges
+
+ '''
+ IGNORE_EDGE_MARGIN = 1
+ img = img.astype("uint8")
+ try:
+ gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+ except:
+ gray = img
+ #ret, thresh = cv2.threshold(gray,threshValue,255,cv2.THRESH_BINARY)
+ ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
+ contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
+ # Each contour has shape (n, 1, 2) where 'n' is the number of points. Presumably this is so each
+ # point is a size 2 column vector, we don't want this so reshape it to a (n, 2)
+ contours = [contour.reshape(contour.shape[0], 2) for contour in contours]
+
+ # Sort the contours by arc length, descending order
+ contours.sort(key=lambda c: cv2.arcLength(c, False), reverse=True)
+
+ #Assume that the drop is the largest contour
+ #drop_profile = contours[0]
+ drop_profile = contours[0]
+
+ #Put the drop contour coordinates in order (from ?? to ??)
+ #drop_profile = squish_contour(drop_profile)
+
+ # Ignore points of the drop profile near the edges of the drop image
+ width, height = img.shape[1::-1]
+ if not (width < IGNORE_EDGE_MARGIN or height < IGNORE_EDGE_MARGIN):
+ mask = ((IGNORE_EDGE_MARGIN < drop_profile[:, 0]) & (drop_profile[:, 0] < width - IGNORE_EDGE_MARGIN) &
+ (IGNORE_EDGE_MARGIN < drop_profile[:, 1]) & (drop_profile[:, 1] < height - IGNORE_EDGE_MARGIN))
+ drop_profile = drop_profile[mask]
+
+ output = []
+ for coord in drop_profile:
+ if list(coord) not in output:
+ output.append(list(coord))
+ output = np.array(output)
+ return output
+
+def polynomial_closest_point(xp, yp, poly_points, display=False):
+ """
+ xp (float): The x-coordinate of the reference point
+ yp (float): The y-coordinate of the reference point
+ poly_points (array): The array of x, y coordinates outputted by the polynomial fit
+ display (Boolean): Set to True to output figures and information.
+
+ Returns:
+ The distance between the reference point and the polynomial fit, and
+ the coordinates of the closest point on the polynomial fit.
+
+ """
+
+ x = poly_points[:,0]
+ y = poly_points[:,1]
+
+ dist = np.sqrt((x - xp) ** 2 + (y - yp) ** 2)
+ idx = list(dist).index(min(dist))
+
+ #ddistdt = ((b ** 2 - a ** 2) * np.cos(t) + a * np.sin(np.deg2rad(th)) * yp - a * np.sin(np.deg2rad(th)) * yc + a * np.cos(np.deg2rad(th)) * xp - a * np.cos(np.deg2rad(th)) * xc) * np.sin(t) + ((-b * np.cos(np.deg2rad(th)) * yp) + b * np.cos(np.deg2rad(th)) * yc + b * np.sin(np.deg2rad(th)) * xp - b * np.sin(np.deg2rad(th)) * xc) * np.cos(t)
+ #idx = np.where(ddistdt[1:] * ddistdt[:-1] < 0)[0] # find zeros
+ #m = (ddistdt[idx + 1] - ddistdt[idx]) / (t[idx + 1] - t[idx]) # slope
+ if display:
+ plt.figure(1)
+ plt.plot(x, y, '-', xp, yp, 'r+', x[idx], y[idx], 'r+')
+ plt.xlabel('x')
+ plt.ylabel('y')
+ plt.title('Circle, Point, and Zeros')
+
+ plt.figure(2)
+ for t, d in enumerate(dist):
+ plt.plot(t, d, 'm.')
+ plt.plot(idx, dist[idx], 'cx')
+ plt.xlabel('index value of list')
+ plt.ylabel('Distance')
+ plt.title('Distance Function')
+
+ print(f'xp: {xp}, x[idx]: {x[idx]}')
+ print(f'yp: {yp}, y[idx]: {y[idx]}')
+ print('Error is: ', dist[idx])
+
+ plt.show()
+ plt.close()
+
+ return dist[idx], [x[idx],y[idx]]
+
+def polynomial_fit_errors(pts1,pts2,fit_left,fit_right, display=False):
+ """
+ Calculates the minimum distance between a point and a point of the polynomial fit.
+
+ Parameters:
+        pts1 (array): The array of x, y coordinate points for the points near the left contact point
+        pts2 (array): The array of x, y coordinate points for the points near the right contact point
+ fit_left (np.polyfit): The fit found by np.polyfit for pts1
+ fit_right (np.polyfit): The fit found by np.polyfit for pts2
+ display (boolean): Set to true to show figures.
+
+ Returns:
+ dictionary: The MAE, MSE, RMSE, and maximum error of the contour as compared against the
+ polynomial fit.
+ """
+ highresx_left = np.linspace(pts1[0,0], pts1[-1,0],5*pts1.shape[0])
+ highresx_right = np.linspace(pts2[0,0], pts2[-1,0],5*pts2.shape[0])
+ highres_fit_left = fit_left(highresx_left)
+ highres_fit_right = fit_right(highresx_right)
+
+ poly_points_left = np.array(list(zip(highresx_left,highres_fit_left)))
+ poly_points_right = np.array(list(zip(highresx_right,highres_fit_right)))
+
+ error_measures = {}
+
+ # for left
+ errors_left = []
+ for point in pts1:
+ dist2edge, edge_point = polynomial_closest_point(point[0], point[1], poly_points_left, display=display)
+ errors_left.append(dist2edge)
+
+ error_measures['MAE left'] = sum([abs(error) for error in errors_left])/len(errors_left)
+ error_measures['MSE left'] = sum([error**2 for error in errors_left])/len(errors_left)
+ error_measures['RMSE left'] = np.sqrt(sum([error**2 for error in errors_left])/len(errors_left))
+ error_measures['Maximum error left'] = max(errors_left)
+
+ # for right
+ errors_right = []
+ for point in pts2:
+ dist2edge, edge_point = polynomial_closest_point(point[0], point[1], poly_points_right, display=display)
+ errors_right.append(dist2edge)
+
+ error_measures['MAE right'] = sum([abs(error) for error in errors_right])/len(errors_right)
+ error_measures['MSE right'] = sum([error**2 for error in errors_right])/len(errors_right)
+ error_measures['RMSE right'] = np.sqrt(sum([error**2 for error in errors_right])/len(errors_right))
+ error_measures['Maximum error right'] = max(errors_right)
+
+ error_measures['MAE'] = sum([error_measures['MAE left'],error_measures['MAE right']])/2
+ error_measures['MSE'] = sum([error_measures['MSE left'],error_measures['MSE right']])/2
+ error_measures['RMSE'] = sum([error_measures['RMSE left'],error_measures['RMSE right']])/2
+ error_measures['Maximum error'] = max([error_measures['Maximum error left'],error_measures['Maximum error right']])
+
+ return error_measures
+
+def polynomial_fit_img(img,
+ Npts=15,
+ polynomial_degree=2,
+ display=False):
+ """Takes in input contact angle experimental image, and outputs the fitted
+ angles and intercept coordinates. For best results, preprocessing should be
+ performed before calling this function.
+
+    The first and last Npts many points are fitted to a polynomial, and contact
+ angles are calculated by taking the gradient of the polynomial at the contact
+ points. The tilt of the surface is accounted for by taking the gradient between
+ contact points and balancing the polynomial angle against the surface gradient.
+
+ Coords is the coordinates of the drop outline in the form of an array or list.
+ Npts is the number of points included in the polynomial fit. The default number
+    of points used is 15.
+ display can be set to "True" to output figures."""
+
+ # begin with method specific preprocessing of img data
+ start_time = time.time()
+
+ edges_pts = extract_edges_CV(img) # array of x,y coords where lines are detected
+
+ if display:
+ plt.imshow(img)
+ plt.plot(edges_pts[:,0],edges_pts[:,1])
+ plt.title('drop found by hough transform')
+ plt.show()
+ plt.close()
+
+ tangent_drop,CPs = prepare_hydrophobic(edges_pts,display)
+
+ intercepts = [[tangent_drop[0,0],tangent_drop[0,1]],[tangent_drop[-1,0],tangent_drop[-1,1]]]
+
+ # timers
+ fit_preprocessing_time = time.time() - start_time
+ fit_start_time = time.time()
+
+ # start the fit
+ Ndrop = np.shape(tangent_drop)[0]
+ pts1 = tangent_drop[:Npts]
+ pts2 = tangent_drop[-Npts:]
+ pts2 = pts2[::-1] #reverse so that CP is first
+
+ fit_local1 = np.polyfit(pts1[:,0], pts1[:,1] ,polynomial_degree)
+ fit_local2 = np.polyfit(pts2[:,0], pts2[:,1] ,polynomial_degree)
+
+ line_local1 = np.poly1d(fit_local1)
+ line_local2 = np.poly1d(fit_local2)
+
+ if polynomial_degree > 1:
+ x_local1 = np.array([min(pts1[:,0]),max(pts1[:,0])])
+ f_local1 = line_local1(x_local1)
+ f_local1_prime = line_local1.deriv(1)
+
+ x_local2 = np.array([min(pts2[:,0])-10,max(pts2[:,0])+10])
+ f_local2 = line_local2(x_local2)
+ f_local2_prime = line_local2.deriv(1)
+
+ tangent1 = f_local1_prime(pts1[0,0])*(x_local1-pts1[0,0])+pts1[0,1]
+ tangent2 = f_local2_prime(pts2[0,0])*(x_local2-pts2[0,0])+pts2[0,1]
+
+ m1 = f_local1_prime(pts1[0,0])
+ m2 = f_local2_prime(pts2[0,0])
+ else:
+ m1 = fit_local1[0]
+ m2 = fit_local2[0]
+
+ if display:
+ plt.plot(edges_pts[:,0],edges_pts[:,1], 'o', color='pink')
+ plt.plot(pts1[:,0],pts1[:,1],'ro')
+ plt.plot(pts1[:,0],line_local1(pts1[:,0]),'y-')
+ plt.imshow(img)
+ pts1_width = abs(pts1[:,0][-1]-pts1[:,0][0])
+ pts1_height = abs(pts1[:,1][-1]-pts1[:,1][0])
+ plt.xlim(min(pts1[:,0])-pts1_width,max(pts1[:,0])+pts1_width)
+ plt.ylim(max(pts1[:,1])+pts1_height,min(pts1[:,1])-pts1_height)
+ plt.title('fitted points left side')
+ plt.show()
+ plt.close()
+
+ plt.plot(edges_pts[:,0],edges_pts[:,1], 'o', color='pink')
+ plt.plot(pts2[:,0],pts2[:,1],'ro')
+ plt.plot(pts2[:,0],line_local2(pts2[:,0]),'y-')
+ plt.imshow(img)
+ pts2_width = abs(pts2[:,0][-1]-pts2[:,0][0])
+ pts2_height = abs(pts2[:,1][-1]-pts2[:,1][0])
+ plt.xlim(min(pts2[:,0])-pts2_width,max(pts2[:,0])+pts2_width)
+ plt.ylim(max(pts2[:,1])+pts2_height,min(pts2[:,1])-pts2_height)
+ plt.title('fitted points right side')
+ plt.show()
+ plt.close()
+
+ m_surf = float(CPs[1][1]-CPs[0][1])/float(CPs[1][0]-CPs[0][0])
+
+ if (m1 > 0):
+ contact_angle1 = np.pi-np.arctan((m1-m_surf)/(1+m1*m_surf))
+ elif(m1 < 0):
+ contact_angle1 = -np.arctan((m1-m_surf)/(1+m1*m_surf))
+ else:
+ contact_angle1 = np.pi/2
+
+ if (m2 < 0):
+ contact_angle2 = np.pi+np.arctan((m2-m_surf)/(1+m2*m_surf))
+ elif(m2 > 0):
+ contact_angle2 = np.arctan((m2-m_surf)/(1+m2*m_surf))
+ else:
+ contact_angle2 = np.pi/2
+
+ contact_angle1 = contact_angle1*180/np.pi
+ contact_angle2 = contact_angle2*180/np.pi
+
+ fit_time = time.time() - fit_start_time
+
+ errors = polynomial_fit_errors(pts1,pts2,line_local1,line_local2,False)
+
+ analysis_time = time.time() - start_time
+
+ timings = {}
+ timings['method specific preprocessing time'] = fit_preprocessing_time
+ timings['fit time'] = fit_time
+ timings['analysis time'] = analysis_time
+
+ return [contact_angle1, contact_angle2], intercepts, errors, timings
+
def polynomial_fit(profile, Npts=15, polynomial_degree=2, display=False):
    """Fit polynomials near each contact point of an ordered drop profile and
    return the contact angles.

    The first and last Npts points of the profile are each fitted with a
    polynomial of degree polynomial_degree, and each contact angle is computed
    from the gradient of the polynomial at the contact point. Tilt of the
    surface is accounted for by the gradient of the line joining the two
    contact points.

    :param profile: ordered array/list of [x, y] drop-contour coordinates,
        running from one contact point to the other
    :param Npts: number of points from each end included in the fit
        (default 15)
    :param polynomial_degree: degree of the fitted polynomial (default 2)
    :param display: if True, show diagnostic figures of the fitted regions
    :return: tuple of ([angle_left, angle_right] in degrees,
        [CP_left, CP_right] contact points, tangent line endpoint pairs,
        fit errors, timings dict)
    """
    start_time = time.time()

    # contact points are assumed to be the profile end points
    CPs = [profile[0], profile[-1]]

    # isolate the fitted regions; reverse the right side so the CP comes first
    tangent_drop = profile.copy()
    pts1 = tangent_drop[:Npts]
    pts2 = tangent_drop[-Npts:][::-1]

    fit_local1 = np.polyfit(pts1[:, 0], pts1[:, 1], polynomial_degree)
    fit_local2 = np.polyfit(pts2[:, 0], pts2[:, 1], polynomial_degree)

    line_local1 = np.poly1d(fit_local1)
    line_local2 = np.poly1d(fit_local2)

    if polynomial_degree > 1:
        # gradient of each polynomial at its contact point
        f_local1_prime = line_local1.deriv(1)
        f_local2_prime = line_local2.deriv(1)
        m1 = f_local1_prime(pts1[0, 0])
        m2 = f_local2_prime(pts2[0, 0])

        # tangent segments spanning each fitted region, for visualisation
        x_local1 = np.array([min(pts1[:, 0]), max(pts1[:, 0])])
        x_local2 = np.array([min(pts2[:, 0]), max(pts2[:, 0])])
        tangent1 = m1 * (x_local1 - pts1[0, 0]) + pts1[0, 1]
        tangent2 = m2 * (x_local2 - pts2[0, 0]) + pts2[0, 1]

        tangent_lines = (((int(x_local1[0]), int(tangent1[0])), (int(x_local1[1]), int(tangent1[1]))),
                         ((int(x_local2[0]), int(tangent2[0])), (int(x_local2[1]), int(tangent2[1]))))
    else:
        # linear fit: polyfit returns [slope, intercept]
        m1 = fit_local1[0]
        m2 = fit_local2[0]

        tangent_lines = ((tuple(pts1[0]), (int(pts1[-1][0]), int((m1 * pts1[-1][0]) + fit_local1[1]))),
                         (tuple(pts2[0]), (int(pts2[-1][0]), int((m2 * pts2[-1][0]) + fit_local2[1]))))

    if display:
        for pts, line_local, side in ((pts1, line_local1, 'left'), (pts2, line_local2, 'right')):
            plt.plot(profile[:, 0], profile[:, 1], 'o', color='pink')
            plt.plot(pts[:, 0], pts[:, 1], 'ro')
            plt.plot(pts[:, 0], line_local(pts[:, 0]), 'y-')
            # zoom to the fitted region (y inverted to match image coordinates)
            pts_width = abs(pts[:, 0][-1] - pts[:, 0][0])
            pts_height = abs(pts[:, 1][-1] - pts[:, 1][0])
            plt.xlim(min(pts[:, 0]) - pts_width, max(pts[:, 0]) + pts_width)
            plt.ylim(max(pts[:, 1]) + pts_height, min(pts[:, 1]) - pts_height)
            plt.title('fitted points ' + side + ' side')
            plt.show()
            plt.close()

    # slope of the surface through the two contact points
    m_surf = float(CPs[1][1] - CPs[0][1]) / float(CPs[1][0] - CPs[0][0])

    # angle between each tangent and the (possibly tilted) surface;
    # the sign of the slope decides which side of pi/2 the angle falls on
    if m1 > 0:
        contact_angle1 = np.pi - np.arctan((m1 - m_surf) / (1 + m1 * m_surf))
    elif m1 < 0:
        contact_angle1 = -np.arctan((m1 - m_surf) / (1 + m1 * m_surf))
    else:
        contact_angle1 = np.pi / 2

    if m2 < 0:
        contact_angle2 = np.pi + np.arctan((m2 - m_surf) / (1 + m2 * m_surf))
    elif m2 > 0:
        contact_angle2 = np.arctan((m2 - m_surf) / (1 + m2 * m_surf))
    else:
        contact_angle2 = np.pi / 2

    # convert to degrees
    contact_angle1 = contact_angle1 * 180 / np.pi
    contact_angle2 = contact_angle2 * 180 / np.pi

    fit_time = time.time() - start_time

    errors = polynomial_fit_errors(pts1, pts2, line_local1, line_local2, False)

    analysis_time = time.time() - start_time

    timings = {}
    timings['fit time'] = fit_time
    timings['analysis time'] = analysis_time

    return [contact_angle1, contact_angle2], CPs, tangent_lines, errors, timings
+
# Disabled manual smoke test: flip the guard to 1 to run against a local image.
if 0:
    IMG_PATH = '../../RICOphobic_cropped.png'
    img = cv2.imread(IMG_PATH)

    angles, intercepts, errors, timings = polynomial_fit_img(img, display=False)

    if 1:
        print('angles: ', angles)
        print('intercepts: ', intercepts)  # fixed typo ('intercetps')
        print('errors: ', errors)
        print('timings: ', timings)

    print('done')
diff --git a/modules/preprocessing.py b/modules/preprocessing.py
new file mode 100644
index 0000000..265d4ad
--- /dev/null
+++ b/modules/preprocessing.py
@@ -0,0 +1,1137 @@
+#!/usr/bin/env python
+#coding=utf-8
+
+"""This code serves as a discrete instance of image preprocessing before contact
+angle fit software is implemented
+
+This includes automatic identification of the drop through Hough transform,
+followed by cropping of the image to isolate the drop. Tilt correction is then
+performed using the identified contact points of the drop.
+"""
+
+
+
+from sklearn.cluster import OPTICS # for clustering algorithm
+import numpy as np
+import matplotlib.pyplot as plt
+import cv2
+import math #for tilt_correction
+from scipy import misc, ndimage #for tilt_correction
+
def _plot_hough_circle(img, center, radius, title="Hough circle"):
    """Show *img* with the detected Hough circle overlaid (centre dot + outline)."""
    ax = plt.gca()
    ax.axis('equal')
    ax.add_patch(plt.Circle(center, 1, color='r'))
    # no-fill circle highlights the detected drop boundary
    ax.add_patch(plt.Circle(center, radius, color='r', fill=False))

    fig = plt.gcf()
    fig.set_size_inches(10, 10)

    plt.imshow(img)
    plt.title(title)
    plt.show()
    plt.close()

def auto_crop(img, low=50, high=150, apertureSize=3, verbose=0): # DS 08/06/23
    '''
    Automatically identify where the crop should be placed within the original
    image

    This function utilizes the opencv circular and linear Hough transform
    implementations to identify the most circular object in the image
    (the droplet), and center it within a frame that extends by padding to each
    side.

    :param img: BGR image as a numpy array
    :param low: Value of the weak pixels in the dual thresholding
    :param high: Value of the strong pixels in the dual thresholding
    :param apertureSize: The aperture size variable given to cv2.Canny during
        edge detection
    :param verbose: Integer values from 0 to 2, giving varying degrees of detail
    :return: (cropped image, [left, right, top, bottom] bounding-box edges)
    :raises ValueError: if no drop (Hough circle) can be found in the image
    '''
    if verbose >= 1:
        print('Performing auto-cropping, please wait...')

    # find edges in the image
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, low, high, apertureSize=apertureSize)

    # hough circle to find droplet - minRadius at 2% of image width
    circles = cv2.HoughCircles(edges, cv2.HOUGH_GRADIENT, 1,
                               minDist=max(img.shape),  # one circle
                               param1=30,
                               param2=15,
                               minRadius=int(img.shape[1]*0.02),  # 0.05
                               maxRadius=0)
    if circles is None:
        # retry with no minimum radius and a stricter accumulator threshold
        circles = cv2.HoughCircles(edges, cv2.HOUGH_GRADIENT, 1,
                                   minDist=max(img.shape),  # one circle
                                   param1=30,
                                   param2=20,
                                   minRadius=0,
                                   maxRadius=0)
    if circles is None:
        # previously this only printed and then crashed with a NameError on
        # `center`; fail explicitly instead
        raise ValueError('Hough circle failed to identify a drop')

    circles = np.uint16(np.around(circles))
    for i in circles[0, :]:
        # cast to plain int to avoid uint16 wraparound in later arithmetic
        # (e.g. center[1] - radius when the circle pokes above the frame)
        center = (int(i[0]), int(i[1]))
        radius = int(i[2])

        if verbose >= 2:
            _plot_hough_circle(img, center, radius)

    # crop image based on circle found (this prevents hough line identifying
    # the needle)
    bottom = int(center[1] + (radius * 1.2))  # add 20% padding
    if bottom > img.shape[0]:
        bottom = img.shape[0]
    top = int(center[1] - (radius * 1.2))  # add 20% padding
    if top < 0:
        top = 0

    img = img[top:bottom, :]
    # was hard-coded to (50, 150, 3); use the caller's edge-detection params
    edges = cv2.Canny(img, low, high, apertureSize=apertureSize)
    # reassign circle center to new cropped image
    center = (center[0], -(center[1] - bottom))

    if verbose >= 2:
        plt.imshow(img)
        plt.title('image after top and bottom crop')
        plt.show()
        plt.close()

    # hough lines to find baseline
    lines = cv2.HoughLines(edges, 1, np.pi/180, 100)

    if lines is not None:  # if the HoughLines function is successful
        if verbose >= 2:
            print('shape of image: ', img.shape)

        # only the strongest (first returned) line is used as the baseline
        rho, theta = lines[0][0]
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a * rho
        y0 = b * rho
        x1 = int(x0)
        y1 = int(y0 + 1000*(a))
        x2 = int(x0 - img.shape[1]*(-b))
        y2 = int(y0 - 1000*(a))

        if verbose >= 2:
            plt.title('hough approximated findings')
            plt.imshow(img)
            ax = plt.gca()
            ax.add_patch(plt.Circle(center, 1, color='r'))
            ax.add_patch(plt.Circle(center, radius, color='r', fill=False))
            plt.plot([x1, x2], [y1, y2], 'r')

        p1, p2 = (x1, y1), (x2, y2)  # baseline exists between these points
        if verbose >= 2:
            print('baseline goes from ', p1, ' to ', p2)

        # now find bounds

        # find intercept of line and circle: substitute the parametric line
        # p1 + t*(dx,dy) into the circle equation, giving a quadratic in t
        dx, dy = p2[0] - p1[0], p2[1] - p1[1]

        a = dx**2 + dy**2
        b = 2 * (dx * (p1[0] - center[0]) + dy * (p1[1] - center[1]))
        c = (p1[0] - center[0])**2 + (p1[1] - center[1])**2 - radius**2

        discriminant = b**2 - 4 * a * c
        if discriminant > 0:
            t1 = (-b + discriminant**0.5) / (2 * a)
            t2 = (-b - discriminant**0.5) / (2 * a)

            intersect1 = (dx * t1 + p1[0], dy * t1 + p1[1])
            intersect2 = (dx * t2 + p1[0], dy * t2 + p1[1])

            if verbose >= 2:
                plt.plot(intersect1[0], intersect1[1], 'o', color='orange')
                plt.plot(intersect2[0], intersect2[1], 'o', color='orange')

                plt.show()
                plt.close()

            bottom = int(max([intersect1[1], intersect2[1]]))  # max value of intersect points
            top = int(center[1] - radius)  # assume top of drop is in image
            if center[1] < max([intersect1[1], intersect2[1]]):
                right = int(center[0] + radius)
            else:
                right = int(max([intersect1[0], intersect2[0]]))
            if center[1] < min([intersect1[1], intersect2[1]]):
                left = int(center[0] - radius)
            else:
                left = int(min([intersect1[0], intersect2[0]]))
        else:
            # baseline misses the circle: fall back to the circle's own bounds
            print('No baseline-drop intercept found')
            left = int(center[0] - radius)
            right = int(center[0] + radius)
            top = int(center[1] - radius)
            bottom = int(center[1] + radius)

    else:
        # if the HoughLine function cannot identify a surface line, use the
        # circle as a guide for bounds
        left = int(center[0] - radius)
        right = int(center[0] + radius)
        top = int(center[1] - radius)
        bottom = int(center[1] + radius)

    # pad the bounding box by a quarter of its larger dimension
    pad = int(max([right - left, bottom - top]) / 4)

    top -= pad
    bottom += pad
    left -= pad
    right += pad

    # clip the padded box to the image
    if left < 0:
        left = 0
    if top < 0:
        top = 0
    if bottom > img.shape[0]:
        bottom = img.shape[0]
    if right > img.shape[1]:
        right = img.shape[1]

    if verbose >= 2:
        print('lower most y coord of drop: ', bottom)
        print('upper most y coord of drop: ', top)
        print('right most x coord of drop: ', right)
        print('left most x coord of drop: ', left)

    bounds = [left, right, top, bottom]
    new_img = img[top:bottom, left:right]

    if verbose >= 1:
        plt.title('cropped drop')
        plt.imshow(new_img)
        plt.show()
        plt.close()

    return new_img, bounds
+
def find_intersection(baseline_coeffs, circ_params):
    '''
    Compute the intersection points between the best fit circle and best-fit
    baseline.

    For this we rely on several coordinate transformations, first a
    translation to the centerpoint of the circle and then a rotation to give
    the baseline zero-slope.

    :param baseline_coeffs: Numpy array of coefficients to the baseline
                            polynomial, ordered (intercept, slope, ...)
    :param circ_params: centerpoint and radius of best-fit circle
    :return: (x, y) point of intersection (positive root) in the
             translated/rotated coordinate frame
    :raises ValueError: if the baseline does not intersect the circle
    '''
    *z, r = circ_params
    b, m = baseline_coeffs[0:2]
    # Rather than brute-force numerical solution, use combinations of
    # coordinate translations and rotations to arrive at a horizontal line
    # passing through a circle.
    # 1. Translate the origin to the center-point of the fitted circle:
    #      x = x - z[0], y = y - z[1]
    #      Circle : x**2 + y**2 = r**2
    #      Line   : y = m * x + (m * z[0] + b - z[1])
    # 2. Rotate clockwise about the origin by q, s.t. tan(q) = m:
    #      [x;y] = [ [ cos(q) , sin(q) ] ;
    #                [-sin(q) , cos(q) ] ] * [ x ; y ]
    #      Circle : x**2 + y**2 = r**2
    #      Line   : y = (m*z[0] + b - z[1]) / sqrt(1 + m**2)
    #      (no dependence on x - as expected)
    # With this simplified scenario, the line y = B intersects the circle
    # x**2 + y**2 = r**2 at x = +/- sqrt(r**2 - B**2); keep the positive root.

    B = (m * z[0] + b - z[1]) / np.sqrt(1 + m**2)

    # use abs(B): the line also misses the circle when it lies entirely on the
    # other side (B < -r); the old `B > r` check let np.sqrt return nan there
    if abs(B) > r:
        raise ValueError("The circle and baseline do not appear to intersect")
    x_t = np.sqrt(r ** 2 - B ** 2)
    y_t = B

    # TODO:// replace the fixed linear baseline with linear
    # approximations near the intersection points

    return x_t, y_t
+
def cluster_OPTICS(sample, out_style='coords', xi=None, eps=None, verbose=0):
    """Cluster an array (or list) of [x, y] points with sklearn's OPTICS.

    Exactly one of xi and eps must be supplied:

    - xi (float between 0 and 1): use the xi cluster method. The optics
      algorithm defines clusters based on the minimum steepness on the
      reachability plot; an upwards point is defined by the ratio from one
      point to its successor being at most 1-xi.
    - eps (float): use the dbscan cluster method, where eps is the maximum
      distance between two samples for one to be considered as in the
      neighborhood of the other.

    :param sample: array or list of the form [[x1,y1],[x2,y2],...,[xn,yn]]
    :param out_style: 'coords' returns {label: (n, 2) array of points};
        'xy' returns two entries per group, '<label>x' and '<label>y',
        each a 1D numpy array
    :param verbose: 0 silent, 1 prints the group labels, 2 prints the label
        of every point
    :return: dictionary of clusters (shape depends on out_style)
    :raises ValueError: if both or neither of xi and eps are given

    https://stackoverflow.com/questions/47974874/algorithm-for-grouping-points-in-given-distance
    https://scikit-learn.org/stable/modules/generated/sklearn.cluster.OPTICS.html
    """
    if eps is not None and xi is None:
        # cluster_method changed to dbscan (so eps can be set)
        clustering = OPTICS(min_samples=2, cluster_method='dbscan', eps=eps).fit(sample)
    elif xi is not None and eps is None:
        # original had xi = 0.05, xi as 0.1 in function input
        clustering = OPTICS(min_samples=2, xi=xi).fit(sample)
    else:
        # was a bare string `raise`, which is itself a TypeError in Python 3
        raise ValueError('only one of eps and xi can be chosen but not neither nor both')

    groups = list(set(clustering.labels_))

    if verbose == 2:
        print(clustering.labels_)
    elif verbose == 1:
        print(groups)

    # group the sample points by their cluster label
    dic = {}
    for n in groups:
        dic[n] = np.array([list(sample[i]) for i in range(len(sample))
                           if clustering.labels_[i] == n])

    # separate points into per-group x and y arrays
    dic2 = {}
    for k in dic.keys():
        dic2[str(k) + 'x'] = np.array([p[0] for p in dic[k]])
        dic2[str(k) + 'y'] = np.array([p[1] for p in dic[k]])

    if out_style == 'coords':
        return dic
    elif out_style == 'xy':
        return dic2
+
def distance1(P1, P2):
    """Return the Euclidean distance between points P1 = (x1, y1) and P2 = (x2, y2)."""
    dx = P1[0] - P2[0]
    dy = P1[1] - P2[1]
    return (dx * dx + dy * dy) ** 0.5
+
def optimized_path(coords, start=None):
    """Order points into a continuous path by greedy nearest-neighbour hops.

    Starting from `start` (default: the first coordinate), repeatedly append
    the closest remaining point.

    :param coords: list or array of [x, y] points,
        e.g. coords = [[x1, y1], [x2, y2], ...]
    :param start: starting point; must be an element of coords if given
    :return: (n, 2) numpy array of the points in visiting order

    https://stackoverflow.com/questions/45829155/sort-points-in-order-to-have-a-continuous-curve-using-python
    """
    if not isinstance(coords, list):
        coords = coords.tolist()
    if start is None:
        start = coords[0]

    # work on a copy: the original removed points from the caller's list
    # in place, silently emptying it when a plain list was passed in
    pass_by = list(coords)
    path = [start]
    pass_by.remove(start)
    while pass_by:
        last = path[-1]
        # squared-distance ordering equals distance ordering; **0.5 kept for clarity
        nearest = min(pass_by,
                      key=lambda p: ((p[0] - last[0])**2 + (p[1] - last[1])**2) ** 0.5)
        path.append(nearest)
        pass_by.remove(nearest)
    return np.array(path)
+
def prepare_hydrophobic(coords,xi=0.8,cluster=True,display=False):
    """Split a drop contour into ordered left/right halves and locate contact points.

    Takes an array (n,2) of coordinate points, and returns the ordered contour
    (`profile`) of the drop with the surface line removed, plus the two contact
    points (CPs). xi determines the minimum steepness on the reachability plot
    that constitutes a cluster boundary of the clustering algorithm.

    This code is adapted from the prepare module, but this version differs in
    that it assumes that the drop is hydrophobic.

    :param coords: (n, 2) array of [x, y] contour coordinates (image indexing)
    :param xi: cluster-boundary steepness passed conceptually to OPTICS
        (NOTE(review): not visibly used in this body -- confirm)
    :param cluster: NOTE(review): not visibly used in this body -- confirm
    :param display: if True, show diagnostic plots at each stage
    :return: (profile, CPs) -- ordered contour and {0: left CP, 1: right CP}
    """
    # NOTE(review): several spans of this function appear corrupted in transit
    # (merged/missing lines); each is flagged below. Confirm against the
    # original source before relying on this version.
    # NOTE(review): np.float was removed in NumPy 1.24 -- this line raises
    # AttributeError on modern NumPy; presumably float / np.float64 was meant.
    coords = coords.astype(np.float)

    # flip contour so that min and max values are correct
    for coord in coords:
        coord[1] = -coord[1]

    longest = coords

    #print("first few coordinates of the longest contour: ",longest[:3])

    # split the contour into separate x and y coordinate lists
    xlongest = []
    ylongest = []
    for i in range(len(longest)):
        xlongest.append(longest[i][0])
        ylongest.append(longest[i][1])

    #print("first few x coordinates of the longest contour: ",xlongest[:3])
    #print("first few y coordinates of the longest contour: ",ylongest[:3])


    # Find a appropriate epsilon value for cluster_OPTICS, this will remove noise in the bottom 10% of the drop
    #. most importantly noise is reduced at contact points.

    # variables in this process are how much and what part of the top of the droplet we use to be representative of
    # the full contour, and whether we use the max(distance) between points or the average between points, or
    # a scalar value of either.

    xtop = [] # isolate top 90% of drop
    ytop = []
    percent = 0.3
    #print('Isolate the top ',100-(percent*100),'% of the contour:')
    for n,y in enumerate(ylongest):
        if y > min(ylongest) + (max(ylongest) - min(ylongest))*percent:
            xtop.append(xlongest[n])
            ytop.append(y)
    xtop = np.array(xtop)
    ytop = np.array(ytop)

    # order the isolated top section into a continuous path
    top = []
    for n,x in enumerate(xtop):
        top.append([xtop[n],ytop[n]])
    top = np.array(top)
    top_array = optimized_path(top)

    dists = [] # find the average distance between consecutive points
    for n,co in enumerate(top_array):
        # NOTE(review): the line below is corrupted -- a span of the original
        # source is missing between `if 1` and `min(ylongest)`; `dists` is
        # never populated or used in this visible version.
        if 1 min(ylongest) + (max(ylongest) - min(ylongest))*percent:
            xtop.append(xlongest[n])
            ytop.append(y)
    # apex x coordinate: midpoint of the top section's x extent
    xapex = (max(xtop) + min(xtop))/2

    # split the full contour into a left and a right half at the apex
    l_drop = []
    r_drop = []
    for n in longest:
        if n[0] <= xapex:
            l_drop.append(n)
        if n[0] >= xapex:
            r_drop.append(n)
    l_drop = np.array(l_drop)
    r_drop = np.array(r_drop)

    # transpose both half drops so that they both face right and the apex of both is at 0,0
    r_drop[:,0] = r_drop[:,0] - xapex
    l_drop[:,0] = -l_drop[:,0] + xapex

    if display:
        plt.plot(r_drop[:,[0]], r_drop[:,[1]], 'b,')
        #plt.show()
        #plt.close()
        plt.plot(l_drop[:,[0]], l_drop[:,[1]], 'r,')
        #plt.gca().set_aspect('equal', adjustable='box')
        #plt.xlim([470,530])
        #plt.ylim([-188,-190])
        plt.show()
        plt.close()

    #############################

    # the drop has been split in half

    # this system has a user input which gives a rough indication of the contact point and the surface line

    # isolate the bottom 5% of the contour near the contact point

    drops = {}
    counter = 0
    crop_drop = {}
    CPs = {}
    for halfdrop in [l_drop,r_drop]:
        new_halfdrop = sorted(halfdrop.tolist(), key=lambda x: (x[0],-x[1])) #top left to bottom right
        new_halfdrop = optimized_path(new_halfdrop)#[::-1]

        xnew_halfdrop = new_halfdrop[:,[0]].reshape(len(new_halfdrop[:,[0]]))
        ynew_halfdrop = new_halfdrop[:,[1]].reshape(len(new_halfdrop[:,[1]]))

        # isolate the bottom of the drop to help identify contact points (may not need to do this for all scenarios)
        bottom = []
        top = [] # will need this later
        #print('isolate the bottom ',percent*100,'% of the contour:') # percent defined above
        div_line_value = min(new_halfdrop[:,[1]]) + (max(new_halfdrop[:,[1]]) - min(new_halfdrop[:,[1]]))*percent
        for n in new_halfdrop:
            if n[1] < div_line_value:
                bottom.append(n)
            else:
                top.append(n)

        bottom = np.array(bottom)
        top = np.array(top)

        xbottom = bottom[:,[0]].reshape(len(bottom[:,[0]]))
        ybottom = bottom[:,[1]].reshape(len(bottom[:,[1]]))
        xtop = top[:,[0]].reshape(len(top[:,[0]]))
        ytop = top[:,[1]].reshape(len(top[:,[1]]))

        #print('max x value of halfdrop is: ',max(xhalfdrop))

        if 0: # plot the bottom 10% of the contour, check that the contour ordering is performing
            plt.plot(xbottom, ybottom, 'b,')
            plt.title('bottom 10% of the contour')
            #plt.xlim([130,200])
            plt.show()
            plt.close()

        #### Continue here assuming that the drop is hydrophobic ####
        if 1:
            # order all halfdrop points using optimized_path (very quick but occasionally makes mistakes)

            # contact point: leftmost x of the bottom section, with the
            # highest y among points sharing that x
            xCP = min(xbottom)
            #yCP = min([coord[1] for coord in new_halfdrop if coord[0]==xCP])
            yCP = max([coord[1] for coord in bottom if coord[0]==xCP])
            CPs[counter] = [xCP, yCP]

            if display: #check
                plt.plot(new_halfdrop[:,0],new_halfdrop[:,1])
                plt.show()
                plt.close()

            # remove surface line past the contact point
            index = new_halfdrop.tolist().index(CPs[counter]) #?

            new_halfdrop = new_halfdrop[:index+1]

        # NOTE(review): the following `if 0:` branches are dead code retained
        # from earlier ordering strategies; they are never executed.
        if 0:
            xCP_index = [i for i, j in enumerate(xnew_halfdrop) if j == xCP]
            #print('xCP_index is: ',xCP_index)
            yCP_index = [i for i, j in enumerate(ynew_halfdrop) if j == yCP]
            #print('yCP_index is: ',yCP_index)

            new_halfdrop = np.zeros((len(xnew_halfdrop),2))
            for n in range(len(xnew_halfdrop)):
                new_halfdrop[n,[0]]=xnew_halfdrop[n]
                new_halfdrop[n,[1]]=ynew_halfdrop[n]
            #print('first 3 points of new_halfdrop are: ',new_halfdrop[:3])
            #print('length of new_halfdrop is: ',len(new_halfdrop))

            if xCP_index == yCP_index:
                if new_halfdrop[xCP_index[0]+1][1]>new_halfdrop[xCP_index[0]-1][1]:
                    new_halfdrop = new_halfdrop[xCP_index[0]:]
                else:
                    new_halfdrop = new_halfdrop[:xCP_index[0]+1]
            else:
                raise_error = True
                for x in xCP_index:
                    for y in yCP_index:
                        if x==y:
                            raise_error = False
                            xCP_index = [x]
                            yCP_index = [y]
                            #print('indexes of the CP are: ',xCP_index[0],', ',yCP_index[0])
                            if new_halfdrop[xCP_index[0]+1][1]>new_halfdrop[xCP_index[0]-1][1]:
                                new_halfdrop = new_halfdrop[xCP_index[0]:]
                            else:
                                new_halfdrop = new_halfdrop[:xCP_index[0]+1]
                if raise_error == True:
                    print('The index of the contact point x value is: ', new_halfdrop[xCP_index])
                    print('The index of the contact point y value is: ', new_halfdrop[yCP_index])
                    # NOTE(review): raising a string is a TypeError in Python 3
                    raise 'indexes of x and y values of the contact point are not the same'

        if 0:
            # order all halfdrop points using two-opt (the slower method)

            # before any ordering is done, chop off the surface line that is past the drop edge
            del_indexes = []
            for index,coord in enumerate(bottom):
                if coord[0]>max(xtop):
                    del_indexes.append(index)
                # NOTE(review): the line below is corrupted -- a span of the
                # original source is missing between `coord[1]` and `ybot[...]`.
                if coord[1] ybot[yCP_index-1]:
                    new_bot = np.zeros((len(xbot[yCP_index:]),2))
                    for n in range(len(new_bot)):
                        new_bot[n,[0]] = xbot[xCP_index+n]
                        new_bot[n,[1]] = ybot[yCP_index+n]
                else:
                    new_bot = np.zeros((len(xbot[:yCP_index]),2))
                    for n in range(len(new_bot)):
                        new_bot[n,[0]] = xbot[n]
                        new_bot[n,[1]] = ybot[n]
            # NOTE(review): orphaned `except:` -- the matching `try:` was lost
            # in the corrupted span above.
            except:
                try:
                    if ybot[yCP_index] > ybot[yCP_index-2]:
                        new_bot = np.zeros((len(xbot[yCP_index:]),2))
                        for n in range(len(new_bot)):
                            new_bot[n,[0]] = xbot[xCP_index+n]
                            new_bot[n,[1]] = ybot[yCP_index+n]
                    else:
                        new_bot = np.zeros((len(xbot[:yCP_index]),2))
                        for n in range(len(new_bot)):
                            new_bot[n,[0]] = xbot[n]
                            new_bot[n,[1]] = ybot[n]
                except:
                    print('xCP_indexs are: ', xCP_indexs)
                    print('yCP_indexs are: ', yCP_indexs)
                    # NOTE(review): raising a string is a TypeError in Python 3
                    raise 'indexes of x and y values of the contact point are not the same'
            new_halfdrop = np.concatenate((new_top,new_bot))

        if 0: # order the points so that the baseline can be removed
            # before any ordering is done, chop off the surface line that is past the drop edge
            del_indexes = []
            for index,coord in enumerate(halfdrop):
                if coord[0]>max(xtop):
                    del_indexes.append(index)
            #halfdrop = np.delete(halfdrop,del_indexes)
            xhalfdrop = np.delete(xhalfdrop,del_indexes)
            yhalfdrop = np.delete(yhalfdrop,del_indexes)
            #print('shape of another_halfdrop is: '+ str(type(another_halfdrop)))
            #print('first few points of halfdrop are: ',halfdrop[:3])



            # order half contour points
            xx,yy = sort_to_line(xhalfdrop,yhalfdrop)
            add_top = False
            #print('length of halfdrop is: ', len(halfdrop))
            #print('length of xbottom is: ', len(xbottom))

            #if xx[0]<1: # then graph starts at the top
            surface_past_drop_index = []
            for n,x in enumerate(xx):
                if x>max(xtop):
                    surface_past_drop_index.append(n)
            #xx = xx[:max(xtop)point]
            #print('Indexes of contour points past drop: ',surface_past_drop_index)


            # if the sort method will not work
            if len(xx) < len(xhalfdrop): # assumes that the error is on the surface somewhere, so uses bottom of contour
                add_top = True
                print()
                print('sort_to_line is not utilising the full contour, alternate ordering method being used')
                print('check bottom 10% of contour...')
                # this method is much slower than the above, so use as few points as possible
                bot_list = []
                for n in range(len(xbottom)):
                    # NOTE(review): the line below is corrupted -- a span of
                    # the original source is missing between `xbottom[n]` and
                    # `ybot[...]`.
                    if xbottom[n] ybot[yCP_index[0]-1]:
                        new_bot = np.zeros((len(xbot[yCP_index[0]:]),2))
                        for n in range(len(new_bot)):
                            new_bot[n,[0]] = xbot[xCP_index[0]+n]
                            new_bot[n,[1]] = ybot[yCP_index[0]+n]
                    else:
                        new_bot = np.zeros((len(xbot[:yCP_index[0]]),2))
                        for n in range(len(new_bot)):
                            new_bot[n,[0]] = xbot[n]
                            new_bot[n,[1]] = ybot[n]
                else:
                    # NOTE(review): raising a string is a TypeError in Python 3
                    raise 'indexes of x and y values of the contact point are not the same'

                # combine new_bot with top_array to give the isolated drop contour without surface
                if 0:
                    top_array = np.zeros((len(xtop),2))
                    for n in range(len(xtop)):
                        top_array[n,[0]] = xtop[n]
                        top_array[n,[1]] = ytop[n]

                new_halfdrop = np.concatenate((top,new_bot))

                # re-order to check that the error was at the surface line
                xx,yy = sort_to_line(new_halfdrop[:,[0]],new_halfdrop[:,[1]])
                # NOTE(review): the line below is corrupted -- a span of the
                # original source is missing between `len(xx)` and
                # `new_halfdrop[...]`.
                if len(xx)new_halfdrop[xCP_index[0]-1][1]:
                    new_halfdrop = new_halfdrop[xCP_index[0]:]
                else:
                    new_halfdrop = new_halfdrop[:xCP_index[0]+1]
            else:
                raise_error = True
                for x in xCP_index:
                    for y in yCP_index:
                        if x==y:
                            raise_error = False
                            xCP_index = [x]
                            yCP_index = [y]
                            #print('indexes of the CP are: ',xCP_index[0],', ',yCP_index[0])
                            if new_halfdrop[xCP_index[0]+1][1]>new_halfdrop[xCP_index[0]-1][1]:
                                new_halfdrop = new_halfdrop[xCP_index[0]:]
                            else:
                                new_halfdrop = new_halfdrop[:xCP_index[0]+1]
                if raise_error == True:
                    print('The index of the contact point x value is: ', new_halfdrop[xCP_index])
                    print('The index of the contact point y value is: ', new_halfdrop[yCP_index])
                    # NOTE(review): raising a string is a TypeError in Python 3
                    raise 'indexes of x and y values of the contact point are not the same'

        # left half is reversed so that both halves run in the same direction
        if counter == 0:
            drops[counter] = new_halfdrop[::-1]
        else:
            drops[counter] = new_halfdrop

        if display: #display
            jet= plt.get_cmap('jet')
            colors = iter(jet(np.linspace(0,1,len(new_halfdrop))))
            for k in new_halfdrop:
                plt.plot(k[0],k[1], 'o',color=next(colors))
            plt.title('outputted halfdrop')
            plt.axis('equal')
            plt.show()
            plt.close()

        counter+=1

    # reflect the left drop and combine left and right

    profile = np.empty((len(drops[0])+len(drops[1]),2))
    for i,n in enumerate(drops[0]):
        flipped = n
        flipped[0] = -flipped[0]
        profile[i] = flipped
    for i,n in enumerate(drops[1]):
        profile[len(drops[0])+i] = n
    # mirror the left contact point to match the reflected left half
    CPs[0][0] = -CPs[0][0]

    if display:
        jet= plt.get_cmap('jet')
        colors = iter(jet(np.linspace(0,1,len(profile))))
        for k in profile:
            plt.plot(k[0],k[1], 'o',color=next(colors))
        plt.title('final output')
        #plt.plot(profile[:,0],profile[:,1],'b')
        plt.show()
        plt.close()

        plt.title('final output')
        plt.plot(profile[:,0],profile[:,1],'b')
        plt.show()
        plt.close()

    # flip upside down again so that contour follows image indexing
    # and transform to the right so that x=0 is no longer in line with apex
    for coord in profile:
        coord[1] = -coord[1]
        coord[0] = coord[0] + xapex
    for n in [0,1]:
        CPs[n][1] = -CPs[n][1]
        CPs[n][0] = CPs[n][0] + xapex

    # flip original contour back to original orientation
    for coord in coords:
        coord[1] = -coord[1]

    return profile,CPs
+
def find_contours(image):
    """
    Calls cv2.findContours() on passed image in a way that is compatible with OpenCV 4.x, 3.x or 2.x
    versions. Passed image is a numpy.array.

    Note, cv2.findContours() will treat non-zero pixels as 1 and zero pixels as 0, so the edges detected will only
    be those on the boundary of pixels with non-zero and zero values.

    Returns the contours as (n, 2) arrays, sorted in descending arc length order.
    """
    if len(image.shape) > 2:
        raise ValueError('`image` must be a single channel image')

    find_args = (image, cv2.RETR_LIST, cv2.CHAIN_APPROX_TC89_KCOS)
    if (3, 2, 0) <= CV2_VERSION < (4, 0, 0):
        # OpenCV 3.2 through 3.x does not modify the passed image and instead
        # returns it as the first of three values: (image, contours, hierarchy)
        _, found, _hierarchy = cv2.findContours(*find_args)
    else:
        # OpenCV 2.x and 4.0+ share the two-value signature (contours, hierarchy)
        found, _hierarchy = cv2.findContours(*find_args)

    # Each contour has shape (n, 1, 2) where 'n' is the number of points — each
    # point a size-2 column vector. Flatten every contour to shape (n, 2).
    reshaped = [c.reshape(c.shape[0], 2) for c in found]

    # Longest (by arc length) contour first
    reshaped.sort(key=lambda c: cv2.arcLength(c, False), reverse=True)

    return reshaped
+
def extract_edges_CV(img, threshold_val=None, return_thresholed_value=False, display=False):
    '''
    Detect the drop edge in `img` and return it as an (n, 2) array of [x, y]
    pixel coordinates.

    Parameters:
        img: grayscale or BGR image; converted to uint8 internally.
        threshold_val: fixed binarisation threshold. When None, Otsu's method
            chooses the threshold automatically.
        return_thresholed_value: when True, also return the threshold that was
            used. (Parameter name kept, typo and all, for interface
            compatibility.)
        display: unused; kept for interface compatibility with callers.

    Returns:
        (n, 2) numpy array of edge coordinates, or (coords, threshold) when
        return_thresholed_value is True.
    '''
    IGNORE_EDGE_MARGIN = 1
    img = img.astype("uint8")

    # FIX: explicit channel check instead of the previous bare `except:`,
    # which could silently mask unrelated cv2 errors.
    if len(img.shape) > 2:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    else:
        gray = img

    if threshold_val is None:  # FIX: identity comparison for None
        ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    else:
        ret, thresh = cv2.threshold(gray, threshold_val, 255, cv2.THRESH_BINARY)

    # NOTE(review): assumes the two-value OpenCV 4.x / pre-3.2 return
    # signature; see find_contours() for the version-aware variant.
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)

    # Each contour has shape (n, 1, 2); flatten to (n, 2).
    contours = [contour.reshape(contour.shape[0], 2) for contour in contours]

    # Sort the contours by arc length, descending order, and assume the drop
    # is the longest contour.
    contours.sort(key=lambda c: cv2.arcLength(c, False), reverse=True)
    drop_profile = contours[0]

    # Ignore points of the drop profile near the edges of the drop image.
    width, height = img.shape[1::-1]
    if not (width < IGNORE_EDGE_MARGIN or height < IGNORE_EDGE_MARGIN):
        mask = ((IGNORE_EDGE_MARGIN < drop_profile[:, 0]) & (drop_profile[:, 0] < width - IGNORE_EDGE_MARGIN) &
                (IGNORE_EDGE_MARGIN < drop_profile[:, 1]) & (drop_profile[:, 1] < height - IGNORE_EDGE_MARGIN))
        drop_profile = drop_profile[mask]

    # FIX: order-preserving de-duplication in O(n) via a seen-set instead of
    # the previous O(n^2) `list not in list` scan.
    seen = set()
    output = []
    for coord in drop_profile:
        key = (coord[0], coord[1])
        if key not in seen:
            seen.add(key)
            output.append(list(coord))
    output = np.array(output)

    if return_thresholed_value:
        return output, ret
    return output
+
def tilt_correction(img, baseline, user_set_baseline=False):
    """Rotate `img` so that `baseline` becomes horizontal.

    Parameters:
        img: image array (as produced by cv2.imread / cropping).
        baseline: two (x, y) points defining the surface line.
        user_set_baseline: when True, first crop the image just below the
            lower of the two baseline points.

    Returns the rotated image with the black wedges introduced by the
    rotation cropped away; returns `img` unchanged if the baseline is
    already level.
    """
    p1, p2 = baseline[0], baseline[1]
    x1, y1 = p1
    x2, y2 = p2

    if y1 == y2:  # image is already level
        return img

    if user_set_baseline:
        img = img[:int(max([y1, y2])), :]

    # BUG FIX: a vertical baseline (x1 == x2) previously raised
    # ZeroDivisionError; treat it as the 90-degree limit of atan.
    if x1 == x2:
        rotate_angle = 90.0
    else:
        t = float(y2 - y1) / (x2 - x1)
        rotate_angle = math.degrees(math.atan(t))
    # Fold steep angles back into (-45, 45] so we rotate the short way.
    if rotate_angle > 45:
        rotate_angle = -90 + rotate_angle
    elif rotate_angle < -45:
        rotate_angle = 90 + rotate_angle
    rotate_img = ndimage.rotate(img, rotate_angle)
    print('image rotated by ' + str(rotate_angle) + ' degrees')

    # Crop the black edges created when rotating.
    width = np.sin(np.deg2rad(rotate_angle))
    side = math.ceil(abs(width * rotate_img.shape[1]))
    roof = math.ceil(abs(width * rotate_img.shape[0]))
    # BUG FIX: slicing with [roof:-roof] returned an EMPTY array when the
    # margins were 0; compute the end indices explicitly instead.
    rows, cols = rotate_img.shape[:2]
    rotate_img_crop = rotate_img[roof:rows - roof, side:cols - side]

    return rotate_img_crop
+
def preprocess(img, correct_tilt=True, display=False):
    """Isolate and level the drop in a raw experimental image.

    Automatically locates the drop (Hough transform inside auto_crop), crops
    to it, extracts the edge contour, and — unless correct_tilt is False —
    rotates the crop so the detected contact points are level, re-extracting
    the contour afterwards.

    Returns (processed_image, drop_contour).
    """
    cropped, crop_bounds = auto_crop(img.copy(), verbose=0)
    left, right, top, bottom = crop_bounds  # bounds currently unused below
    edge_points = extract_edges_CV(cropped)  # [x, y] coords of detected edges

    if display:
        plt.imshow(cropped)
        plt.plot(edge_points[:, 0], edge_points[:, 1])
        plt.title('drop found by hough transform')
        plt.show()
        plt.close()

    contour, contact_pts = prepare_hydrophobic(edge_points, display=display)

    if not correct_tilt:
        return cropped, contour

    corrected = tilt_correction(cropped, [contact_pts[0], contact_pts[1]])

    if display:
        plt.imshow(corrected)
        plt.title('tilt corrected and cropped image')
        plt.show()
        plt.close()

    # Re-derive the contour from the levelled image.
    edge_points = extract_edges_CV(corrected)
    contour, contact_pts = prepare_hydrophobic(edge_points, display=display)
    return corrected, contour
+
def preprocess_ML(img, display=False):
    """Crop the image to the drop and correct its tilt for ML-based fitting.

    Same pipeline as preprocess() with tilt correction always applied, but
    returns only the corrected image (no contour).
    """
    cropped, crop_bounds = auto_crop(img.copy(), verbose=0)
    left, right, top, bottom = crop_bounds  # bounds currently unused below
    edge_points = extract_edges_CV(cropped)  # [x, y] coords of detected edges

    if display:
        plt.imshow(cropped)
        plt.plot(edge_points[:, 0], edge_points[:, 1])
        plt.title('drop found by hough transform')
        plt.show()
        plt.close()

    contour, contact_pts = prepare_hydrophobic(edge_points, display=display)

    corrected = tilt_correction(cropped, [contact_pts[0], contact_pts[1]])

    if display:
        plt.imshow(corrected)
        plt.title('tilt corrected and cropped image')
        plt.show()
        plt.close()

    return corrected
+
# NOTE(review): disabled (`if 0`) manual smoke test for the preprocessing
# pipeline; flip the guard to 1 and point IMG_PATH at a local image to run it.
if 0: #test
    #IMG_PATH = '/Users/dgshaw/OneDrive - The University of Melbourne/experimental-image-database/Chiara-group-images/Natural/20171112JT.BMP'
    #IMG_PATH = '/Users/dgshaw/OneDrive - The University of Melbourne/experimental-image-database/Binyu-group-images/96-8.bmp'
    #IMG_PATH = '/Users/dgshaw/OneDrive - The University of Melbourne/experimental-image-database/evaporation-screenshots/Screenshot 2023-05-05 at 2.21.34 pm.png'
    #IMG_PATH = '/Users/dgshaw/cloudstor/files/models/model_v03/sensitivity_spartan_replication/sensitivity_dataset/170.0_1.5_1.0_0.0_.png'
    #IMG_PATH = '/Users/dgshaw/Library/CloudStorage/OneDrive-TheUniversityofMelbourne/experimental-image-database/Kristina/2023_05_26_RWTH_AVT_FVT_Kristina_Mielke_selected_pictures/static_contact_angle/20230328_water_solvent_1b_0_250_15_6.jpg'
    #IMG_PATH = '/Users/dgshaw/Library/CloudStorage/OneDrive-TheUniversityofMelbourne/experimental-image-database/Kristina/2023_05_26_RWTH_AVT_FVT_Kristina_Mielke_selected_pictures/static_contact_angle/20230328_water_solvent_1b_0_250_15_1.jpg'
    #IMG_PATH = '/Users/dgshaw/OneDrive - The University of Melbourne/experimental-image-database/MCFP-CA-images/CA images batch 1/Water+MOF/20221101-24h/115.553909.bmp'
    IMG_PATH = '/Users/dgshaw/OneDrive - The University of Melbourne/experimental-image-database/Current/10.bmp'

    img = cv2.imread(IMG_PATH, cv2.IMREAD_COLOR)

    if 1:
        plt.title('loaded image')
        plt.imshow(img)
        plt.show()
        plt.close()

    print('beginning preprocessing')
    img,profile = preprocess(img, display=True)

    if 1:
        plt.title('processed image')
        plt.imshow(img)
        plt.show()
        plt.close()
    print('preprocessing ended')
    print()
    print('repeating preprocessing with display off')

    # second pass exercises the display=False code path
    img, profile = preprocess(img, display=False)
diff --git a/modules/read_image.py b/modules/read_image.py
new file mode 100644
index 0000000..87ac06a
--- /dev/null
+++ b/modules/read_image.py
@@ -0,0 +1,98 @@
+#!/usr/bin/env python
+#coding=utf-8
+from __future__ import print_function
+import subprocess
+import cv2
+import time
+import datetime
+import timeit
+import os
+import numpy as np
+
+IMAGE_FLAG = 1 # 1 returns three channels (BGR), 0 returns gray
+
+
def get_image(experimental_drop, experimental_setup, frame_number):
    """Acquire the next frame into experimental_drop.

    On frame 0, records a run timestamp on the setup and (when
    create_folder_boole is set) creates a per-run output directory named
    <filename-without-extension>_<timestamp>, which becomes the new
    directory_string.
    """
    import_from_source(experimental_drop, experimental_setup, frame_number)

    if frame_number != 0:
        return

    stamp = datetime.datetime.now().strftime("%Y-%m-%d-%H%M%S")
    experimental_setup.time_string = stamp

    if not experimental_setup.create_folder_boole:
        return

    base_name = experimental_setup.filename[:-4]  # strip the 4-char image extension
    print(base_name)
    run_dir = os.path.join(experimental_setup.directory_string, base_name + "_" + stamp)
    print(run_dir)
    os.makedirs(run_dir)
    experimental_setup.directory_string = run_dir
+
def save_image(experimental_drop, experimental_setup, frame_number):
    """Write the current frame to <name>_<timestamp>_<NNN><ext> in the run directory."""
    target = os.path.join(experimental_setup.directory_string, experimental_setup.filename)
    stem, ext = target[:-4], target[-4:]  # file naming assumes a 4-character extension
    out_name = stem + '_' + experimental_setup.time_string + '_' + str(frame_number).zfill(3) + ext
    cv2.imwrite(out_name, experimental_drop.image)
+
+# this routine imports the raw drop image based on user input image source
+# image_source = 0 : Flea3
+# image_source = 1 : USB camera
+# image_source = 2 : image on computer
def import_from_source(experimental_drop, experimental_setup, frame_number):
    """Load the next raw drop image from the configured source.

    Dispatches on experimental_setup.image_source:
        "Flea3"        -> external FCGrab capture
        "USB camera"   -> cv2.VideoCapture grab
        "Local images" -> file on disk selected by frame_number

    Side effects: the helpers set experimental_drop.image; on success,
    experimental_drop.time is stamped with the acquisition timer.

    Raises:
        ValueError: if image_source is not one of the recognised values.
    """
    image_source = experimental_setup.image_source
    if image_source == "Flea3":
        image_from_Flea3(experimental_drop)
    elif image_source == "USB camera":
        image_from_camera(experimental_drop)
    elif image_source == "Local images":
        image_from_harddrive(experimental_drop, experimental_setup, frame_number)
    else:
        # BUG FIX: the exception object was previously constructed but never
        # raised, so an invalid image_source failed silently.
        raise ValueError("Incorrect value for image_source")
    experimental_drop.time = timeit.default_timer()
+
+
def image_from_Flea3(experimental_drop):
    """Trigger an external FCGrab capture and load the frame it writes (FCG.pgm)."""
    subprocess.call(["./FCGrab"])
    experimental_drop.image = cv2.imread('FCG.pgm', IMAGE_FLAG)
+
def image_from_harddrive(experimental_drop, experimental_setup, frame_number):
    """Load the frame for `frame_number` from the user-selected file list."""
    path = get_import_filename(experimental_setup, frame_number)
    experimental_drop.image = cv2.imread(path, IMAGE_FLAG)
+
def get_import_filename(experimental_setup, frame_number):
    """Return the import path for `frame_number`.

    Negative frame numbers (the initialisation frame, -1) are clamped to the
    first file in the list.
    """
    index = max(frame_number, 0)
    return experimental_setup.import_files[index]
+
+# Captures a single image from the camera and returns it in IplImage format
def image_from_camera(experimental_drop):
    """Capture one frame from the default USB camera (via grabanimage) and load it."""
    grabanimage()
    experimental_drop.image = cv2.imread('USBtemp.png', IMAGE_FLAG)
+
def grabanimage():
    """Capture a frame from the default USB camera and write it to USBtemp.png.

    Reads and discards `ramp_frames` frames first so the camera's automatic
    exposure settles before the frame that is kept.
    """
    camera_port = 0
    ramp_frames = 40
    camera = cv2.VideoCapture(camera_port)

    def capture():
        retval, im = camera.read()
        return im

    try:
        # BUG FIX: `xrange` does not exist in Python 3; use range().
        for _ in range(ramp_frames):
            capture()
        print("Taking image...")
        # Take the actual image we want to keep.
        camera_capture = capture()
    finally:
        # FIX: release the camera device — it was previously left open.
        camera.release()

    # imwrite chooses the format from the extension. (Renamed from `file`,
    # which shadowed the builtin.)
    out_path = "USBtemp.png"
    cv2.imwrite(out_path, camera_capture)
diff --git a/modules/read_image.pyc b/modules/read_image.pyc
new file mode 100644
index 0000000..7a10132
Binary files /dev/null and b/modules/read_image.pyc differ
diff --git a/modules/read_image.py~ b/modules/read_image.py~
new file mode 100644
index 0000000..169bc31
--- /dev/null
+++ b/modules/read_image.py~
@@ -0,0 +1,101 @@
+#!/usr/bin/env python
+#coding=utf-8
+from __future__ import print_function
+import subprocess
+import cv2
+import time
+import datetime
+import timeit
+import os
+import numpy as np
+
+IMAGE_FLAG = 1 # 1 returns three channels (BGR), 0 returns gray
+
+
def get_image(experimental_drop, experimental_setup, frame_number):
    """Acquire a frame; on frame 0 create the timestamped output folder; save
    the frame when saving is enabled. (Backup copy — see read_image.py.)"""
    import_from_source(experimental_drop, experimental_setup, frame_number)
    # experimental_drop.image = np.flipud(cv2.imread('drop.png', 1))
    # experimental_drop.time = timeit.default_timer()
    if frame_number == 0:
        experimental_setup.time_string = datetime.datetime.now().strftime("%Y-%m-%d-%H%M%S")
        if experimental_setup.create_folder_boole:
            filename_less_extension = experimental_setup.filename[:-4] # trim off image extension
            print(filename_less_extension)
            new_directory = os.path.join(experimental_setup.directory_string, filename_less_extension + "_" + experimental_setup.time_string)
            print(new_directory)
            os.makedirs(new_directory)
            experimental_setup.directory_string = new_directory

    if (frame_number >= 0) and (experimental_setup.save_images_boole):
        save_image(experimental_drop, experimental_setup, frame_number)
+
def save_image(experimental_drop, experimental_setup, frame_number):
    """Write the current frame as <name>_<timestamp>_<NNN><ext>; assumes a
    4-character file extension."""
    filename_temp = os.path.join(experimental_setup.directory_string, experimental_setup.filename) # gets the filename for the file to be saved
    time_string = experimental_setup.time_string # imports the time_string from the initial experiment
    filename = filename_temp[:-4] + '_' + time_string + '_' + str(frame_number).zfill(3) + filename_temp[-4:]
    cv2.imwrite(filename, experimental_drop.image)
+
+# this routine imports the raw drop image based on user input image source
+# image_source = 0 : Flea3
+# image_source = 1 : USB camera
+# image_source = 2 : image on computer
def import_from_source(experimental_drop, experimental_setup, frame_number):
    """Load the next raw drop image from the configured source
    ("Flea3" / "USB camera" / "Local images") and stamp the acquisition time.

    Raises:
        ValueError: if image_source is not one of the recognised values.
    """
    image_source = experimental_setup.image_source
    if image_source == "Flea3":
        image_from_Flea3(experimental_drop)
    elif image_source == "USB camera":
        image_from_camera(experimental_drop)
    elif image_source == "Local images":
        image_from_harddrive(experimental_drop, experimental_setup, frame_number)
    else:
        # BUG FIX: the exception object was previously constructed but never
        # raised, so an invalid image_source failed silently.
        raise ValueError("Incorrect value for image_source")
    experimental_drop.time = timeit.default_timer()
+
+
def image_from_Flea3(experimental_drop):
    """Run the external FCGrab binary and load the frame it writes to FCG.pgm."""
    subprocess.call(["./FCGrab"])
    temp_filename = 'FCG.pgm'
    experimental_drop.image = cv2.imread(temp_filename, IMAGE_FLAG)
    # the grab file is transient; remove it once loaded
    os.remove(temp_filename)
    # experimental_drop.filename
+
def image_from_harddrive(experimental_drop, experimental_setup, frame_number):
    """Load the frame for `frame_number` from the user-selected file list."""
    import_filename = get_import_filename(experimental_setup, frame_number)
    experimental_drop.image = cv2.imread(import_filename, IMAGE_FLAG)
+
def get_import_filename(experimental_setup, frame_number):
    """Return the import path for `frame_number`; negative frames (the
    initialisation frame, -1) map to the first file."""
    return experimental_setup.import_files[frame_number*(frame_number>0)] # handles initialisation frame = -1
+
# Captures a single image from the camera and returns it in IplImage format
def image_from_camera(experimental_drop):
    """Capture one frame from the default USB camera (via grabanimage) and load it."""
    grabanimage()
    temp_filename = 'USBtemp.png'
    experimental_drop.image = cv2.imread(temp_filename, IMAGE_FLAG)
+
def grabanimage():
    """Capture a frame from the default USB camera and write it to USBtemp.png.

    Reads and discards `ramp_frames` frames first so the camera's automatic
    exposure settles before the frame that is kept.
    """
    camera_port = 0
    ramp_frames = 40
    camera = cv2.VideoCapture(camera_port)

    def capture():
        retval, im = camera.read()
        return im

    try:
        # BUG FIX: `xrange` does not exist in Python 3; use range().
        for _ in range(ramp_frames):
            capture()
        print("Taking image...")
        # Take the actual image we want to keep.
        camera_capture = capture()
    finally:
        # FIX: release the camera device — it was previously left open.
        camera.release()

    # imwrite chooses the format from the extension. (Renamed from `file`,
    # which shadowed the builtin.)
    out_path = "USBtemp.png"
    cv2.imwrite(out_path, camera_capture)
+
+
+
diff --git a/modules/select_regions.py b/modules/select_regions.py
new file mode 100644
index 0000000..9902dfc
--- /dev/null
+++ b/modules/select_regions.py
@@ -0,0 +1,600 @@
+#!/usr/bin/env python
+#coding=utf-8
+from __future__ import print_function
+# from classes import ExperimentalDrop
+# from subprocess import call
+# import numpy as np
+import cv2
+import numpy as np
+import matplotlib.pyplot as plt
+# import time
+# import datetime
+# from Tkinter import *
+# import tkFileDialog
+import sys
+from scipy import optimize # DS 7/6/21 - for least squares fit
+import tensorflow as tf # DS 9/6/21 - for loading ML model
+
+from .preprocessing import prepare_hydrophobic, tilt_correction
+
+# import os
+
+MAX_IMAGE_TO_SCREEN_RATIO = 0.8
+
def set_drop_region(experimental_drop, experimental_setup):
    """Determine the drop region of interest, either automatically
    (auto_crop) or by interactive user selection, and store the cropped
    image on the drop and the region on the setup."""
    screen_size = experimental_setup.screen_resolution
    scale = set_scale(experimental_drop.image.shape, screen_size)
    screen_position = set_screen_position(screen_size)

    method = experimental_setup.drop_ID_method
    if method == "Automated":
        from .preprocessing import auto_crop
        cropped, (left, right, top, bottom) = auto_crop(experimental_drop.image)
        experimental_drop.cropped_image = cropped

        # show the drop that was found
        plt.title('original image')
        plt.imshow(experimental_drop.image)
        plt.show()
        plt.close()

        plt.title('cropped image')
        plt.imshow(experimental_drop.cropped_image)
        plt.show()
        plt.close()

        experimental_setup.drop_region = [(left, top), (right, bottom)]
    elif method == "User-selected":
        experimental_setup.drop_region = user_ROI(experimental_drop.image, 'Select drop region', scale, screen_position)
        experimental_drop.cropped_image = image_crop(experimental_drop.image, experimental_setup.drop_region)
    # experimental_setup.needle_region = user_line(experimental_drop.image, 'Select needle region', scale, screen_position)
+
def image_crop(image, points):
    """Return the sub-image bounded by two corner points [(x0, y0), (x1, y1)]."""
    (x0, y0), (x1, y1) = points
    return image[int(y0):int(y1), int(x0):int(x1)]
+
def set_surface_line(experimental_drop, experimental_setup):
    """Establish the surface (base) line: automatically via
    prepare_hydrophobic, or interactively via user_line."""
    method = experimental_setup.baseline_method
    if method == "Automated":
        contour, contact_pts = prepare_hydrophobic(experimental_drop.contour)
        experimental_drop.drop_contour = contour
        experimental_drop.contact_points = contact_pts
    elif method == "User-selected":
        user_line(experimental_drop, experimental_setup)
+
+
def correct_tilt(experimental_drop, experimental_setup):
    """Rotate the cropped image so the detected baseline is horizontal.

    The corrected image replaces experimental_drop.cropped_image in place.
    """
    if experimental_setup.baseline_method == "Automated":
        experimental_drop.cropped_image = tilt_correction(
            experimental_drop.cropped_image, experimental_drop.contact_points)

    # Gets tricky where the baseline is manually set: under the current
    # workflow users would be required to re-input their baseline until it's
    # flat, when the baseline should be flat and known once it's set and
    # corrected for.
    elif experimental_setup.baseline_method == "User-selected":
        # BUG FIX: previously referenced an undefined name `img` and
        # discarded the result into an unused local.
        experimental_drop.cropped_image = tilt_correction(
            experimental_drop.cropped_image, experimental_drop.contact_points,
            user_set_baseline=True)
+
def set_scale(image_size, screen_size):
    """Return the factor that shrinks the image to fit within
    MAX_IMAGE_TO_SCREEN_RATIO of the screen (1 if it already fits)."""
    worst_ratio = max(image_size[1] / float(screen_size[0]),
                      image_size[0] / float(screen_size[1]))
    if worst_ratio > MAX_IMAGE_TO_SCREEN_RATIO:
        return MAX_IMAGE_TO_SCREEN_RATIO / worst_ratio
    return 1
+
def set_screen_position(screen_size):
    """Return an [x, y] window position that roughly centres the scaled
    image on screen; y is halved to sit the window a little higher."""
    free_fraction = 0.5 * (1 - MAX_IMAGE_TO_SCREEN_RATIO)  # margin on each side
    return [int(free_fraction * screen_size[0]),
            int(0.5 * free_fraction * screen_size[1])]
+
def user_ROI(raw_image, title, scale, screen_position): #, line_colour=(0, 0, 255), line_thickness=2):
    """Interactively select a rectangular region of interest.

    Shows `raw_image` (resized by `scale`) in a cv2 window at
    `screen_position`; the user drags a rectangle (draw_rectangle callback,
    which communicates through the module-level globals below). Return or
    space accepts once the rectangle has non-zero area; Esc aborts the
    program via kill().

    Returns [(min_x, min_y), (max_x, max_y)] in ORIGINAL image coordinates
    (corner positions divided back by `scale`).
    """
    global drawing
    global ix, iy
    global fx, fy
    global image_TEMP
    global img
    # raw_image = raw_image2
    # raw_image = np.flipud(cv2.cvtColor(raw_image2,cv2.COLOR_GRAY2BGR))
    # raw_image = np.flipud(raw_image2)
    drawing = False # true if mouse is pressed
    ix,iy = -1,-1
    fx,fy = -1,-1

    cv2.namedWindow(title, cv2.WINDOW_AUTOSIZE)
    cv2.moveWindow(title, screen_position[0], screen_position[1])
    cv2.setMouseCallback(title, draw_rectangle)
    #scale =1
    image_TEMP = cv2.resize(raw_image, (0,0), fx=scale, fy=scale)

    img = image_TEMP.copy()

    while(1):
        cv2.imshow(title,img)

        k = cv2.waitKey(1) & 0xFF
        if k != 255:
            if (k == 13) or (k == 32):
                # either 'return' or 'space' pressed
                # break
                if ((fx - ix) * (fy - iy)) != 0: # ensure there is an enclosed region
                    break
            if (k == 27):
                # 'esc'
                kill()

    cv2.destroyAllWindows()
    # convert the selected corners back to original-image coordinates
    min_x = min(ix, fx) / scale
    max_x = max(ix, fx) / scale
    min_y = min(iy, fy) / scale
    max_y = max(iy, fy) / scale
    return [(min_x, min_y), (max_x, max_y)]
+
def user_line(experimental_drop, experimental_setup):
    """Interactive surface-line (baseline) editor.

    Displays the cropped drop image with a green baseline initialised from a
    straight-line fit through the first/last contour points. Keys: up/down
    arrows shift the line, left/right arrows rotate it about its midpoint by
    0.1 degrees, 'o' resets to the initial estimate, 'p' prints the `conans`
    dict, Return/space accepts, Esc aborts via kill(). On every pass the
    contour points above the line are stored as
    experimental_drop.drop_contour with its endpoints as
    experimental_drop.contact_points, and (optionally) the enabled fits are
    recomputed and overlaid.

    NOTE(review): communicates with the mouse callbacks through module-level
    globals, and the min/max coordinates computed at the end are discarded —
    the function returns None.
    """
    #scale = set_scale(experimental_drop.image.shape, experimental_setup.screen_resolution)
    screen_position = set_screen_position(experimental_setup.screen_resolution)
    raw_image = experimental_drop.cropped_image
    drop_data = experimental_drop.contour.astype(float)
    CPs = experimental_drop.contact_points
    title = 'Define surface line'
    #line = experimental_drop.surface_data # not set yet
    region = experimental_setup.drop_region

    global drawing
    global ix, iy
    global fx, fy
    global image_TEMP
    global img

    DRAW_TANGENT_LINE_WHILE_SETTING_BASELINE = True
    TEMP = False
    baseline_def_method = 'use user-inputted points'

    # raw_image = raw_image2
    # raw_image = np.flipud(cv2.cvtColor(raw_image2,cv2.COLOR_GRAY2BGR))
    # raw_image = np.flipud(raw_image2)
    drawing = True # true if mouse is pressed
    ix,iy = -1,-1
    fx,fy = -1,-1

    region = np.floor(region)
    # print(region)

    # print(region[0,0])
    # print(region[1,0])
    # print(region[0,1])
    # print(region[1,1])

    # cv2.setMouseCallback(title, draw_line)

    scale = 1
    if TEMP:
        image_TEMP = cv2.resize(raw_image[int(region[0,1]):int(region[1,1]),int(region[0,0]):int(region[1,0])], (0,0), fx=scale, fy=scale)
    else:
        image_TEMP = raw_image.copy()
    img = image_TEMP.copy()

    # set surface line starting estimate
    N = np.shape(drop_data)[0]
    A = 1 #50 # maybe lower this?
    xx = np.concatenate((drop_data[0:A,0],drop_data[N-A:N+1,0]))
    yy = np.concatenate((drop_data[0:A,1],drop_data[N-A:N+1,1]))
    coefficients = np.polyfit(xx, yy, 1)
    line = np.poly1d(coefficients)

    xx = np.array([0,img.shape[1]])
    yy = line(xx) #gives a starting guess for the line position

    ix0,fx0 = xx.astype(int)
    iy0,fy0 = yy.astype(int)

    ix,fx = ix0,fx0
    iy,fy = iy0,fy0

    cv2.namedWindow(title, cv2.WINDOW_AUTOSIZE)
    cv2.moveWindow(title, screen_position[0], screen_position[1])

    if DRAW_TANGENT_LINE_WHILE_SETTING_BASELINE: #so that things can be drawn over the image which surface line is changed
        conans = {}
        if 0:
            for i,n in enumerate(drop_data):
                if n[0]==CPs[0][0] and int(n[1])==int(CPs[0][1]):
                    start_index = i
                if int(n[0])==int(CPs[1][0]) and int(n[1])==int(CPs[1][1]):
                    end_index = i
            auto_drop = drop_data.copy()[start_index:end_index]
        else:
            auto_drop = drop_data

    while(1):
        cv2.imshow(title,img)
        #cv2.circle(img,(200,200),5,(255,255,0),2)
        cv2.line(img,(ix,iy),(fx,fy), (0, 255, 0), 2)# #line_colour,line_thickness)
        #Plot pixels above line
        #cv2.waitKey(0)
        # v1 is the baseline direction vector, used below for the cross-product
        # side-of-line test
        v1 = (ix-fx,iy-fy) #(1,coefficients[1]) # Vector 1

        #print(np.shape(drop_data))
        #print(drop_data)

        #drop_data_list = np.ndarray.tolist(drop_data)
        #start = sorted(drop_data_list, key=lambda x: (x[1]))[-1]
        #sorted_drop_data_list = optimized_path(drop_data_list,start)
        #drop_data = np.array(sorted_drop_data_list)
        #print(type(drop_data))

        if 1:
            drop = []
            for i in drop_data:
                cx,cy = i
                v2 = (cx-ix, cy-iy) # Vector 1
                xp = v1[0]*v2[1] - v1[1]*v2[0] # Cross product
                if xp > 0:
                    drop.append([cx,cy])
                    cv2.circle(img,(int(cx),int(cy)),2,(255,255,255),1)
        else:
            drop = []
            for i in drop_data:
                cx,cy = i
                #if contour point y value less than line y value
                if cy < line(cx):
                    drop.append([cx,cy])
                    cv2.circle(img,(int(cx),int(cy)),2,(255,255,255),1)


        drop = np.asarray(drop).astype(float) #drop is the contour above the user-inputted line

        if 0:
            plt.imshow(img)
            plt.title('check contour after being cut by baseline')
            plt.plot(drop[:,0],drop[:,1])
            plt.show()
            plt.close()

        experimental_drop.drop_contour = drop
        CPs = {0: drop[0], 1:drop[-1]}
        experimental_drop.contact_points = CPs

        if DRAW_TANGENT_LINE_WHILE_SETTING_BASELINE:
            # re-run the enabled fits against the current baseline and overlay them
            if experimental_setup.tangent_boole == True or experimental_setup.second_deg_polynomial_boole == True or experimental_setup.circle_boole == True or experimental_setup.ellipse_boole == True:
                from .fits import perform_fits
                perform_fits(experimental_drop, tangent=experimental_setup.tangent_boole, polynomial=experimental_setup.second_deg_polynomial_boole, circle=experimental_setup.circle_boole,ellipse=experimental_setup.ellipse_boole)
            if experimental_setup.tangent_boole == True:
                tangent_lines = tuple(experimental_drop.contact_angles['tangent fit']['tangent lines'])
                cv2.line(img, (int(tangent_lines[0][0][0]),int(tangent_lines[0][0][1])),(int(tangent_lines[0][1][0]),int(tangent_lines[0][1][1])), (0, 0, 255), 2)
                cv2.line(img, (int(tangent_lines[1][0][0]),int(tangent_lines[1][0][1])),(int(tangent_lines[1][1][0]),int(tangent_lines[1][1][1])),(0, 0, 255), 2)
            if experimental_setup.second_deg_polynomial_boole == True and experimental_setup.tangent_boole == False:
                tangent_lines = tuple(experimental_drop.contact_angles['polynomial fit']['tangent lines'])
                cv2.line(img, tangent_lines[0][0],tangent_lines[0][1], (0, 0, 255), 2)
                cv2.line(img, tangent_lines[1][0],tangent_lines[1][1], (0, 0, 255), 2)
            if experimental_setup.circle_boole == True:
                xc,yc = experimental_drop.contact_angles['circle fit']['circle center']
                r = experimental_drop.contact_angles['circle fit']['circle radius']
                cv2.circle(img,(int(xc),int(yc)),int(r),(255,150,0),1)
            if experimental_setup.ellipse_boole == True:
                center = experimental_drop.contact_angles['ellipse fit']['ellipse center']
                axes = experimental_drop.contact_angles['ellipse fit']['ellipse a and b']
                phi = experimental_drop.contact_angles['ellipse fit']['ellipse rotation']
                cv2.ellipse(img, (int(center[0]),int(center[1])), (int(axes[0]),int(axes[1])), phi, 0, 360, (0, 88, 255), 1)

        k = cv2.waitKey(1) & 0xFF
        #print(k)
        if k != 255:

            if (k == 13) or (k == 32):
                # either 'return' or 'space' pressed
                # break
                if ((fx - ix) * (fy - iy)) != 0: # ensure there is an enclosed region
                    break
                else: # something weird happening here, insert work around
                    print('something is not right...')
                    print(fx)
                    print(ix)
                    print(fy)
                    print(iy)
                    print(((fx - ix) * (fy - iy)))
                    break

            if (k == 27):
                # 'esc'
                kill()
            if (k==-1):
                continue
            if (k == 0): #up key (down on image)
                fy = fy+1
                iy = iy+1

                if TEMP:
                    image_TEMP = cv2.resize(raw_image[int(region[0,1]):int(region[1,1]),int(region[0,0]):int(region[1,0])], (0,0), fx=scale, fy=scale)
                else:
                    image_TEMP = raw_image.copy()
                img = image_TEMP.copy()
                cv2.line(img,(ix,iy),(fx, fy), (0, 255, 0), 2)# #line_colour,line_thickness) cv2.line
            if (k == 1): #down key (up on image)
                fy = fy-1
                iy = iy-1

                if TEMP:
                    image_TEMP = cv2.resize(raw_image[int(region[0,1]):int(region[1,1]),int(region[0,0]):int(region[1,0])], (0,0), fx=scale, fy=scale)
                else:
                    image_TEMP = raw_image.copy()
                img = image_TEMP.copy()
                cv2.line(img,(ix,iy),(fx, fy), (0, 255, 0), 2)# #line_colour,line_thickness) cv2.line


            if (k == 111): #"o" key
                # reset the baseline to the initial straight-line estimate
                if 1:
                    fx,fy = fx0,fy0
                    ix,iy = ix0,iy0

                    if TEMP:
                        image_TEMP = cv2.resize(raw_image[int(region[0,1]):int(region[1,1]),int(region[0,0]):int(region[1,0])], (0,0), fx=scale, fy=scale)
                    else:
                        image_TEMP = raw_image.copy()
                    img = image_TEMP.copy()
                    cv2.line(img,(ix,iy),(fx, fy), (0, 255, 0), 2)# #line_colour,line_thickness) cv2.line
                else:
                    if TEMP:
                        image_TEMP = cv2.resize(raw_image[int(region[0,1]):int(region[1,1]),int(region[0,0]):int(region[1,0])], (0,0), fx=scale, fy=scale)
                    else:
                        image_TEMP = raw_image.copy()
                    img = image_TEMP.copy()
                    #cv2.line(img,())


            if (k == 2) or (k == 3): #83: right key (Clockwise)
                # rotate the baseline by 0.1 degrees about its midpoint
                x0 = np.array([ix,iy])
                x1 = np.array([fx,fy])
                xc = 0.5*(x0+x1)
                theta = 0.1/180*np.pi
                if (k == 2): #left key
                    theta = -theta

                rotation = np.zeros((2,2))
                rotation[0,0] = np.cos(theta)
                rotation[0,1] = -np.sin(theta)
                rotation[1,0] = np.sin(theta)
                rotation[1,1] = np.cos(theta)

                x0r = np.dot(rotation,(x0-xc).T)+xc
                x1r = np.dot(rotation,(x1-xc).T)+xc

                ix,iy = x0r.astype(int)
                fx,fy = x1r.astype(int)

                if TEMP:
                    image_TEMP = cv2.resize(raw_image[int(region[0,1]):int(region[1,1]),int(region[0,0]):int(region[1,0])], (0,0), fx=scale, fy=scale)
                else:
                    image_TEMP = raw_image.copy()
                img = image_TEMP.copy()
                cv2.line(img,(ix,iy),(fx, fy), (0, 255, 0), 2)# #line_colour,line_thickness) cv2.line

            if (k == 112): #'p' key
                #print(contact_angle1,contact_angle2)
                for key in conans.keys():
                    print(key,': ',conans[key])
                print()
                #print(conans)
                #print(m1,m2)

            if (k == -1):
                continue
#            else:
#                print(k)


    cv2.destroyAllWindows()
    # NOTE(review): the values below are computed but never returned or stored.
    min_x = min(ix, fx) / scale
    max_x = max(ix, fx) / scale
    min_y = min(iy, fy) / scale
    max_y = max(iy, fy) / scale
+
+# mouse callback function
def draw_rectangle(event, x, y, flags, param):
    """Mouse callback: rubber-band a rectangle over `image_TEMP`.

    Red while dragging, green once released; shares state with the selection
    loop through the module-level globals.
    """
    global ix, iy, fx, fy, drawing
    global image_TEMP
    global img

    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        img = image_TEMP.copy()
        ix, iy = x, y
    elif event == cv2.EVENT_MOUSEMOVE and drawing:
        img = image_TEMP.copy()
        cv2.rectangle(img, (ix, iy), (x, y), (0, 0, 255), 2)
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        fx, fy = x, y
        img = image_TEMP.copy()
        cv2.rectangle(img, (ix, iy), (fx, fy), (0, 255, 0), 2)
+
+# mouse callback function
def draw_line(event, x, y, flags, param):
    """Mouse callback: drag to draw a line over `image_TEMP`.

    Red while dragging, green once released; shares state with the selection
    loop through the module-level globals.
    """
    global ix, iy, fx, fy, drawing
    global image_TEMP
    global img

    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        img = image_TEMP.copy()
        ix, iy = x, y
    elif event == cv2.EVENT_MOUSEMOVE and drawing:
        img = image_TEMP.copy()
        cv2.line(img, (ix, iy), (x, y), (0, 0, 255), 2)
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        fx, fy = x, y
        img = image_TEMP.copy()
        cv2.line(img, (ix, iy), (fx, fy), (0, 255, 0), 2)
+
def kill():
    """Abort the interactive session immediately (raises SystemExit)."""
    raise SystemExit
+
def distance(P1, P2):
    """Euclidean distance between the points P1 = (x1, y1) and P2 = (x2, y2)."""
    dx = P1[0] - P2[0]
    dy = P1[1] - P2[1]
    return (dx * dx + dy * dy) ** 0.5
+
def optimized_path(coords, start=None):
    """Order 2-D points into a continuous path by greedy nearest-neighbour.

    coords is a list (or array, converted) of [x, y] pairs; `start` defaults
    to the first point. NOTE: consumes the passed-in list in place, exactly
    as the original did.
    See https://stackoverflow.com/questions/45829155/sort-points-in-order-to-have-a-continuous-curve-using-python

    Returns the ordered points as a numpy array.
    """
    if not isinstance(coords, list):
        coords = coords.tolist()
    if start is None:
        start = coords[0]

    remaining = coords  # alias: the caller's list is consumed, as before
    path = [start]
    remaining.remove(start)

    while remaining:
        tail = path[-1]
        nearest = min(remaining, key=lambda p: distance(tail, p))
        path.append(nearest)
        remaining.remove(nearest)

    return np.array(path)
+
+
+
def intersection(center, radius, p1, p2):
    """Return the two points where the secant line through p1 and p2 crosses
    the circle given by `center` and `radius`.

    Solves |p1 + t*(p2 - p1) - center| = radius for the two parameters t.
    Asserts that the line really is a secant (two distinct intersections).
    """
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    rx = p1[0] - center[0]
    ry = p1[1] - center[1]

    # quadratic a*t^2 + b*t + c = 0 in the line parameter t
    a = dx * dx + dy * dy
    b = 2 * (dx * rx + dy * ry)
    c = rx * rx + ry * ry - radius * radius

    discriminant = b * b - 4 * a * c
    assert discriminant > 0, 'Not a secant!'

    root = discriminant ** 0.5
    t1 = (-b + root) / (2 * a)
    t2 = (-b - root) / (2 * a)

    return (dx * t1 + p1[0], dy * t1 + p1[1]), (dx * t2 + p1[0], dy * t2 + p1[1])
+
def ML_prepare_hydrophobic(coords_in):
    """Reformat a drop-edge contour into the fixed-size two-half-drop array
    fed to the ML model: splits the (flipped) contour at the apex, mirrors
    the left half onto the right, zero-pads each half to `input_len` points,
    and returns a (2, input_len, 2) array.

    NOTE(review): mutates `coords_in` in place (the y-flip writes through the
    alias), and two regions of this function appear garbled/truncated in this
    source — see the inline NOTEs; recover the full text from VCS history.
    """
    coords = coords_in
    coords[:,1] = - coords[:,1] # flip
    #print('length of coords: ',len(coords))

    # isolate the top of the contour so excess surface can be deleted
    percent = 0.1
    bottom = []
    top = [] # will need this later
    div_line_value = min(coords[:,[1]]) + (max(coords[:,[1]]) - min(coords[:,[1]]))*percent
    for n in coords:
        if n[1] < div_line_value:
            bottom.append(n)
        else:
            top.append(n)

    bottom = np.array(bottom)
    top = np.array(top)

    del_indexes = []
    for index,coord in enumerate(coords):
        # NOTE(review): the next lines appear garbled in this source — several
        # statements (surface trimming, apex detection, half-drop split) have
        # been collapsed together; original logic cannot be recovered here.
        if coord[0]>max(top[:,0]) or coord[0] xapex:
            r_drop.append(n)
    l_drop = np.array(l_drop)
    r_drop = np.array(r_drop)

    #print('length of left drop is: ',len(l_drop))
    #print('length of right drop is: ', len(r_drop))

    # transpose both half drops so that they both face right and the apex of both is at 0,0
    #r_drop[:,[0]] = r_drop[:,[0]] - min(r_drop[:,[0]])
    #l_drop[:,[0]] = -l_drop[:,[0]] + max(l_drop[:,[0]])
    r_drop[:,[0]] = r_drop[:,[0]] - xapex
    l_drop[:,[0]] = -l_drop[:,[0]] + xapex

    counter = 0
    CV_contours = {}

    for halfdrop in [l_drop,r_drop]:
        # NOTE(review): the next lines also appear garbled/truncated in this
        # source (the X/Z extraction and max-length bookkeeping are collapsed).
        if halfdrop[0,1] global_max_len:
        # global_max_len = len(X)

        if len(X)>input_len:
            print(len(X))
            raise Exception("Contour of length "+str(len(X))+" is too long for the designated output dimensionality of ("+str(input_len)+",2)")

        for i in range(input_len):
            if i < len(X):
                a = X[i]
                b = Z[i]
                coord = [a,b]
                coordinates.append(coord)
            else:
                # zero-pad up to the fixed model input length
                coordinates.append([0,0])
        if 0:
            jet= plt.get_cmap('jet')
            colors = iter(jet(np.linspace(0,1,len(coordinates))))
            for k in coordinates:
                plt.plot(k[0],k[1], 'o',color=next(colors))
            plt.title('Halfdrop')
            plt.show()
            plt.close()
        #key = image.split('/')[-1].split('_')[-1][:-4]
        key = counter
        CV_contours[key]= np.array(coordinates)

        counter += 1

    pred_ds = np.zeros((2,input_len,2))
    for counter in [0,1]:
        pred_ds[counter] = CV_contours[counter]

    return pred_ds
diff --git a/modules/select_regions.pyc b/modules/select_regions.pyc
new file mode 100644
index 0000000..e2198ea
Binary files /dev/null and b/modules/select_regions.pyc differ
diff --git a/modules/select_regions.py~ b/modules/select_regions.py~
new file mode 100644
index 0000000..251c77b
--- /dev/null
+++ b/modules/select_regions.py~
@@ -0,0 +1,345 @@
+#!/usr/bin/env python
+#coding=utf-8
+from __future__ import print_function
+# from classes import ExperimentalDrop
+# from subprocess import call
+# import numpy as np
+import cv2
+import numpy as np
+import matplotlib.pyplot as plt
+# import time
+# import datetime
+# from Tkinter import *
+# import tkFileDialog
+import sys
+# import os
+
+MAX_IMAGE_TO_SCREEN_RATIO = 0.8
+
+def set_drop_region(experimental_drop, experimental_setup):
+    # Ask the user to draw the drop region on the image; the result is
+    # stored on experimental_setup.drop_region as [(min_x, min_y), (max_x, max_y)]
+    # in original-image coordinates.
+    screen_size = experimental_setup.screen_resolution
+    image_size = experimental_drop.image.shape
+    scale = set_scale(image_size, screen_size)          # shrink image to fit the screen
+    screen_position = set_screen_position(screen_size)  # roughly centre the window
+    experimental_setup.drop_region = user_ROI(experimental_drop.image, 'Select drop region', scale, screen_position)
+    # experimental_setup.needle_region = user_line(experimental_drop.image, 'Select needle region', scale, screen_position)
+
+
+def set_surface_line(experimental_drop, experimental_setup):
+    # Let the user interactively adjust the surface (baseline) over the
+    # cropped drop image; stores the resulting pair of contact angles
+    # (degrees) on experimental_drop.contact_angles.
+    screen_size = experimental_setup.screen_resolution
+    image_size = experimental_drop.image.shape
+    scale = set_scale(image_size, screen_size)
+    screen_position = set_screen_position(screen_size)
+    experimental_drop.contact_angles = user_line(experimental_drop.image, experimental_drop.drop_data,'Define surface line', scale, screen_position,experimental_drop.surface_data,experimental_setup.drop_region)
+
+def set_scale(image_size, screen_size):
+    # Return a resize factor (<= 1) so the displayed image occupies at most
+    # MAX_IMAGE_TO_SCREEN_RATIO of the screen in both dimensions.
+    # image_size is (rows, cols[, channels]); screen_size is (width, height).
+    x_ratio = image_size[1]/float(screen_size[0])   # image width / screen width
+    y_ratio = image_size[0]/float(screen_size[1])   # image height / screen height
+    max_ratio = max(x_ratio, y_ratio)
+    scale = 1
+    if max_ratio > MAX_IMAGE_TO_SCREEN_RATIO:
+        scale = MAX_IMAGE_TO_SCREEN_RATIO / max_ratio
+    return scale
+
+def set_screen_position(screen_size):
+    # Return the [x, y] pixel position for the top-left corner of the display
+    # window so the (scaled) image sits roughly centred, biased slightly upward.
+    prec_free_space = 0.5 * (1 - MAX_IMAGE_TO_SCREEN_RATIO) # percentage room free
+    x_position = int(prec_free_space * screen_size[0])
+    y_position = int(0.5 * prec_free_space * screen_size[1]) # 0.5 moves window a little bit higher
+    return [x_position, y_position]
+
+
+def user_ROI(raw_image, title, scale, screen_position): #, line_colour=(0, 0, 255), line_thickness=2):
+    # Display raw_image scaled by `scale` and let the user drag a rectangle
+    # with the mouse (handled by the draw_rectangle callback, which
+    # communicates through the module-level globals below).
+    # Return/space accepts the selection once it encloses a non-zero area;
+    # Esc aborts the whole program.
+    # Returns [(min_x, min_y), (max_x, max_y)] in ORIGINAL image coordinates
+    # (the on-screen selection is divided by `scale` before returning).
+    global drawing
+    global ix, iy
+    global fx, fy
+    global image_TEMP
+    global img
+    # raw_image = raw_image2
+    # raw_image = np.flipud(cv2.cvtColor(raw_image2,cv2.COLOR_GRAY2BGR))
+    # raw_image = np.flipud(raw_image2)
+    drawing = False # true if mouse is pressed
+    ix,iy = -1,-1
+    fx,fy = -1,-1
+
+    cv2.namedWindow(title, cv2.WINDOW_AUTOSIZE)
+    cv2.moveWindow(title, screen_position[0], screen_position[1])
+    cv2.setMouseCallback(title, draw_rectangle)
+    #scale =1
+    image_TEMP = cv2.resize(raw_image, (0,0), fx=scale, fy=scale)
+
+    img = image_TEMP.copy()
+
+    while(1):
+        cv2.imshow(title,img)
+
+        # k is masked to 0..255; 255 means "no key pressed this tick"
+        k = cv2.waitKey(1) & 0xFF
+        if k != 255:
+            if (k == 13) or (k == 32):
+                # either 'return' or 'space' pressed
+                # break
+                if ((fx - ix) * (fy - iy)) != 0: # ensure there is an enclosed region
+                    break
+            if (k == 27):
+                # 'esc' aborts the program
+                kill()
+
+    cv2.destroyAllWindows()
+    # convert the on-screen selection back to original-image coordinates
+    min_x = min(ix, fx) / scale
+    max_x = max(ix, fx) / scale
+    min_y = min(iy, fy) / scale
+    max_y = max(iy, fy) / scale
+    return [(min_x, min_y), (max_x, max_y)]
+
+
+def user_line(raw_image, drop_data,title, scale, screen_position,line,region): #, line_colour=(0, 0, 255), line_thickness=2):
+ global drawing
+ global ix, iy
+ global fx, fy
+ global image_TEMP
+ global img
+ # raw_image = raw_image2
+ # raw_image = np.flipud(cv2.cvtColor(raw_image2,cv2.COLOR_GRAY2BGR))
+ # raw_image = np.flipud(raw_image2)
+ drawing = True # true if mouse is pressed
+ ix,iy = -1,-1
+ fx,fy = -1,-1
+
+ region = np.floor(region)
+ # print(region)
+
+ # print(region[0,0])
+ # print(region[1,0])
+ # print(region[0,1])
+ # print(region[1,1])
+
+ # cv2.setMouseCallback(title, draw_line)
+
+ scale = 1
+ image_TEMP = cv2.resize(raw_image[int(region[0,1]):int(region[1,1]),int(region[0,0]):int(region[1,0])], (0,0), fx=scale, fy=scale)
+
+ img = image_TEMP.copy()
+
+ xx = np.array([0,img.shape[1]])
+ yy = line(xx)
+
+ ix0,fx0 = xx.astype(int)
+ iy0,fy0 = yy.astype(int)
+
+ ix,fx = ix0,fx0
+ iy,fy = iy0,fy0
+
+ cv2.namedWindow(title, cv2.WINDOW_AUTOSIZE)
+ cv2.moveWindow(title, screen_position[0], screen_position[1])
+
+
+
+
+ # for i in drop_data:
+ # cx,cy = i
+ # print(cx,cy)
+ #cv2.circle(raw_image,(int(cx),int(-cy)),10,(255,255,255),0)
+
+ while(1):
+ cv2.imshow(title,img)
+ #cv2.circle(img,(200,200),5,(255,255,0),2)
+ cv2.line(img,(ix,iy),(fx,fy), (0, 255, 0), 2)# #line_colour,line_thickness)
+ #Plot pixels above line
+ #cv2.waitKey(0)
+ v1 = (ix-fx,iy-fy) #(1,coefficients[1]) # Vector 1
+
+ drop = []
+ for i in drop_data:
+ cx,cy = i
+ v2 = (cx-ix, cy-iy) # Vector 1
+ xp = v1[0]*v2[1] - v1[1]*v2[0] # Cross product
+ if xp > 0:
+ drop.append((cx,cy))
+ cv2.circle(img,(int(cx),int(cy)),2,(255,255,255),2)
+
+ drop = np.asarray(drop)
+
+ delta = 1
+ Npts = 30
+ Ndrop = np.shape(drop)[0]
+ pts1 = np.zeros((Npts,2))
+ pts2 = np.zeros((Npts,2))
+
+ for i in range(Npts):
+ pts1[i,:] = drop[delta*i,:]
+ pts2[i,:] = drop[Ndrop-1-delta*i,:]
+ cv2.circle(img,(int(pts1[i,0]),int(pts1[i,1])),2,(0,0,255),2)
+ cv2.circle(img,(int(pts2[i,0]),int(pts2[i,1])),2,(0,0,255),2)
+
+ fit_local1 = np.polyfit(pts1[:,0], pts1[:,1] ,2)
+ fit_local2 = np.polyfit(pts2[:,0], pts2[:,1] ,2)
+
+ line_local1 = np.poly1d(fit_local1)
+ line_local2 = np.poly1d(fit_local2)
+
+ x_local1 = np.array([min(pts1[:,0])-10,max(pts1[:,0])+10])
+ f_local1 = line_local1(x_local1)
+ f_local1_prime = line_local1.deriv(1)
+
+ x_local2 = np.array([min(pts2[:,0])-10,max(pts2[:,0])+10])
+ f_local2 = line_local2(x_local2)
+ f_local2_prime = line_local2.deriv(1)
+
+ tangent1 = f_local1_prime(pts1[0,0])*(x_local1-pts1[0,0])+pts1[0,1]
+ tangent2 = f_local2_prime(pts2[0,0])*(x_local2-pts2[0,0])+pts2[0,1]
+
+ cv2.line(img,(int(x_local1[0]),int(tangent1[0])),(int(x_local1[1]),int(tangent1[1])), (255, 0, 0), 2)
+ cv2.line(img,(int(x_local2[0]),int(tangent2[0])),(int(x_local2[1]),int(tangent2[1])), (255, 0, 0), 2)
+
+ m1 = f_local1_prime(pts1[0,0])
+ m2 = f_local2_prime(pts2[0,0])
+
+ m_surf = float(iy-fy)/float(ix-fx)
+
+
+ if (m1 > 0):
+ contact_angle1 = np.pi-np.arctan((m1-m_surf)/(1+m1*m_surf))
+ elif(m1 < 0):
+ contact_angle1 = -np.arctan((m1-m_surf)/(1+m1*m_surf))
+ else:
+ contact_angle1 = np.pi/2
+
+ if (m2 < 0):
+ contact_angle2 = np.pi+np.arctan((m2-m_surf)/(1+m2*m_surf))
+ elif(m2 > 0):
+ contact_angle2 = np.arctan((m2-m_surf)/(1+m2*m_surf))
+ else:
+ contact_angle1 = np.pi/2
+
+ contact_angle1 = contact_angle1*180/np.pi
+ contact_angle2 = contact_angle2*180/np.pi
+
+ k = cv2.waitKey(1) & 0xFF
+ #print(k)
+ if k != 255:
+
+ if (k == 13) or (k == 32):
+ # either 'return' or 'space' pressed
+ # break
+ if ((fx - ix) * (fy - iy)) != 0: # ensure there is an enclosed region
+ break
+ if (k == 27):
+ # 'esc'
+ kill()
+ if (k==-1):
+ continue
+ if (k == 0): #up key (down on image)
+ fy = fy+1
+ iy = iy+1
+
+ image_TEMP = cv2.resize(raw_image[int(region[0,1]):int(region[1,1]),int(region[0,0]):int(region[1,0])], (0,0), fx=scale, fy=scale)
+ img = image_TEMP.copy()
+ cv2.line(img,(ix,iy),(fx, fy), (0, 255, 0), 2)# #line_colour,line_thickness) cv2.line
+ if (k == 1): #down key (up on image)
+ fy = fy-1
+ iy = iy-1
+
+ image_TEMP = cv2.resize(raw_image[int(region[0,1]):int(region[1,1]),int(region[0,0]):int(region[1,0])], (0,0), fx=scale, fy=scale)
+ img = image_TEMP.copy()
+ cv2.line(img,(ix,iy),(fx, fy), (0, 255, 0), 2)# #line_colour,line_thickness) cv2.line
+
+
+ if (k == 111): #"o" key
+ fx,fy = fx0,fy0
+ ix,iy = ix0,iy0
+
+ image_TEMP = cv2.resize(raw_image[int(region[0,1]):int(region[1,1]),int(region[0,0]):int(region[1,0])], (0,0), fx=scale, fy=scale)
+ img = image_TEMP.copy()
+ cv2.line(img,(ix,iy),(fx, fy), (0, 255, 0), 2)# #line_colour,line_thickness) cv2.line
+
+
+ if (k == 2) or (k == 3): #83: right key (Clockwise)
+ x0 = np.array([ix,iy])
+ x1 = np.array([fx,fy])
+ xc = 0.5*(x0+x1)
+ theta = 1.0/180*np.pi
+ if (k == 2): #left key
+ theta = -theta
+
+ rotation = np.zeros((2,2))
+ rotation[0,0] = np.cos(theta)
+ rotation[0,1] = -np.sin(theta)
+ rotation[1,0] = np.sin(theta)
+ rotation[1,1] = np.cos(theta)
+
+ x0r = np.dot(rotation,(x0-xc).T)+xc
+ x1r = np.dot(rotation,(x1-xc).T)+xc
+
+ ix,iy = x0r.astype(int)
+ fx,fy = x1r.astype(int)
+
+ image_TEMP = cv2.resize(raw_image[int(region[0,1]):int(region[1,1]),int(region[0,0]):int(region[1,0])], (0,0), fx=scale, fy=scale)
+ img = image_TEMP.copy()
+ cv2.line(img,(ix,iy),(fx, fy), (0, 255, 0), 2)# #line_colour,line_thickness) cv2.line
+
+ if (k == 112): #'p' key
+ print(contact_angle1,contact_angle2)
+ #print(m1,m2)
+
+ if (k == -1):
+ continue
+# else:
+# print(k)
+
+
+ cv2.destroyAllWindows()
+ min_x = min(ix, fx) / scale
+ max_x = max(ix, fx) / scale
+ min_y = min(iy, fy) / scale
+ max_y = max(iy, fy) / scale
+ return [contact_angle1,contact_angle2]
+
+# mouse callback function
+# mouse callback function
+def draw_rectangle(event,x,y,flags,param):
+    # cv2 mouse callback for user_ROI: records the drag rectangle's corners
+    # in the module-level globals (ix,iy)=(press point), (fx,fy)=(release
+    # point) and redraws the preview rectangle on each mouse move.
+    global ix,iy,drawing
+    global fx, fy
+    global image_TEMP
+    global img
+
+    if event == cv2.EVENT_LBUTTONDOWN:
+        img = image_TEMP.copy()
+        drawing = True
+        ix,iy = x,y
+
+    elif event == cv2.EVENT_MOUSEMOVE:
+        if drawing == True:
+            # redraw from a clean copy so only one rectangle is visible
+            img = image_TEMP.copy()
+            cv2.rectangle(img,(ix,iy),(x,y), (0, 0, 255), 2)# line_colour,line_thickness)
+
+    elif event == cv2.EVENT_LBUTTONUP:
+        img = image_TEMP.copy()
+        drawing = False
+        fx, fy = x, y
+        cv2.rectangle(img,(ix,iy),(fx, fy), (0, 255, 0), 2)# #line_colour,line_thickness)
+
+# mouse callback function
+# mouse callback function
+def draw_line(event,x,y,flags,param):
+    # cv2 mouse callback mirroring draw_rectangle but drawing a line segment
+    # between the press point (ix,iy) and the current/release point.
+    # NOTE(review): registration of this callback in user_line is currently
+    # commented out, so it appears unused.
+    global ix,iy,drawing
+    global fx, fy
+    global image_TEMP
+    global img
+
+    if event == cv2.EVENT_LBUTTONDOWN:
+        img = image_TEMP.copy()
+        drawing = True
+        ix,iy = x,y
+
+    elif event == cv2.EVENT_MOUSEMOVE:
+        if drawing == True:
+            # redraw from a clean copy so only one line is visible
+            img = image_TEMP.copy()
+            cv2.line(img,(ix,iy),(x,y), (0, 0, 255), 2)# line_colour,line_thickness)
+
+    elif event == cv2.EVENT_LBUTTONUP:
+        img = image_TEMP.copy()
+        drawing = False
+        fx, fy = x, y
+        cv2.line(img,(ix,iy),(fx, fy), (0, 255, 0), 2)# #line_colour,line_thickness)
+
+def kill():
+    # Abort the whole program (invoked when the user presses Esc).
+    sys.exit()
diff --git a/modules/user_interface.py b/modules/user_interface.py
new file mode 100644
index 0000000..3d8cd2b
--- /dev/null
+++ b/modules/user_interface.py
@@ -0,0 +1,719 @@
+
+#!/usr/bin/env python
+#coding=utf-8
+
+try:
+ # for Python2
+ import Tkinter as tk
+ import tkFileDialog
+ import tkFont
+ import ttk
+except ImportError:
+ # for Python3
+ import tkinter as tk
+ from tkinter import filedialog as tkFileDialog
+ from tkinter import font as tkFont
+ from tkinter import ttk
+# from ttk import *
+
+import webbrowser
+import sys
+import os
+import csv
+
+# from classes import ExperimentalSetup
+
+IMAGE_EXTENSION='.png'
+
+BACKGROUND_COLOR='gray90'
+# BACKGROUND_COLOR='SlateGray1'
+# BACKGROUND_COLOR='red'
+VERSION='1.0'
+
+DROP_ID_OPTIONS = ["Automated", "User-selected"]
+THRESHOLD_OPTIONS = ["Automated", "User-selected"]
+BASELINE_OPTIONS = ["Automated", "User-selected"]
+NEEDLE_OPTIONS = ['0.7176', '1.270', '1.651']
+IMAGE_SOURCE_OPTIONS = ["Flea3", "USB camera", "Local images"]
+EDGEFINDER_OPTIONS = ["OpenCV", "Subpixel", "Both"]
+
+PATH_TO_SCRIPT = os.path.join(os.path.dirname(os.path.realpath(__file__)),'..')
+PATH_TO_FILE = os.path.join(os.path.dirname(os.path.realpath(__file__)),"parameters.csv")
+
+# FONT_FRAME_LABEL = ("Helvetica", 16, "BOLD")
+FONT_FRAME_LABEL = '*-*-medium-r-normal--*-160-*'
+
+LABEL_WIDTH = 29
+ENTRY_WIDTH = 11
+
+# fullPathName = os.path.abspath(os.path.dirname(sys.argv[0]))
+# fullPathName = os.path.abspath(os.path.dirname(sys.argv[0]))
+def call_user_input(user_input_data):
+    # Entry point: open the settings window and block (Tk mainloop) until
+    # the user presses Run or Quit; user_input_data is populated in place.
+    UserInterface(user_input_data)
+
+class UserInterface(tk.Toplevel):
+    def __init__(self, user_input_data):
+        # Build the whole settings window, restore the previous session's
+        # values from parameters.csv, then enter the Tk mainloop (blocks
+        # until run() or quit() destroys the window).
+        self.initialise = True # need this flag to disable float and integer checking for inputs
+        #print("true")
+        self.root = tk.Tk()
+        #print("root")
+        self.root.geometry("+100+100")
+        #print("resoA")
+        self.screen_resolution = [self.root.winfo_screenwidth(), self.root.winfo_screenheight()]
+        #print("resoB")
+        # bring the window to the front once without keeping it always-on-top
+        self.root.lift()
+        self.root.call('wm', 'attributes', '.', '-topmost', True)
+        self.root.after_idle(self.root.call, 'wm', 'attributes', '.', '-topmost', False)
+
+        self.root.title('Conan the Barbarian v '+VERSION)
+
+        self.root.configure(height=25, background=BACKGROUND_COLOR, padx=50)
+        self.create_title()
+        self.create_user_inputs()
+        self.create_plotting_checklist()
+        self.create_analysis_checklist()
+        self.create_save_location()
+        self.create_image_acquisition()
+        # self.create_save_box()
+        self.create_run_quit(user_input_data)
+        self.create_homepage_url()
+
+        self.import_parameters()
+
+
+
+        self.initialise = False # need this to setup entry widgets validation
+
+        self.root.mainloop()
+
+    def create_title(self):
+        # Banner row across the top of the window.
+        title_frame = tk.Frame(self.root)
+        title_frame.config(background=BACKGROUND_COLOR)
+        title_frame.grid(row=0, columnspan=3, rowspan=1, padx=30, pady=10)
+        # Label(title_frame, text="Open drop", font=("Helvetica", 36), justify=CENTER, background="lavender").grid(row=0, sticky=N)
+        tk.Label(title_frame, text="CONtact ANgle", background=BACKGROUND_COLOR, font=("Helvetica", 36), anchor="center").grid(row=0)
+
+    def create_user_inputs(self):
+        # "User inputs" frame: method selectors plus the numeric entries
+        # (threshold, density, needle diameter).
+        user_input_frame = tk.LabelFrame(self.root, text="User inputs", padx=30, pady=10)
+        user_input_frame.config(background=BACKGROUND_COLOR)
+        user_input_frame.grid(row=1, column=0, columnspan=2, sticky="wens", padx=15, pady=15) #, rowspan=3
+
+        self.drop_ID_method = OptionMenuStyle(self, user_input_frame, "Drop ID method:", DROP_ID_OPTIONS, rw=0)
+        self.threshold_method = OptionMenuStyle(self, user_input_frame, "Threshold value selection method:", THRESHOLD_OPTIONS, rw=1)
+        self.threshold_val = FloatEntryStyle(self, user_input_frame, "Threshold value (ignored if method=Automated):", rw=2,state_specify='normal') #, label_width=LABEL_WIDTH)
+        self.baseline_method = OptionMenuStyle(self, user_input_frame, "Baseline selection method:", BASELINE_OPTIONS, rw=3)
+        self.density_outer = FloatEntryStyle(self, user_input_frame, "Continuous density (kg/m"u"\u00b3""):", rw=4,state_specify='disabled') #, label_width=LABEL_WIDTH)
+        self.needle_diameter = FloatComboboxStyle(self, user_input_frame, "Needle diameter (mm):", NEEDLE_OPTIONS, rw=5,state_specify='disabled') #, label_width=LABEL_WIDTH)
+
+        user_input_frame.grid_columnconfigure(0, minsize=LABEL_WIDTH)
+
+    def create_plotting_checklist(self):
+        # Checkboxes for what to display during fitting (currently disabled).
+        plotting_clist_frame = tk.LabelFrame(self.root, text="To view during fitting", padx=30, pady=10) #, height=15)
+        plotting_clist_frame.config(background=BACKGROUND_COLOR)
+        plotting_clist_frame.grid(row=1, column=2, columnspan=1, sticky="wens", padx=15, pady=15) #, rowspan=3
+
+        self.residuals_boole = CheckButtonStyle(self, plotting_clist_frame, "Residuals", rw=0, cl=0,state_specify='disabled')
+        self.profiles_boole = CheckButtonStyle(self, plotting_clist_frame, "Profiles", rw=1, cl=0,state_specify='disabled')
+        self.IFT_boole = CheckButtonStyle(self, plotting_clist_frame, "Physical quantities", rw=2, cl=0,state_specify='disabled')
+
+    def create_analysis_checklist(self):
+        # Checkboxes selecting which contact-angle fitting methods to run.
+        analysis_clist_frame = tk.LabelFrame(self.root, text="Analysis methods", padx=30, pady=10) #, height=15)
+        analysis_clist_frame.config(background=BACKGROUND_COLOR)
+        analysis_clist_frame.grid(row=3, columnspan=4, sticky="wens", padx=15, pady=15) #, rowspan=3
+
+        self.tangent_boole = CheckButtonStyle(self, analysis_clist_frame, "First-degree polynomial fit", rw=0, cl=0)
+        self.second_deg_polynomial_boole = CheckButtonStyle(self, analysis_clist_frame, "Second-degree polynomial fit", rw=1, cl=0)
+        self.circle_boole = CheckButtonStyle(self, analysis_clist_frame, "Circle fit", rw=2, cl=0)
+        self.ellipse_boole = CheckButtonStyle(self, analysis_clist_frame, "Ellipse fit", rw=0, cl=1)
+        self.YL_boole = CheckButtonStyle(self, analysis_clist_frame, "Young-Laplace fit", rw=1, cl=1)
+        self.ML_boole = CheckButtonStyle(self, analysis_clist_frame, "ML model", rw=2, cl=1)
+
+        #self.residuals_boole = CheckButtonStyle(self, clist_frame, "Residuals", rw=0, cl=0,state_specify='disabled')
+        #self.profiles_boole = CheckButtonStyle(self, clist_frame, "Profiles", rw=1, cl=0,state_specify='disabled')
+        #self.IFT_boole = CheckButtonStyle(self, clist_frame, "Physical quantities", rw=2, cl=0,state_specify='disabled')
+
+    def create_save_location(self):
+        # Output directory picker and base-filename entry (a timestamp and
+        # extension are appended at save time).
+        location_frame = tk.LabelFrame(self.root, text="Output data location", height=15, padx=30, pady=10)
+        location_frame.config(background=BACKGROUND_COLOR)
+        location_frame.grid(row=4, columnspan=3, rowspan=1, sticky="w", padx=15, pady=10)
+
+        self.directory = DirectoryEntryStyle(self.root, location_frame, "Location:", rw=0, entry_width=50)
+
+        self.filename_string = TextEntryStyle(self, location_frame, "Filename:", rw=1, width_specify=20, stckyE="ew")
+        self.filename_extension = tk.Label(location_frame, text="[YYYY-MM-DD-hhmmss].[ext]", background=BACKGROUND_COLOR)
+        self.filename_extension.grid(row=1, column=2, columnspan=2, sticky="w")
+        location_frame.columnconfigure(1,weight=1)
+
+    def create_image_acquisition(self):
+        # Image-source and edge-finder selectors plus frame-count / wait-time
+        # entries and the save-image checkboxes; wires up the traces that
+        # enable/disable dependent widgets.
+        image_acquisition_frame = tk.LabelFrame(self.root, text="Image acquisition", height=15, padx=30, pady=10)
+        image_acquisition_frame.config(background=BACKGROUND_COLOR)
+        image_acquisition_frame.grid(row=5, columnspan=4, rowspan=1, sticky="we",padx=15, pady=10)
+        # image_acquisition_frame.grid_columnconfigure(0, minsize=50)
+        image_acquisition_frame.grid_columnconfigure(2, weight=1)
+
+        self.image_source = OptionMenuStyle(self, image_acquisition_frame, "Image source:", IMAGE_SOURCE_OPTIONS, rw=0, label_width=12) #(LABEL_WIDTH-ENTRY_WIDTH))
+
+        # self.number_frames = IntegerEntryStyle(self, image_acquisition_frame, "Number of frames:", rw=0, cl=3, pdx=10)
+        # self.wait_time = IntegerEntryStyle(self, image_acquisition_frame, "Wait time (s):", rw=1, cl=3, pdx=10)
+
+        # self.directory = DirectoryEntryStyle(self.root, self.save_info_frame, "Location:", rw=3, entry_width=50)
+
+        # image_acquisition_frame.grid_columnconfigure(3, minsize=LABEL_WIDTH)
+        # re-evaluate widget states whenever the image source changes
+        self.image_source.text_variable.trace_variable('w',self.propogate_state)
+
+        self.edgefinder = OptionMenuStyle(self, image_acquisition_frame, "Edge finder:", EDGEFINDER_OPTIONS, rw=1,label_width=12) # added by DS 31/5/21
+
+        self.number_frames = IntegerEntryStyle(self, image_acquisition_frame, "Number of frames:", rw=3, cl=0, pdx=10)
+        self.wait_time = IntegerEntryStyle(self, image_acquisition_frame, "Wait time (s):", rw=4, cl=0, pdx=10)
+
+        self.save_images_boole = CheckButtonStyle(self, image_acquisition_frame, "Save image", rw=3, cl=3)
+        self.create_new_dir_boole = CheckButtonStyle(self, image_acquisition_frame, "Create new directory", rw=4, cl=3)#, pdx=50)
+        # "create new directory" is only meaningful when images are saved
+        self.save_images_boole.int_variable.trace_variable('w',self.check_button_changed)
+
+
+    def propogate_state(self, *args):
+        # Trace callback (note: name keeps the original spelling) fired when
+        # the image source changes: local images cannot be (re)saved, so the
+        # save checkboxes are disabled for that source.
+        if self.image_source.get_value()=="Local images":
+            self.save_images_boole.disable()
+            self.create_new_dir_boole.disable()
+            # self.filename_string.disable()
+            # self.directory.disable()
+            # self.filename_extension.config(state="disable")
+        else:
+            self.save_images_boole.normal()
+            self.check_button_changed()
+
+
+
+
+
+
+
+ # def create_save_box(self):
+ # self.save_info_frame = tk.LabelFrame(self.root, text="Save images", height=15, padx=30, pady=10)
+ # self.save_info_frame.config(background=BACKGROUND_COLOR)
+ # self.save_info_frame.grid(row=6, columnspan=4, rowspan=4, sticky="w", padx=15, pady=10)
+ # self.save_images_boole = CheckButtonStyle(self, self.save_info_frame, "Save image", rw=0)
+ # self.create_new_dir_boole = CheckButtonStyle(self, self.save_info_frame, "Create new directory", rw=0, cl=3, pdx=50)
+
+
+ # # self, parent, frame, text_left, rw=0, cl=0, width_specify=10, pdx=0, pdy=2, stcky="w")
+
+
+ # # self.filename_string = TextEntryStyle(self, self.save_info_frame, "Filename:", rw=2, width_specify=20, stckyE="ew")
+ # # self.filename_string.default_string = "Extracted_data"
+ # # self.filename_extension = tk.Label(self.save_info_frame, text="[YYYY-MM-DD-hhmmss]"+IMAGE_EXTENSION, background=BACKGROUND_COLOR)
+ # # self.filename_extension.grid(row=2, column=2, sticky="w")
+ # # self.save_info_frame.columnconfigure(1,weight=1)
+
+ # # self.directory = DirectoryEntryStyle(self.root, self.save_info_frame, "Location:", rw=3, entry_width=50)
+
+ # self.save_info_frame.columnconfigure(0, weight=1)
+ # self.save_info_frame.columnconfigure(1, weight=1)
+ # # self.save_info_frame.columnconfigure(4, weight=1)
+
+ # self.save_images_boole.int_variable.trace_variable('w',self.check_button_changed)
+
+
+
+    def check_button_changed(self, *args):
+        # Trace callback: "create new directory" only makes sense when
+        # "save image" is ticked, so keep its enabled state in sync.
+        if self.save_images_boole.get_value():
+            self.create_new_dir_boole.normal()
+            # self.filename_string.normal()
+            # self.directory.normal()
+            # self.filename_extension.config(state="normal")
+        else:
+            self.create_new_dir_boole.disable()
+            # self.filename_string.disable()
+            # self.directory.disable()
+            # self.filename_extension.config(state="disable")
+
+
+
+ # def update_directory(self):
+ # directory = os.path.dirname(os.path.realpath(__file__))
+ # self.output_location_string = tkFileDialog.askdirectory(parent = self.root, title="Select output data location", initialdir=directory)
+ # self.output_location_text.config(text = self.clip_dir(self.output_location_string))
+
+
+ # def clip_dir(self, string):
+ # MAX_DIR_LEN=20
+ # if len(string) > MAX_DIR_LEN:
+ # return "..." + string[-(MAX_DIR_LEN+3):]
+ # else:
+ # return string
+ # # return string
+
+
+ def create_run_quit(self, user_input_data):
+ # run_quit_frame = LabelFrame(self.root, text="ys", height=15)
+ run_quit_frame = tk.Frame(self.root)
+ run_quit_frame.config(background=BACKGROUND_COLOR)
+ run_quit_frame.grid(row=22, columnspan=5, padx=10, pady=10, sticky="we")
+ # save_images_run = tk.Button(run_quit_frame, text='Run', highlightbackground=BACKGROUND_COLOR, command=self.run) # , state='disabled'
+ save_images_run = tk.Button(run_quit_frame, text='Run', highlightbackground=BACKGROUND_COLOR, command=lambda: self.run(user_input_data)) # , state='disabled'
+ save_images_quit = tk.Button(run_quit_frame, text='Quit', highlightbackground=BACKGROUND_COLOR, command=self.quit)
+
+ # self.root.bind("", lambda _: self.callback_run(user_input_data))
+ # self.root.bind("", self.callback_quit)
+ self.root.bind("", lambda _: self.run(user_input_data))
+ self.root.bind("", lambda _: self.quit())
+
+
+ run_quit_frame.columnconfigure(0, weight=1)
+ run_quit_frame.columnconfigure(2, weight=1)
+ run_quit_frame.columnconfigure(4, weight=1)
+
+
+ # save_images_run.grid(row=0, column=1, sticky="we")#padx=15, pady=10, sticky=W+E)
+ # save_images_quit.grid(row=0, column=3, sticky="we")#padx=15, pady=10, sticky=W+E)
+ save_images_quit.grid(row=0, column=1, sticky="we")#padx=15, pady=10, sticky=W+E)
+ save_images_run.grid(row=0, column=3, sticky="we")#padx=15, pady=10, sticky=W+E)
+
+
+ def create_homepage_url(self):
+ homepage_frame = tk.Frame(self.root)
+ homepage_frame.config(background=BACKGROUND_COLOR)
+ homepage_frame.grid(row=23, columnspan=4, padx=40, pady=10, sticky="e")
+
+ self.label_link = tk.Label(homepage_frame, text="opencolloids.com", highlightbackground=BACKGROUND_COLOR, background=BACKGROUND_COLOR, fg="blue", cursor="arrow")#"hand2")
+ self.link_font = tkFont.Font(self.label_link, self.label_link.cget("font"))
+ self.link_font_underline = tkFont.Font(self.label_link, self.label_link.cget("font"))
+ self.link_font_underline.configure(underline = True)
+
+ self.label_link.bind("", self.homepage_url_callback)
+ self.label_link.grid(row=0,column=0)
+ self.label_link.bind("", self.underline_link)
+ self.label_link.bind("", self.remove_underline_link)
+
+    def homepage_url_callback(self, event):
+        # Open the project homepage in the default browser.
+        webbrowser.open_new(r"http://www.opencolloids.com")
+
+    def underline_link(self, event):
+        # Hover-in handler: underline the link and darken it.
+        self.label_link.config(text="opencolloids.com", font=self.link_font_underline, fg="navy")# underline = True)
+
+    def remove_underline_link(self, event):
+        # Hover-out handler: restore the plain blue link style.
+        self.label_link.config(text="opencolloids.com", font=self.link_font, fg="blue")# underline = False)
+
+
+    def run(self, user_input_data):
+        # "Run" handler: copy the widget values into user_input_data,
+        # persist them to parameters.csv, prompt for files when the source
+        # is local images, then close the window so the caller proceeds.
+        self.update_user_settings(user_input_data)
+        self.export_parameters()
+        if self.image_source.get_value() == "Local images":
+            user_input_data.import_files = tkFileDialog.askopenfilenames(parent = self.root, title="Select files", initialdir=PATH_TO_SCRIPT)
+            user_input_data.number_of_frames = len(user_input_data.import_files)
+            # if self.create_new_dir_boole.get_value(): #create_folder_boole
+            #     new_directory = os.path.join(user_input_data.directory_string, self.filename_string.get_value())
+            #     os.makedirs(new_directory)
+            #     user_input_data.directory_string = new_directory
+
+            # if user doesnt select files - abort
+            if user_input_data.number_of_frames == 0:
+                sys.exit()
+
+
+        # destroying the root window ends mainloop() and returns control
+        self.root.destroy()
+
+
+    def quit(self):
+        # "Quit" handler: terminate the whole program immediately.
+        sys.exit()
+
+ def import_parameters(self):
+ if os.path.exists(PATH_TO_FILE):
+ data = []
+ writer = csv.reader(open(PATH_TO_FILE, 'r'))
+ for row in writer:
+ data.append(row)
+ self.drop_ID_method.set_value(data[0][1])
+ self.threshold_method.set_value(data[1][1])
+ self.threshold_val.set_value(data[2][1])
+ self.baseline_method.set_value(data[3][1])
+ self.edgefinder.set_value(data[4][1])
+ self.density_outer.set_value(data[5][1])
+ self.needle_diameter.set_value(data[6][1])
+ self.residuals_boole.set_value(data[7][1])
+ self.tangent_boole.set_value(data[8][1])
+ self.second_deg_polynomial_boole.set_value(data[9][1])
+ self.circle_boole.set_value(data[10][1])
+ self.ellipse_boole.set_value(data[11][1])
+ self.YL_boole.set_value(data[12][1])
+ self.ML_boole.set_value(data[13][1])
+ self.profiles_boole.set_value(data[14][1])
+ self.IFT_boole.set_value(data[15][1])
+
+ given_image_source = data[16][1]
+ if given_image_source in IMAGE_SOURCE_OPTIONS:
+ self.image_source.set_value(given_image_source) # set image source
+ else:
+ self.directory.set_value("")
+
+ self.number_frames.set_value(data[17][1])
+ self.wait_time.set_value(data[18][1])
+ self.save_images_boole.set_value(data[18][1]) # do this after others
+ self.create_new_dir_boole.set_value(data[20][1])
+
+ self.filename_string.set_value(data[21][1])
+ # self.
+
+ given_dir = data[13][1]
+ if os.path.isdir(given_dir):
+ self.directory.set_value(given_dir) # set given directory
+ # print(self.directory._directory_string.get_value())
+ else:
+ self.directory.set_value(os.getcwd()) # current directory of Terminal
+
+
+    def update_user_settings(self, user_input_data):
+        # Copy every widget's current value onto the caller-supplied
+        # user_input_data object (attribute names are what the rest of the
+        # pipeline reads).
+        user_input_data.screen_resolution = self.screen_resolution
+        user_input_data.drop_ID_method = self.drop_ID_method.get_value()
+        user_input_data.threshold_method = self.threshold_method.get_value()
+        user_input_data.threshold_val = self.threshold_val.get_value()
+        user_input_data.baseline_method = self.baseline_method.get_value()
+        user_input_data.edgefinder = self.edgefinder.get_value()
+        user_input_data.continuous_density = self.density_outer.get_value()
+        user_input_data.needle_diameter_mm= self.needle_diameter.get_value()
+        user_input_data.residuals_boole = self.residuals_boole.get_value()
+        user_input_data.profiles_boole = self.profiles_boole.get_value()
+        user_input_data.interfacial_tension_boole = self.IFT_boole.get_value()
+        user_input_data.tangent_boole = self.tangent_boole.get_value()
+        user_input_data.second_deg_polynomial_boole = self.second_deg_polynomial_boole.get_value()
+        user_input_data.circle_boole = self.circle_boole.get_value()
+        user_input_data.ellipse_boole = self.ellipse_boole.get_value()
+        user_input_data.YL_boole = self.YL_boole.get_value()
+        user_input_data.ML_boole = self.ML_boole.get_value()
+        user_input_data.image_source = self.image_source.get_value()
+        user_input_data.number_of_frames = self.number_frames.get_value()
+        user_input_data.wait_time = self.wait_time.get_value()
+        user_input_data.save_images_boole = self.save_images_boole.get_value()
+        user_input_data.create_folder_boole = self.create_new_dir_boole.get_value()
+        temp_filename = self.filename_string.get_value()
+        if temp_filename == '':
+            temp_filename = "Extracted_data"
+        # NOTE(review): IMAGE_EXTENSION (".png") is appended to the *data*
+        # filename here — looks suspicious for non-image output; confirm.
+        user_input_data.filename = temp_filename + IMAGE_EXTENSION
+        user_input_data.directory_string = self.directory.get_value()
+
+
+
+
+
+ def export_parameters(self):
+ parameter_vector = ([
+ ('Drop ID method', self.drop_ID_method.get_value()),
+ ('Image thresholding method', self.threshold_method.get_value()),
+ ('Image thresholding value', self.threshold_val.get_value()),
+ ('Baseline method', self.baseline_method.get_value()),
+ ('Edge finder', self.edgefinder.get_value()),
+ ('Continuous density', self.density_outer.get_value()),
+ ('Needle diameter',self.needle_diameter.get_value()),
+ ('Plot residuals',self.residuals_boole.get_value()),
+ ('Plot profiles',self.profiles_boole.get_value()),
+ ('Plot IFT',self.IFT_boole.get_value()),
+ ('Perform tangent fit',self.tangent_boole.get_value()),
+ ('Perform polynomial fit',self.second_deg_polynomial_boole.get_value()),
+ ('Perform circle fit',self.circle_boole.get_value()),
+ ('Perform ellipse fit',self.ellipse_boole.get_value()),
+ ('Perform YL fit',self.YL_boole.get_value()),
+ ('Perform ML model prediction',self.ML_boole.get_value()),
+ ('Image source',self.image_source.get_value()),
+ ('Number of frames',self.number_frames.get_value()),
+ ('Wait time',self.wait_time.get_value()),
+ ('Save images',self.save_images_boole.get_value()),
+ ('Create new data folder',self.create_new_dir_boole.get_value()),
+ ('Filename',self.filename_string.get_value()),
+ ('Directory',self.directory.get_value())
+ ])
+ writer = csv.writer(open(PATH_TO_FILE, 'w'))
+ for row in parameter_vector:
+ writer.writerow(row)
+
+
+
+ # def validate_float(self, value_if_allowed):
+ # if text in '0123456789.-+':
+ # try:
+ # float(value_if_allowed)
+ # return True
+ # except ValueError:
+ # return False
+ # else:
+ # return False
+    def validate_float(self, action, index, value_if_allowed,
+                       prior_value, text, validation_type, trigger_type, widget_name):
+        # Tk 'key' validatecommand: allow the edit only if the resulting
+        # entry text parses as a float (with '' and a lone '.' permitted as
+        # in-progress input).  Arguments follow Tk's %d %i %P %s %S %v %V %W
+        # substitution order; value_if_allowed (%P) is the would-be content
+        # and text (%S) is the inserted/deleted characters.
+        # print "OnValidate:"
+        # print "d='%s'" % action
+        # print "i='%s'" % index
+        # print "P='%s'" % value_if_allowed
+        # print "s='%s'" % prior_value
+        # print "S='%s'" % text
+        # print "v='%s'" % validation_type
+        # print "V='%s'" % trigger_type
+        # print "W='%s'" % widget_name
+        if value_if_allowed == '':
+            return True
+        elif value_if_allowed == '.':
+            return True
+        else:
+            # NOTE: 'text in ...' is a substring test, so it only reliably
+            # screens single-character insertions.
+            if text in '0123456789.-+':
+                try:
+                    float(value_if_allowed)
+                    return True
+                except ValueError:
+                    return False
+            else:
+                return False
+
+
+    def validate_int(self, action, index, value_if_allowed,
+                     prior_value, text, validation_type, trigger_type, widget_name):
+        # Tk 'key' validatecommand: allow only positive decimal integers
+        # (empty is permitted as in-progress input, a lone '0' is rejected).
+        # Validation is bypassed while the window is being built so
+        # import_parameters can populate the entries freely.
+        if self.initialise == True:
+            return True
+        elif value_if_allowed == '':
+            # self.recheck_wait_state(0)
+            return True
+        elif value_if_allowed == '0':
+            return False
+        else:
+            if text in '0123456789':
+                try:
+                    int_value = int(value_if_allowed)
+                    # self.recheck_wait_state(int_value)
+                    return True
+                except ValueError:
+                    return False
+            else:
+                return False
+
+
+class IntegerEntryStyle():
+    # Labelled Entry widget restricted (via UserInterface.validate_int) to
+    # positive integer text.
+    def __init__(self, parent, frame, text_left, rw=0, cl=0, pdx=0, width_specify=10):
+        self.label = tk.Label(frame, text=text_left, background=BACKGROUND_COLOR)
+        self.label.grid(row=rw, column=cl, sticky="w")
+        self.text_variable = tk.StringVar()
+        vcmd_int = (parent.root.register(parent.validate_int),
+                '%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W')
+        self.entry = tk.Entry(frame, highlightbackground=BACKGROUND_COLOR, textvariable=self.text_variable, validate = 'key', validatecommand = vcmd_int)
+        self.entry.config(width=width_specify)
+        self.entry.grid(row=rw, column=cl+1, sticky="we", padx=pdx)
+
+    def get_value(self):
+        # The "0" prefix makes an empty entry parse as 0 (the validator
+        # only admits digit characters, so the concatenation is safe).
+        return int("0" + self.text_variable.get())
+
+    def set_value(self, value):
+        self.text_variable.set(str(int(value)))
+
+    def disable(self):
+        self.entry.config(state="disabled")
+        self.label.config(state="disabled")
+
+    def normal(self):
+        self.entry.config(state="normal")
+        self.label.config(state="normal")
+
class FloatEntryStyle():
    """Labelled Tk entry accepting only float-parseable text.

    Keystrokes are vetted by ``parent.validate_float``; get_value/set_value
    convert between the widget's string variable and float.
    """

    def __init__(self, parent, frame, text_left, rw=0, label_width=None, width_specify=10, state_specify='normal'):
        self.text_variable = tk.StringVar()
        self.label = tk.Label(frame, text=text_left, background=BACKGROUND_COLOR, width=label_width)
        validator = (parent.root.register(parent.validate_float),
                     '%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W')
        self.entry = tk.Entry(frame,
                              highlightbackground=BACKGROUND_COLOR,
                              textvariable=self.text_variable,
                              validate='key',
                              validatecommand=validator,
                              width=width_specify,
                              state=state_specify)
        self.label.grid(row=rw, column=0, sticky="w")
        self.entry.grid(row=rw, column=1, sticky="we")

    def get_value(self):
        """Current contents as float; an empty entry reads as 0.0."""
        return float("0" + self.text_variable.get())

    def set_value(self, value):
        """Show ``value`` formatted as a float string."""
        self.text_variable.set(str(float(value)))

    def disable(self):
        """Grey out both the entry and its label."""
        for widget in (self.entry, self.label):
            widget.config(state="disabled")

    def normal(self):
        """Re-enable both the entry and its label."""
        for widget in (self.entry, self.label):
            widget.config(state="normal")
+
class TextEntryStyle():
    """Labelled free-text Tk entry with plain string get/set access."""

    def __init__(self, parent, frame, text_left, rw=0, width_specify=10, stckyL="w", stckyE="w"):
        self.text_variable = tk.StringVar()
        self.label = tk.Label(frame, text=text_left, background=BACKGROUND_COLOR)
        self.entry = tk.Entry(frame,
                              highlightbackground=BACKGROUND_COLOR,
                              textvariable=self.text_variable,
                              width=width_specify)
        self.label.grid(row=rw, column=0, sticky=stckyL)
        self.entry.grid(row=rw, column=1, sticky=stckyE)

    def get_value(self):
        """Return the raw entry text."""
        return self.text_variable.get()

    def set_value(self, value):
        """Replace the entry text with ``value``."""
        self.text_variable.set(value)

    def disable(self):
        """Grey out both the entry and its label."""
        for widget in (self.entry, self.label):
            widget.config(state="disabled")

    def normal(self):
        """Re-enable both the entry and its label."""
        for widget in (self.entry, self.label):
            widget.config(state="normal")
+
+
class DirectoryEntryStyle():
    """Labelled read-only entry showing a chosen directory plus a Browse button.

    Two StringVars are kept in sync: ``directory_string`` holds the full
    path callers read via get_value(), ``_directory_string`` holds the
    clipped text actually displayed in the entry.
    """

    def __init__(self, parent, frame, text_left, rw=0, entry_width=20):
        self.label = tk.Label(frame, text=text_left, background=BACKGROUND_COLOR)
        self.label.grid(row=rw, column=0, sticky="w")
        self.directory_string = tk.StringVar()
        self._directory_string = tk.StringVar()
        self.entry = tk.Entry(frame, highlightbackground=BACKGROUND_COLOR, textvariable=self._directory_string, state="readonly")
        # NOTE(review): entry_width is accepted for compatibility but the
        # original hard-coded width of 49 is kept — confirm intent.
        self.entry.config(width=49)
        self.entry.grid(row=rw, column=1, columnspan=2, sticky="ew")
        self.button = tk.Button(frame, text="Browse", command=lambda: self.update_directory(parent), highlightbackground=BACKGROUND_COLOR)
        self.button.grid(row=rw, column=3, sticky="e")

    def get_value(self):
        """Return the full (unclipped) directory path."""
        return self.directory_string.get()

    def set_value(self, value):
        """Store ``value`` and refresh the clipped display text."""
        self.directory_string.set(value)
        self._directory_string.set(self.clip_dir(self.directory_string.get()))

    def disable(self):
        """Grey out label, entry and Browse button."""
        self.label.config(state="disabled")
        self.entry.config(state="disabled")
        self.button.config(state="disabled")

    def normal(self):
        """Re-enable label, entry and Browse button."""
        self.label.config(state="normal")
        self.entry.config(state="normal")
        self.button.config(state="normal")

    def update_directory(self, master):
        """Open a directory chooser and adopt the selection (if any)."""
        initdir = self.directory_string.get()
        temp_dir = tkFileDialog.askdirectory(parent=master, title="Select output data location", initialdir=initdir)
        # bug fix: 'is not ""' compared object identity, not equality
        # (and raises SyntaxWarning on modern Python); cancel returns "".
        if temp_dir != "":
            self.directory_string.set(temp_dir)
            self._directory_string.set(self.clip_dir(temp_dir))

    # clips the directory to MAX_DIR_LEN characters
    def clip_dir(self, string):
        """Return ``string`` shortened to at most MAX_DIR_LEN characters."""
        MAX_DIR_LEN = 50
        if len(string) > MAX_DIR_LEN:
            # bug fix: keep the total length at MAX_DIR_LEN *including* the
            # "..." prefix; the original kept MAX_DIR_LEN+3 trailing chars,
            # yielding a 56-character result.
            return "..." + string[-(MAX_DIR_LEN - 3):]
        else:
            return string

    def grid_forget(self):
        """Remove label, entry and button from the grid."""
        self.label.grid_forget()
        self.entry.grid_forget()
        self.button.grid_forget()
+
+
+
+
class FloatComboboxStyle():
    """Labelled ttk Combobox whose free-typed text must parse as a float.

    Offers preset choices from ``options_list``; typed input is vetted by
    ``parent.validate_float`` via Tk 'key' validation.
    """

    def __init__(self, parent, frame, text_left, options_list, rw=0, width_specify=10, label_width=None, state_specify='normal'):
        self.text_variable = tk.StringVar()
        self.label = tk.Label(frame, text=text_left, background=BACKGROUND_COLOR, width=label_width)
        validator = (parent.root.register(parent.validate_float),
                     '%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W')
        self.combobox = ttk.Combobox(frame,
                                     textvariable=self.text_variable,
                                     validate='key',
                                     validatecommand=validator)
        self.combobox['values'] = options_list
        self.combobox.config(width=width_specify, state=state_specify)
        self.label.grid(row=rw, column=0, sticky="w")
        self.combobox.grid(row=rw, column=1, sticky="we")

    def get_value(self):
        """Current contents as float; an empty box reads as 0.0."""
        return float("0" + self.text_variable.get())

    def set_value(self, value):
        """Show ``value`` formatted as a float string."""
        self.text_variable.set(str(float(value)))

    def disable(self):
        """Grey out both the combobox and its label."""
        for widget in (self.combobox, self.label):
            widget.config(state="disabled")

    def normal(self):
        """Re-enable both the combobox and its label."""
        for widget in (self.combobox, self.label):
            widget.config(state="normal")
+
+
+
class CheckButtonStyle():
    """Tk Checkbutton wrapper whose disable() clears the box but remembers
    the prior value so normal() can restore it."""

    def __init__(self, parent, frame, text_left, rw=0, cl=0, width_specify=10, pdx=0, pdy=2, stcky="w", state_specify='normal'):
        self._save_previous_variable = 0
        self.int_variable = tk.IntVar()
        self.check_button = tk.Checkbutton(frame,
                                           text=text_left,
                                           background=BACKGROUND_COLOR,
                                           variable=self.int_variable,
                                           state=state_specify)
        self.check_button.grid(row=rw, column=cl, sticky=stcky, pady=pdy, padx=pdx)

    def get_value(self):
        """Return 1 when checked, 0 otherwise."""
        return self.int_variable.get()

    def set_value(self, value):
        """Set the checked state (0/1)."""
        self.int_variable.set(value)

    def disable(self):
        """Uncheck and grey out, remembering the state for normal()."""
        self._save_previous_variable = self.get_value()
        self.set_value(0)
        self.check_button.config(state="disabled")

    def normal(self):
        """Re-enable and restore the value saved by disable()."""
        self.set_value(self._save_previous_variable)
        self.check_button.config(state="normal")

    def state(self):
        """Report the widget's current state string.

        Tk's config() maps each option to a tuple whose last element is
        the current value.
        """
        return self.check_button.config()['state'][-1]

    def grid_forget(self):
        """Remove the checkbutton from the grid."""
        self.check_button.grid_forget()
+
+
class OptionMenuStyle():
    """Labelled Tk OptionMenu constrained to a fixed list of choices.

    set_value() with an unknown value falls back to the first option.
    """

    def __init__(self, parent, frame, text_left, options_list, rw=0, width_specify=15, label_width=None):
        self.entry_list = options_list
        self.text_variable = tk.StringVar()
        self.label = tk.Label(frame, text=text_left, background=BACKGROUND_COLOR, width=label_width, anchor="w")
        self.optionmenu = tk.OptionMenu(frame, self.text_variable, *self.entry_list)
        self.optionmenu.config(bg=BACKGROUND_COLOR, width=width_specify)
        self.label.grid(row=rw, column=0, sticky="w")
        self.optionmenu.grid(row=rw, column=1, sticky="w")

    def get_value(self):
        """Return the currently selected option string."""
        return self.text_variable.get()

    def set_value(self, value):
        """Select ``value`` if known, otherwise the first option."""
        self.text_variable.set(value if value in self.entry_list else self.entry_list[0])

    def disable(self):
        """Grey out both the menu and its label."""
        for widget in (self.optionmenu, self.label):
            widget.config(state="disabled")

    def normal(self):
        """Re-enable both the menu and its label."""
        for widget in (self.optionmenu, self.label):
            widget.config(state="normal")
+
+
class LabelFrameStyle():
    """Create and grid a styled tk.LabelFrame.

    Bug fix: the original assigned the created widget to the *local* name
    ``self``, which discarded it from the instance — callers received an
    object with no attributes. The frame is now kept as ``self.frame``
    (a backward-compatible addition; the widget is still created and
    gridded exactly as before).
    """

    def __init__(self, parent, text_left, rw=0, cl=0, clspan=2, rwspan=1, stcky="w", pdx=15, pdy=10):
        self.frame = tk.LabelFrame(parent, text=text_left, padx=30, pady=10)
        self.frame.config(background=BACKGROUND_COLOR)
        # NOTE(review): cl and stcky are accepted but, as in the original,
        # not applied to the grid call — confirm intended layout.
        self.frame.grid(row=rw, columnspan=clspan, rowspan=rwspan, sticky="w", padx=pdx, pady=pdy)
+
+
+
if __name__ == '__main__':
    # NOTE(review): UserInterface.__init__ appears to take a required
    # user_input_data argument elsewhere in this repository; calling it
    # with no arguments here would raise TypeError — confirm the intended
    # standalone entry point.
    UserInterface()
    # ui.app()
diff --git a/modules/user_interface.pyc b/modules/user_interface.pyc
new file mode 100644
index 0000000..c63c450
Binary files /dev/null and b/modules/user_interface.pyc differ
diff --git a/modules/user_interface.py~ b/modules/user_interface.py~
new file mode 100644
index 0000000..7b14781
--- /dev/null
+++ b/modules/user_interface.py~
@@ -0,0 +1,673 @@
+
+#!/usr/bin/env python
+#coding=utf-8
+
+try:
+ # for Python2
+ import Tkinter as tk
+ import tkFileDialog
+ import tkFont
+ import ttk
+except ImportError:
+ # for Python3
+ import tkinter as tk
+ from tkinter import filedialog as tkFileDialog
+ from tkinter import font as tkFont
+ from tkinter import ttk
+# from ttk import *
+
+import webbrowser
+import sys
+import os
+import csv
+
+# from classes import ExperimentalSetup
+
+IMAGE_EXTENSION='.png'
+
+BACKGROUND_COLOR='gray90'
+# BACKGROUND_COLOR='SlateGray1'
+# BACKGROUND_COLOR='red'
+VERSION='1.0'
+
+NEEDLE_OPTIONS = ['0.7176', '1.270', '1.651']
+IMAGE_SOURCE_OPTIONS = ["Flea3", "USB camera", "Local images"]
+
+PATH_TO_SCRIPT = os.path.join(os.path.dirname(os.path.realpath(__file__)),'..')
+PATH_TO_FILE = os.path.join(os.path.dirname(os.path.realpath(__file__)),"parameters.csv")
+
+# FONT_FRAME_LABEL = ("Helvetica", 16, "BOLD")
+FONT_FRAME_LABEL = '*-*-medium-r-normal--*-160-*'
+
+LABEL_WIDTH = 29
+ENTRY_WIDTH = 11
+
+# fullPathName = os.path.abspath(os.path.dirname(sys.argv[0]))
def call_user_input(user_input_data):
    """Open the settings window; blocks until the user runs or quits.

    UserInterface populates ``user_input_data`` in place via its Run
    handler before destroying the window.
    """
    UserInterface(user_input_data)
+
class UserInterface(tk.Toplevel):
    """Main settings window for the contact-angle tool.

    Builds the input form (physical inputs, plotting checklist, save
    location, image acquisition, run/quit), loads previously saved
    parameters from PATH_TO_FILE, then blocks in mainloop() until the
    user runs or quits. The Run handler copies all widget values onto
    ``user_input_data`` and persists them back to PATH_TO_FILE.

    NOTE(review): although this subclasses tk.Toplevel, __init__ never
    calls super().__init__() and creates its own tk.Tk() root instead —
    confirm the base class is actually needed.
    """
    def __init__(self, user_input_data):
        """Build all frames, import saved parameters, run the Tk mainloop."""
        self.initialise = True # need this flag to disable float and integer checking for inputs
        print("true")  # NOTE(review): leftover debug prints
        self.root = tk.Tk()
        print("root")
        self.root.geometry("+100+100")
        print("resoA")
        # [width, height] of the physical screen, used later by callers
        self.screen_resolution = [self.root.winfo_screenwidth(), self.root.winfo_screenheight()]
        print("resoB")
        self.root.lift()
        # bring the window to the front once, then release topmost status
        self.root.call('wm', 'attributes', '.', '-topmost', True)
        self.root.after_idle(self.root.call, 'wm', 'attributes', '.', '-topmost', False)

        self.root.title('Conan the Barbarian v '+VERSION)

        self.root.configure(height=25, background=BACKGROUND_COLOR, padx=50)
        self.create_title()
        self.create_physical_inputs()
        self.create_plotting_checklist()
        self.create_save_location()
        self.create_image_acquisition()
        # self.create_save_box()
        self.create_run_quit(user_input_data)
        self.create_homepage_url()

        self.import_parameters()



        self.initialise = False # need this to setup entry widgets validation

        self.root.mainloop()

    def create_title(self):
        """Big application title across the top of the window."""
        title_frame = tk.Frame(self.root)
        title_frame.config(background=BACKGROUND_COLOR)
        title_frame.grid(row=0, columnspan=3, rowspan=1, padx=30, pady=10)
        # Label(title_frame, text="Open drop", font=("Helvetica", 36), justify=CENTER, background="lavender").grid(row=0, sticky=N)
        tk.Label(title_frame, text="CONtact ANgle", background=BACKGROUND_COLOR, font=("Helvetica", 36), anchor="center").grid(row=0)

    def create_physical_inputs(self):
        """Threshold, continuous density and needle diameter inputs."""
        physical_frame = tk.LabelFrame(self.root, text="Physical inputs", padx=30, pady=10)
        physical_frame.config(background=BACKGROUND_COLOR)
        physical_frame.grid(row=1, column=0, columnspan=2, sticky="wens", padx=15, pady=15) #, rowspan=3

        self.threshold_val = FloatEntryStyle(self, physical_frame, "Threshold value:", rw=0, width_specify=ENTRY_WIDTH,state_specify='normal') #, label_width=LABEL_WIDTH)
        self.density_outer = FloatEntryStyle(self, physical_frame, "Continuous density (kg/m"u"\u00b3""):", rw=1,state_specify='disabled') #, label_width=LABEL_WIDTH)
        self.needle_diameter = FloatComboboxStyle(self, physical_frame, "Needle diameter (mm):", NEEDLE_OPTIONS, rw=2,state_specify='disabled') #, label_width=LABEL_WIDTH)

        physical_frame.grid_columnconfigure(0, minsize=LABEL_WIDTH)

    def create_plotting_checklist(self):
        """Checkboxes choosing which plots appear during fitting."""
        clist_frame = tk.LabelFrame(self.root, text="To view during fitting", padx=30, pady=10) #, height=15)
        clist_frame.config(background=BACKGROUND_COLOR)
        clist_frame.grid(row=1, column=2, columnspan=1, sticky="wens", padx=15, pady=15) #, rowspan=3

        self.residuals_boole = CheckButtonStyle(self, clist_frame, "Residuals", rw=0, cl=0,state_specify='disabled')
        self.profiles_boole = CheckButtonStyle(self, clist_frame, "Profiles", rw=1, cl=0,state_specify='disabled')
        self.IFT_boole = CheckButtonStyle(self, clist_frame, "Physical quantities", rw=2, cl=0,state_specify='disabled')


    def create_image_acquisition(self):
        """Image-source selector, frame count/wait time and save options."""
        image_acquisition_frame = tk.LabelFrame(self.root, text="Image acquisition", height=15, padx=30, pady=10)
        image_acquisition_frame.config(background=BACKGROUND_COLOR)
        image_acquisition_frame.grid(row=5, columnspan=4, rowspan=1, sticky="we",padx=15, pady=10)
        # image_acquisition_frame.grid_columnconfigure(0, minsize=50)
        image_acquisition_frame.grid_columnconfigure(2, weight=1)

        self.image_source = OptionMenuStyle(self, image_acquisition_frame, "Image source:", IMAGE_SOURCE_OPTIONS, rw=0, label_width=12) #(LABEL_WIDTH-ENTRY_WIDTH))

        # self.number_frames = IntegerEntryStyle(self, image_acquisition_frame, "Number of frames:", rw=0, cl=3, pdx=10)
        # self.wait_time = IntegerEntryStyle(self, image_acquisition_frame, "Wait time (s):", rw=1, cl=3, pdx=10)

        # self.directory = DirectoryEntryStyle(self.root, self.save_info_frame, "Location:", rw=3, entry_width=50)

        # image_acquisition_frame.grid_columnconfigure(3, minsize=LABEL_WIDTH)
        # self.image_source.text_variable.trace_variable('w',self.propogate_state)
        # re-evaluate save options whenever the image source changes
        self.image_source.text_variable.trace_variable('w',self.propogate_state)



        self.number_frames = IntegerEntryStyle(self, image_acquisition_frame, "Number of frames:", rw=3, cl=0, pdx=10)
        self.wait_time = IntegerEntryStyle(self, image_acquisition_frame, "Wait time (s):", rw=4, cl=0, pdx=10)

        self.save_images_boole = CheckButtonStyle(self, image_acquisition_frame, "Save image", rw=3, cl=3)
        self.create_new_dir_boole = CheckButtonStyle(self, image_acquisition_frame, "Create new directory", rw=4, cl=3)#, pdx=50)
        self.save_images_boole.int_variable.trace_variable('w',self.check_button_changed)



    def propogate_state(self, *args):
        """Enable/disable the save checkboxes based on the image source.

        (Name is a typo for "propagate" — kept because it is bound as a
        trace callback above.)
        """
        if self.image_source.get_value()=="Local images":
            # local images are never re-saved, so both options are moot
            self.save_images_boole.disable()
            self.create_new_dir_boole.disable()
            # self.filename_string.disable()
            # self.directory.disable()
            # self.filename_extension.config(state="disable")
        else:
            self.save_images_boole.normal()
            self.check_button_changed()


    def create_save_location(self):
        """Output directory browser and filename entry."""
        location_frame = tk.LabelFrame(self.root, text="Output data location", height=15, padx=30, pady=10)
        location_frame.config(background=BACKGROUND_COLOR)
        location_frame.grid(row=4, columnspan=3, rowspan=1, sticky="w", padx=15, pady=10)

        self.directory = DirectoryEntryStyle(self.root, location_frame, "Location:", rw=0, entry_width=50)

        self.filename_string = TextEntryStyle(self, location_frame, "Filename:", rw=1, width_specify=20, stckyE="ew")
        self.filename_extension = tk.Label(location_frame, text="[YYYY-MM-DD-hhmmss].[ext]", background=BACKGROUND_COLOR)
        self.filename_extension.grid(row=1, column=2, columnspan=2, sticky="w")
        location_frame.columnconfigure(1,weight=1)




    # def create_save_box(self):
    #     self.save_info_frame = tk.LabelFrame(self.root, text="Save images", height=15, padx=30, pady=10)
    #     self.save_info_frame.config(background=BACKGROUND_COLOR)
    #     self.save_info_frame.grid(row=6, columnspan=4, rowspan=4, sticky="w", padx=15, pady=10)
    #     self.save_images_boole = CheckButtonStyle(self, self.save_info_frame, "Save image", rw=0)
    #     self.create_new_dir_boole = CheckButtonStyle(self, self.save_info_frame, "Create new directory", rw=0, cl=3, pdx=50)


    #     # self, parent, frame, text_left, rw=0, cl=0, width_specify=10, pdx=0, pdy=2, stcky="w")


    #     # self.filename_string = TextEntryStyle(self, self.save_info_frame, "Filename:", rw=2, width_specify=20, stckyE="ew")
    #     # self.filename_string.default_string = "Extracted_data"
    #     # self.filename_extension = tk.Label(self.save_info_frame, text="[YYYY-MM-DD-hhmmss]"+IMAGE_EXTENSION, background=BACKGROUND_COLOR)
    #     # self.filename_extension.grid(row=2, column=2, sticky="w")
    #     # self.save_info_frame.columnconfigure(1,weight=1)

    #     # self.directory = DirectoryEntryStyle(self.root, self.save_info_frame, "Location:", rw=3, entry_width=50)

    #     self.save_info_frame.columnconfigure(0, weight=1)
    #     self.save_info_frame.columnconfigure(1, weight=1)
    #     # self.save_info_frame.columnconfigure(4, weight=1)

    #     self.save_images_boole.int_variable.trace_variable('w',self.check_button_changed)



    def check_button_changed(self, *args):
        """"Create new directory" is only meaningful when saving images."""
        if self.save_images_boole.get_value():
            self.create_new_dir_boole.normal()
            # self.filename_string.normal()
            # self.directory.normal()
            # self.filename_extension.config(state="normal")
        else:
            self.create_new_dir_boole.disable()
            # self.filename_string.disable()
            # self.directory.disable()
            # self.filename_extension.config(state="disable")



    # def update_directory(self):
    #     directory = os.path.dirname(os.path.realpath(__file__))
    #     self.output_location_string = tkFileDialog.askdirectory(parent = self.root, title="Select output data location", initialdir=directory)
    #     self.output_location_text.config(text = self.clip_dir(self.output_location_string))


    # def clip_dir(self, string):
    #     MAX_DIR_LEN=20
    #     if len(string) > MAX_DIR_LEN:
    #         return "..." + string[-(MAX_DIR_LEN+3):]
    #     else:
    #         return string
    #     # return string


    def create_run_quit(self, user_input_data):
        """Quit/Run buttons and their keyboard bindings."""
        # run_quit_frame = LabelFrame(self.root, text="ys", height=15)
        run_quit_frame = tk.Frame(self.root)
        run_quit_frame.config(background=BACKGROUND_COLOR)
        run_quit_frame.grid(row=22, columnspan=5, padx=10, pady=10, sticky="we")
        # save_images_run = tk.Button(run_quit_frame, text='Run', highlightbackground=BACKGROUND_COLOR, command=self.run) # , state='disabled'
        save_images_run = tk.Button(run_quit_frame, text='Run', highlightbackground=BACKGROUND_COLOR, command=lambda: self.run(user_input_data)) # , state='disabled'
        save_images_quit = tk.Button(run_quit_frame, text='Quit', highlightbackground=BACKGROUND_COLOR, command=self.quit)

        # self.root.bind("", lambda _: self.callback_run(user_input_data))
        # self.root.bind("", self.callback_quit)
        # NOTE(review): empty event sequences — these look like "<Return>"/
        # "<Escape>" bindings whose angle-bracketed names were lost; confirm
        # against the original source.
        self.root.bind("", lambda _: self.run(user_input_data))
        self.root.bind("", lambda _: self.quit())


        run_quit_frame.columnconfigure(0, weight=1)
        run_quit_frame.columnconfigure(2, weight=1)
        run_quit_frame.columnconfigure(4, weight=1)


        # save_images_run.grid(row=0, column=1, sticky="we")#padx=15, pady=10, sticky=W+E)
        # save_images_quit.grid(row=0, column=3, sticky="we")#padx=15, pady=10, sticky=W+E)
        save_images_quit.grid(row=0, column=1, sticky="we")#padx=15, pady=10, sticky=W+E)
        save_images_run.grid(row=0, column=3, sticky="we")#padx=15, pady=10, sticky=W+E)


    def create_homepage_url(self):
        """Clickable project-homepage link with hover underline."""
        homepage_frame = tk.Frame(self.root)
        homepage_frame.config(background=BACKGROUND_COLOR)
        homepage_frame.grid(row=23, columnspan=4, padx=40, pady=10, sticky="e")

        self.label_link = tk.Label(homepage_frame, text="opencolloids.com", highlightbackground=BACKGROUND_COLOR, background=BACKGROUND_COLOR, fg="blue", cursor="arrow")#"hand2")
        self.link_font = tkFont.Font(self.label_link, self.label_link.cget("font"))
        self.link_font_underline = tkFont.Font(self.label_link, self.label_link.cget("font"))
        self.link_font_underline.configure(underline = True)

        # NOTE(review): empty event sequences — likely "<Button-1>",
        # "<Enter>" and "<Leave>" lost their angle-bracketed names; confirm.
        self.label_link.bind("", self.homepage_url_callback)
        self.label_link.grid(row=0,column=0)
        self.label_link.bind("", self.underline_link)
        self.label_link.bind("", self.remove_underline_link)

    def homepage_url_callback(self, event):
        """Open the project homepage in the default browser."""
        webbrowser.open_new(r"http://www.opencolloids.com")

    def underline_link(self, event):
        """Hover-in: underline the link and darken its colour."""
        self.label_link.config(text="opencolloids.com", font=self.link_font_underline, fg="navy")# underline = True)

    def remove_underline_link(self, event):
        """Hover-out: restore the plain link appearance."""
        self.label_link.config(text="opencolloids.com", font=self.link_font, fg="blue")# underline = False)


    def run(self, user_input_data):
        """Run handler: copy settings out, persist them, close the window.

        For "Local images" the user picks the input files here; selecting
        none aborts the whole process via sys.exit().
        """
        self.update_user_settings(user_input_data)
        self.export_parameters()
        if self.image_source.get_value() == "Local images":
            user_input_data.import_files = tkFileDialog.askopenfilenames(parent = self.root, title="Select files", initialdir=PATH_TO_SCRIPT)
            user_input_data.number_of_frames = len(user_input_data.import_files)
            # if self.create_new_dir_boole.get_value(): #create_folder_boole
            #     new_directory = os.path.join(user_input_data.directory_string, self.filename_string.get_value())
            #     os.makedirs(new_directory)
            #     user_input_data.directory_string = new_directory

            # if user doesnt select files - abort
            if user_input_data.number_of_frames == 0:
                sys.exit()


        self.root.destroy()


    def quit(self):
        """Quit handler: terminate the whole process."""
        sys.exit()

    def import_parameters(self):
        """Restore widget values from the CSV written by export_parameters.

        Rows are read positionally (index 0..12, value in column 1), so
        the order must match export_parameters exactly.
        NOTE(review): the file handle from open() is never closed, and the
        variable is (mis)named ``writer`` though it is a csv.reader.
        """
        if os.path.exists(PATH_TO_FILE):
            data = []
            writer = csv.reader(open(PATH_TO_FILE, 'r'))
            for row in writer:
                data.append(row)
            self.threshold_val.set_value(data[0][1])
            self.density_outer.set_value(data[1][1])
            self.needle_diameter.set_value(data[2][1])
            self.residuals_boole.set_value(data[3][1])
            self.profiles_boole.set_value(data[4][1])
            self.IFT_boole.set_value(data[5][1])

            given_image_source = data[6][1]
            if given_image_source in IMAGE_SOURCE_OPTIONS:
                self.image_source.set_value(given_image_source) # set image source
            else:
                # NOTE(review): clearing the *directory* when the image
                # source is unrecognised looks unintentional — confirm.
                self.directory.set_value("")

            self.number_frames.set_value(data[7][1])
            self.wait_time.set_value(data[8][1])
            self.save_images_boole.set_value(data[9][1]) # do this after others
            self.create_new_dir_boole.set_value(data[10][1])

            self.filename_string.set_value(data[11][1])
            # self.

            given_dir = data[12][1]
            if os.path.isdir(given_dir):
                self.directory.set_value(given_dir) # set given directory
                # print(self.directory._directory_string.get_value())
            else:
                self.directory.set_value(os.getcwd()) # current directory of Terminal


    def update_user_settings(self, user_input_data):
        """Copy every widget value onto the caller's user_input_data object."""
        user_input_data.screen_resolution = self.screen_resolution
        user_input_data.threshold_val = self.threshold_val.get_value()
        user_input_data.continuous_density = self.density_outer.get_value()
        user_input_data.needle_diameter_mm= self.needle_diameter.get_value()
        user_input_data.residuals_boole = self.residuals_boole.get_value()
        user_input_data.profiles_boole = self.profiles_boole.get_value()
        user_input_data.interfacial_tension_boole = self.IFT_boole.get_value()
        user_input_data.image_source = self.image_source.get_value()
        user_input_data.number_of_frames = self.number_frames.get_value()
        user_input_data.wait_time = self.wait_time.get_value()
        user_input_data.save_images_boole = self.save_images_boole.get_value()
        user_input_data.create_folder_boole = self.create_new_dir_boole.get_value()
        temp_filename = self.filename_string.get_value()
        if temp_filename == '':
            temp_filename = "Extracted_data"
        # NOTE(review): IMAGE_EXTENSION (.png) is appended to the *data*
        # filename here — confirm this is intended and not a copy-paste.
        user_input_data.filename = temp_filename + IMAGE_EXTENSION
        user_input_data.directory_string = self.directory.get_value()




    def export_parameters(self):
        """Persist all settings to PATH_TO_FILE as (label, value) CSV rows.

        Row order is the contract import_parameters relies on.
        NOTE(review): the open() handle is never closed and newline='' is
        not passed, which yields blank rows on Windows — confirm/fix in a
        follow-up.
        """
        parameter_vector = ([
            ('Image thresholding', self.threshold_val.get_value()),
            ('Continuous density', self.density_outer.get_value()),
            ('Needle diameter',self.needle_diameter.get_value()),
            ('Plot residuals',self.residuals_boole.get_value()),
            ('Plot profiles',self.profiles_boole.get_value()),
            ('Plot IFT',self.IFT_boole.get_value()),
            ('Image source',self.image_source.get_value()),
            ('Number of frames',self.number_frames.get_value()),
            ('Wait time',self.wait_time.get_value()),
            ('Save images',self.save_images_boole.get_value()),
            ('Create new data folder',self.create_new_dir_boole.get_value()),
            ('Filename',self.filename_string.get_value()),
            ('Directory',self.directory.get_value())
            ])
        writer = csv.writer(open(PATH_TO_FILE, 'w'))
        for row in parameter_vector:
            writer.writerow(row)



    # def validate_float(self, value_if_allowed):
    #     if text in '0123456789.-+':
    #         try:
    #             float(value_if_allowed)
    #             return True
    #         except ValueError:
    #             return False
    #     else:
    #         return False
    def validate_float(self, action, index, value_if_allowed,
            prior_value, text, validation_type, trigger_type, widget_name):
        """Tk 'key' validator allowing only float-parseable entry text.

        Empty string and a lone '.' are allowed as intermediate states;
        otherwise the typed text must be a float character and the whole
        value must parse as float. Arguments mirror Tk's %-substitutions.
        """
        # print "OnValidate:"
        # print "d='%s'" % action
        # print "i='%s'" % index
        # print "P='%s'" % value_if_allowed
        # print "s='%s'" % prior_value
        # print "S='%s'" % text
        # print "v='%s'" % validation_type
        # print "V='%s'" % trigger_type
        # print "W='%s'" % widget_name
        if value_if_allowed == '':
            return True
        elif value_if_allowed == '.':
            return True
        else:
            if text in '0123456789.-+':
                try:
                    float(value_if_allowed)
                    return True
                except ValueError:
                    return False
            else:
                return False


    def validate_int(self, action, index, value_if_allowed,
            prior_value, text, validation_type, trigger_type, widget_name):
        """Tk 'key' validator restricting an Entry to positive integers.

        Accepts anything during initialisation, accepts the empty string,
        rejects a bare '0', otherwise requires digit input that parses as
        an int.
        """
        if self.initialise == True:
            return True
        elif value_if_allowed == '':
            # self.recheck_wait_state(0)
            return True
        elif value_if_allowed == '0':
            return False
        else:
            if text in '0123456789':
                try:
                    int_value = int(value_if_allowed)
                    # self.recheck_wait_state(int_value)
                    return True
                except ValueError:
                    return False
            else:
                return False
+
+
class IntegerEntryStyle():
    """Labelled Tk entry accepting only positive integers.

    Keystrokes are vetted by ``parent.validate_int`` via Tk's 'key'
    validation; get_value/set_value convert between the widget's string
    variable and int.
    """

    def __init__(self, parent, frame, text_left, rw=0, cl=0, pdx=0, width_specify=10):
        self.text_variable = tk.StringVar()
        self.label = tk.Label(frame, text=text_left, background=BACKGROUND_COLOR)
        # %-codes forward Tk's validation context to parent.validate_int
        validator = (parent.root.register(parent.validate_int),
                     '%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W')
        self.entry = tk.Entry(frame,
                              highlightbackground=BACKGROUND_COLOR,
                              textvariable=self.text_variable,
                              validate='key',
                              validatecommand=validator,
                              width=width_specify)
        self.label.grid(row=rw, column=cl, sticky="w")
        self.entry.grid(row=rw, column=cl + 1, sticky="we", padx=pdx)

    def get_value(self):
        """Current contents as int; an empty entry reads as 0."""
        return int("0" + self.text_variable.get())

    def set_value(self, value):
        """Show ``value`` as an integer string."""
        self.text_variable.set(str(int(value)))

    def disable(self):
        """Grey out both the entry and its label."""
        for widget in (self.entry, self.label):
            widget.config(state="disabled")

    def normal(self):
        """Re-enable both the entry and its label."""
        for widget in (self.entry, self.label):
            widget.config(state="normal")
+
class FloatEntryStyle():
    """Labelled Tk entry accepting only float-parseable text.

    Keystrokes are vetted by ``parent.validate_float``; get_value/set_value
    convert between the widget's string variable and float.
    """

    def __init__(self, parent, frame, text_left, rw=0, label_width=None, width_specify=10, state_specify='normal'):
        self.text_variable = tk.StringVar()
        self.label = tk.Label(frame, text=text_left, background=BACKGROUND_COLOR, width=label_width)
        validator = (parent.root.register(parent.validate_float),
                     '%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W')
        self.entry = tk.Entry(frame,
                              highlightbackground=BACKGROUND_COLOR,
                              textvariable=self.text_variable,
                              validate='key',
                              validatecommand=validator,
                              width=width_specify,
                              state=state_specify)
        self.label.grid(row=rw, column=0, sticky="w")
        self.entry.grid(row=rw, column=1, sticky="we")

    def get_value(self):
        """Current contents as float; an empty entry reads as 0.0."""
        return float("0" + self.text_variable.get())

    def set_value(self, value):
        """Show ``value`` formatted as a float string."""
        self.text_variable.set(str(float(value)))

    def disable(self):
        """Grey out both the entry and its label."""
        for widget in (self.entry, self.label):
            widget.config(state="disabled")

    def normal(self):
        """Re-enable both the entry and its label."""
        for widget in (self.entry, self.label):
            widget.config(state="normal")
+
class TextEntryStyle():
    """Labelled free-text Tk entry with plain string get/set access."""

    def __init__(self, parent, frame, text_left, rw=0, width_specify=10, stckyL="w", stckyE="w"):
        self.text_variable = tk.StringVar()
        self.label = tk.Label(frame, text=text_left, background=BACKGROUND_COLOR)
        self.entry = tk.Entry(frame,
                              highlightbackground=BACKGROUND_COLOR,
                              textvariable=self.text_variable,
                              width=width_specify)
        self.label.grid(row=rw, column=0, sticky=stckyL)
        self.entry.grid(row=rw, column=1, sticky=stckyE)

    def get_value(self):
        """Return the raw entry text."""
        return self.text_variable.get()

    def set_value(self, value):
        """Replace the entry text with ``value``."""
        self.text_variable.set(value)

    def disable(self):
        """Grey out both the entry and its label."""
        for widget in (self.entry, self.label):
            widget.config(state="disabled")

    def normal(self):
        """Re-enable both the entry and its label."""
        for widget in (self.entry, self.label):
            widget.config(state="normal")
+
+
class DirectoryEntryStyle():
    """Labelled read-only entry showing a chosen directory plus a Browse button.

    Two StringVars are kept in sync: ``directory_string`` holds the full
    path callers read via get_value(), ``_directory_string`` holds the
    clipped text actually displayed in the entry.
    """

    def __init__(self, parent, frame, text_left, rw=0, entry_width=20):
        self.label = tk.Label(frame, text=text_left, background=BACKGROUND_COLOR)
        self.label.grid(row=rw, column=0, sticky="w")
        self.directory_string = tk.StringVar()
        self._directory_string = tk.StringVar()
        self.entry = tk.Entry(frame, highlightbackground=BACKGROUND_COLOR, textvariable=self._directory_string, state="readonly")
        # NOTE(review): entry_width is accepted for compatibility but the
        # original hard-coded width of 49 is kept — confirm intent.
        self.entry.config(width=49)
        self.entry.grid(row=rw, column=1, columnspan=2, sticky="ew")
        self.button = tk.Button(frame, text="Browse", command=lambda: self.update_directory(parent), highlightbackground=BACKGROUND_COLOR)
        self.button.grid(row=rw, column=3, sticky="e")

    def get_value(self):
        """Return the full (unclipped) directory path."""
        return self.directory_string.get()

    def set_value(self, value):
        """Store ``value`` and refresh the clipped display text."""
        self.directory_string.set(value)
        self._directory_string.set(self.clip_dir(self.directory_string.get()))

    def disable(self):
        """Grey out label, entry and Browse button."""
        self.label.config(state="disabled")
        self.entry.config(state="disabled")
        self.button.config(state="disabled")

    def normal(self):
        """Re-enable label, entry and Browse button."""
        self.label.config(state="normal")
        self.entry.config(state="normal")
        self.button.config(state="normal")

    def update_directory(self, master):
        """Open a directory chooser and adopt the selection (if any)."""
        initdir = self.directory_string.get()
        temp_dir = tkFileDialog.askdirectory(parent=master, title="Select output data location", initialdir=initdir)
        # bug fix: 'is not ""' compared object identity, not equality
        # (and raises SyntaxWarning on modern Python); cancel returns "".
        if temp_dir != "":
            self.directory_string.set(temp_dir)
            self._directory_string.set(self.clip_dir(temp_dir))

    # clips the directory to MAX_DIR_LEN characters
    def clip_dir(self, string):
        """Return ``string`` shortened to at most MAX_DIR_LEN characters."""
        MAX_DIR_LEN = 50
        if len(string) > MAX_DIR_LEN:
            # bug fix: keep the total length at MAX_DIR_LEN *including* the
            # "..." prefix; the original kept MAX_DIR_LEN+3 trailing chars,
            # yielding a 56-character result.
            return "..." + string[-(MAX_DIR_LEN - 3):]
        else:
            return string

    def grid_forget(self):
        """Remove label, entry and button from the grid."""
        self.label.grid_forget()
        self.entry.grid_forget()
        self.button.grid_forget()
+
+
+
+
class FloatComboboxStyle():
    """Labelled ttk Combobox whose free-typed text must parse as a float.

    Offers preset choices from ``options_list``; typed input is vetted by
    ``parent.validate_float`` via Tk 'key' validation.
    """

    def __init__(self, parent, frame, text_left, options_list, rw=0, width_specify=10, label_width=None, state_specify='normal'):
        self.text_variable = tk.StringVar()
        self.label = tk.Label(frame, text=text_left, background=BACKGROUND_COLOR, width=label_width)
        validator = (parent.root.register(parent.validate_float),
                     '%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W')
        self.combobox = ttk.Combobox(frame,
                                     textvariable=self.text_variable,
                                     validate='key',
                                     validatecommand=validator)
        self.combobox['values'] = options_list
        self.combobox.config(width=width_specify, state=state_specify)
        self.label.grid(row=rw, column=0, sticky="w")
        self.combobox.grid(row=rw, column=1, sticky="we")

    def get_value(self):
        """Current contents as float; an empty box reads as 0.0."""
        return float("0" + self.text_variable.get())

    def set_value(self, value):
        """Show ``value`` formatted as a float string."""
        self.text_variable.set(str(float(value)))

    def disable(self):
        """Grey out both the combobox and its label."""
        for widget in (self.combobox, self.label):
            widget.config(state="disabled")

    def normal(self):
        """Re-enable both the combobox and its label."""
        for widget in (self.combobox, self.label):
            widget.config(state="normal")
+
+
+
class CheckButtonStyle():
    """Tk Checkbutton wrapper whose disable() clears the box but remembers
    the prior value so normal() can restore it."""

    def __init__(self, parent, frame, text_left, rw=0, cl=0, width_specify=10, pdx=0, pdy=2, stcky="w", state_specify='normal'):
        self._save_previous_variable = 0
        self.int_variable = tk.IntVar()
        self.check_button = tk.Checkbutton(frame,
                                           text=text_left,
                                           background=BACKGROUND_COLOR,
                                           variable=self.int_variable,
                                           state=state_specify)
        self.check_button.grid(row=rw, column=cl, sticky=stcky, pady=pdy, padx=pdx)

    def get_value(self):
        """Return 1 when checked, 0 otherwise."""
        return self.int_variable.get()

    def set_value(self, value):
        """Set the checked state (0/1)."""
        self.int_variable.set(value)

    def disable(self):
        """Uncheck and grey out, remembering the state for normal()."""
        self._save_previous_variable = self.get_value()
        self.set_value(0)
        self.check_button.config(state="disabled")

    def normal(self):
        """Re-enable and restore the value saved by disable()."""
        self.set_value(self._save_previous_variable)
        self.check_button.config(state="normal")

    def state(self):
        """Report the widget's current state string.

        Tk's config() maps each option to a tuple whose last element is
        the current value.
        """
        return self.check_button.config()['state'][-1]

    def grid_forget(self):
        """Remove the checkbutton from the grid."""
        self.check_button.grid_forget()
+
+
class OptionMenuStyle():
    """Labelled Tk OptionMenu constrained to a fixed list of choices.

    Bug fix: set_value's fallback referenced the undefined name
    ``entry_list`` (NameError at runtime when an unknown value was given);
    it must be ``self.entry_list[0]``, matching the corrected copy of this
    class in user_interface.py.
    """

    def __init__(self, parent, frame, text_left, options_list, rw=0, width_specify=15, label_width=None):
        self.entry_list = options_list
        self.label = tk.Label(frame, text=text_left, background=BACKGROUND_COLOR, width=label_width, anchor="w")
        self.label.grid(row=rw, column=0, sticky="w")
        self.text_variable = tk.StringVar()
        self.optionmenu = tk.OptionMenu(*(frame, self.text_variable) + tuple(self.entry_list))
        self.optionmenu.config(bg = BACKGROUND_COLOR, width=width_specify)
        self.optionmenu.grid(row=rw, column=1, sticky="w")

    def get_value(self):
        """Return the currently selected option string."""
        return self.text_variable.get()

    def set_value(self, value):
        """Select ``value`` if known, otherwise fall back to the first option."""
        if value in self.entry_list:
            self.text_variable.set(value)
        else:
            self.text_variable.set(self.entry_list[0])

    def disable(self):
        """Grey out both the menu and its label."""
        self.optionmenu.config(state="disabled")
        self.label.config(state="disabled")

    def normal(self):
        """Re-enable both the menu and its label."""
        self.optionmenu.config(state="normal")
        self.label.config(state="normal")
+
+
class LabelFrameStyle():
    """Create and grid a styled tk.LabelFrame.

    Bug fix: the original assigned the created widget to the *local* name
    ``self``, which discarded it from the instance — callers received an
    object with no attributes. The frame is now kept as ``self.frame``
    (a backward-compatible addition; the widget is still created and
    gridded exactly as before).
    """

    def __init__(self, parent, text_left, rw=0, cl=0, clspan=2, rwspan=1, stcky="w", pdx=15, pdy=10):
        self.frame = tk.LabelFrame(parent, text=text_left, padx=30, pady=10)
        self.frame.config(background=BACKGROUND_COLOR)
        # NOTE(review): cl and stcky are accepted but, as in the original,
        # not applied to the grid call — confirm intended layout.
        self.frame.grid(row=rw, columnspan=clspan, rowspan=rwspan, sticky="w", padx=pdx, pady=pdy)
+
+
+
if __name__ == '__main__':
    # NOTE(review): UserInterface.__init__ (above) requires a
    # user_input_data argument, so this no-argument call would raise
    # TypeError — confirm the intended standalone entry point.
    UserInterface()
    # ui.app()
+
+
+
+
+
+
+
+
+
diff --git a/outputs/.DS_Store b/outputs/.DS_Store
new file mode 100644
index 0000000..5008ddf
Binary files /dev/null and b/outputs/.DS_Store differ
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..a297411
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,56 @@
+absl-py==2.0.0
+astunparse==1.6.3
+cachetools==5.3.2
+certifi @ file:///private/var/folders/sy/f16zz6x50xz3113nwtb9bvq00000gp/T/abs_477u68wvzm/croot/certifi_1671487773341/work/certifi
+charset-normalizer==3.3.2
+cycler==0.11.0
+flatbuffers==23.5.26
+fonttools==4.38.0
+gast==0.4.0
+google-auth==2.23.4
+google-auth-oauthlib==0.4.6
+google-pasta==0.2.0
+grpcio==1.59.3
+h5py==3.8.0
+idna==3.6
+importlib-metadata==6.7.0
+joblib==1.3.2
+keras==2.11.0
+kiwisolver==1.4.5
+libclang==16.0.6
+llvmlite==0.39.1
+Markdown==3.4.4
+MarkupSafe==2.1.3
+matplotlib==3.5.3
+numba==0.56.4
+numpy==1.21.6
+oauthlib==3.2.2
+opencv-python==4.8.1.78
+opt-einsum==3.3.0
+packaging==23.2
+Pillow==9.5.0
+protobuf==3.19.6
+pyasn1==0.5.1
+pyasn1-modules==0.3.0
+pyparsing==3.1.1
+python-dateutil==2.8.2
+requests==2.31.0
+requests-oauthlib==1.3.1
+rsa==4.9
+scikit-learn==1.0.2
+scipy==1.7.3
+six==1.16.0
+# NOTE: 'sklearn' is the deprecated PyPI alias for scikit-learn (already pinned above); this dummy package can be removed.
+sklearn==0.0.post11
+tensorboard==2.11.2
+tensorboard-data-server==0.6.1
+tensorboard-plugin-wit==1.8.1
+tensorflow==2.11.0
+tensorflow-estimator==2.11.0
+tensorflow-io-gcs-filesystem==0.34.0
+termcolor==2.3.0
+threadpoolctl==3.1.0
+typing_extensions==4.7.1
+urllib3==2.0.7
+Werkzeug==2.2.3
+wrapt==1.16.0
+zipp==3.15.0