From d1a9ee3211cdeff6e97541f683687c7e05f11f5a Mon Sep 17 00:00:00 2001
From: sustielz <43007813+sustielz@users.noreply.github.com>
Date: Tue, 26 May 2020 13:40:57 -0400
Subject: [PATCH 1/3] Create AutoTighten.p

First draft of AutoTighten. The process is as follows:
a) compute new trajectories every nvar frames
b) compute the mean position of each trajectory, and use trackpy to figure out which particles are in which traps
c) compute the variance of each trajectory, and adjust alpha for each trap to bring it closer to a target variance
---
 tasks/AutoTighten.p | 84 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 84 insertions(+)
 create mode 100644 tasks/AutoTighten.p

diff --git a/tasks/AutoTighten.p b/tasks/AutoTighten.p
new file mode 100644
index 00000000..795bb5c5
--- /dev/null
+++ b/tasks/AutoTighten.p
@@ -0,0 +1,84 @@
+# -*- coding: utf-8 -*-
+# MENU: Auto-Trap
+# VISION: True
+
+from .Task import Task
+from PyQt5.QtGui import QVector3D
+from collections import deque
+import numpy as np
+import pandas as pd
+import trackpy as tp
+from .Video import Video
+
+N_frames = 200  # number of frames used to compute variance
+## N_it = 10    # number of iterations to perform (implicit in nframes in the task 'doprocess')
+N_delay = 10    # number of frames to delay between adjustments
+alpha_max = 10
+alpha_min = 0
+tol = None
+count = 0
+
+
+class AutoTighten(Task):
+
+    def __init__(self, target=1, alpha=[0, 10], nframes=1050, nvar=200, ndelay=10, **kwargs):
+        super(AutoTighten, self).__init__(**kwargs)
+        self.nframes = nframes  ## Number of frames to run 'doprocess'
+        self.nvar = nvar        ## Number of frames to track to compute variance
+        self.ndelay = ndelay    ## Number of frames to delay between iterations
+        self.target = target    ## Target variance
+        self.alpha_min = alpha[0]
+        self.alpha_max = alpha[1]
+
+    def initialize(self, frame):
+        ## Initialize counters; traps are paired to features in process
+        self.vision = self.parent.vision
+        self.vision.realTime(True)
+        self.count = 0
+        self.cdelay = 0
+
+    def process(self, frame):
+        self.count += 1
+        if self.count % self.nvar == 0:
+            #### First, make a video from the last nvar frames to compute trajectories
+            var_vid = Video(frames=self.vision.frames[-self.nvar:],
+                            instrument=self.vision.instrument)
+            #### vision.video._frames = vision.video._frames[-nvar:] is an alternative,
+            #### but it could interfere with the recording (i.e. vision.video)
+            var_vid.set_trajectories(verbose=False,
+                                     search_range=self.vision.linkTol,
+                                     memory=int(self.vision.nskip + 3))
+            trajs = var_vid.traj_dfs
+
+            #### Next, compute each trajectory's variance (to adjust trap strength)
+            #### and mean position (to pair it to a trap)
+            d = {'x': [], 'y': [], 'frame': [], 'var': []}
+            for i, traj in enumerate(trajs):
+                d['x'].append(np.mean(traj.x))
+                d['y'].append(np.mean(traj.y))
+                d['frame'].append(0)
+                d['var'].append(np.mean(traj.x.to_numpy()**2 + traj.y.to_numpy()**2))
+            stat_df = pd.DataFrame(data=d)
+
+            #### Use trackpy to match each trajectory with the respective trap
+            traps = self.parent.pattern.pattern
+            d = {'x': [], 'y': [], 'frame': [], 'trap': []}
+            for i, trap in enumerate(traps.flatten()):
+                d['x'].append(trap.r.x)
+                d['y'].append(trap.r.y)
+                d['frame'].append(1)
+                d['trap'].append(i)
+            stat_df = stat_df.append(pd.DataFrame(data=d))
+            stat_df = tp.link(stat_df, self.vision.linkTol)
+
+            #### Adjust each trap based on the variance of its trapped particle
+            for i, trap in enumerate(traps.flatten()):
+                particle = stat_df[stat_df.trap == i].particle.iloc[0]
+                var = stat_df[(stat_df.frame == 0) & (stat_df.particle == particle)]['var'].iloc[0]
+                trap.alpha = trap.alpha*self.target/var
+
+            self.cdelay = self.ndelay  #### Delay for ndelay frames while traps adjust
+
+        if self.cdelay != 0:
+            self.cdelay -= 1
+            self.count -= 1
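
The statistics step at the heart of this draft reduces to a mean and a second moment per linked trajectory. A minimal standalone sketch of that computation on a trackpy-style table (the coordinates below are made up; note that the centered variance shown here is the form the second patch adopts, while the draft above takes the uncentered second moment):

    import numpy as np
    import pandas as pd

    # Toy trajectory table in trackpy's layout: one row per detection,
    # with a 'particle' label assigned by the linker.
    trajs = pd.DataFrame({'x': [10.0, 10.2, 9.9, 50.1, 49.8, 50.0],
                          'y': [20.0, 19.8, 20.1, 35.0, 35.3, 34.9],
                          'particle': [0, 0, 0, 1, 1, 1]})

    for particle, traj in trajs.groupby('particle'):
        x_mean, y_mean = traj.x.mean(), traj.y.mean()
        # Centered in-plane variance <|dr|^2> about the mean position
        var = np.mean((traj.x - x_mean)**2 + (traj.y - y_mean)**2)
        print(particle, x_mean, y_mean, var)  # mean pairs to a trap; var drives alpha
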
From 22f7bbed443e4906bd93bde172434f0dc4b9dc54 Mon Sep 17 00:00:00 2001
From: Jackie Sustiel <43007813+sustielz@users.noreply.github.com>
Date: Tue, 26 May 2020 20:12:28 -0400
Subject: [PATCH 2/3] Update and rename AutoTighten.p to AutoTighten.py

---
 tasks/AutoTighten.p  | 84 ----------------------------------------------
 tasks/AutoTighten.py | 83 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 83 insertions(+), 84 deletions(-)
 delete mode 100644 tasks/AutoTighten.p
 create mode 100644 tasks/AutoTighten.py

diff --git a/tasks/AutoTighten.p b/tasks/AutoTighten.p
deleted file mode 100644
index 795bb5c5..00000000
--- a/tasks/AutoTighten.p
+++ /dev/null
@@ -1,84 +0,0 @@
-# -*- coding: utf-8 -*-
-# MENU: Auto-Trap
-# VISION: True
-
-from .Task import Task
-from PyQt5.QtGui import QVector3D
-from collections import deque
-import numpy as np
-import pandas as pd
-import trackpy as tp
-from .Video import Video
-
-N_frames = 200  # number of frames used to compute variance
-## N_it = 10    # number of iterations to perform (implicit in nframes in the task 'doprocess')
-N_delay = 10    # number of frames to delay between adjustments
-alpha_max = 10
-alpha_min = 0
-tol = None
-count = 0
-
-
-class AutoTighten(Task):
-
-    def __init__(self, target=1, alpha=[0, 10], nframes=1050, nvar=200, ndelay=10, **kwargs):
-        super(AutoTighten, self).__init__(**kwargs)
-        self.nframes = nframes  ## Number of frames to run 'doprocess'
-        self.nvar = nvar        ## Number of frames to track to compute variance
-        self.ndelay = ndelay    ## Number of frames to delay between iterations
-        self.target = target    ## Target variance
-        self.alpha_min = alpha[0]
-        self.alpha_max = alpha[1]
-
-    def initialize(self, frame):
-        ## Initialize counters; traps are paired to features in process
-        self.vision = self.parent.vision
-        self.vision.realTime(True)
-        self.count = 0
-        self.cdelay = 0
-
-    def process(self, frame):
-        self.count += 1
-        if self.count % self.nvar == 0:
-            #### First, make a video from the last nvar frames to compute trajectories
-            var_vid = Video(frames=self.vision.frames[-self.nvar:],
-                            instrument=self.vision.instrument)
-            #### vision.video._frames = vision.video._frames[-nvar:] is an alternative,
-            #### but it could interfere with the recording (i.e. vision.video)
-            var_vid.set_trajectories(verbose=False,
-                                     search_range=self.vision.linkTol,
-                                     memory=int(self.vision.nskip + 3))
-            trajs = var_vid.traj_dfs
-
-            #### Next, compute each trajectory's variance (to adjust trap strength)
-            #### and mean position (to pair it to a trap)
-            d = {'x': [], 'y': [], 'frame': [], 'var': []}
-            for i, traj in enumerate(trajs):
-                d['x'].append(np.mean(traj.x))
-                d['y'].append(np.mean(traj.y))
-                d['frame'].append(0)
-                d['var'].append(np.mean(traj.x.to_numpy()**2 + traj.y.to_numpy()**2))
-            stat_df = pd.DataFrame(data=d)
-
-            #### Use trackpy to match each trajectory with the respective trap
-            traps = self.parent.pattern.pattern
-            d = {'x': [], 'y': [], 'frame': [], 'trap': []}
-            for i, trap in enumerate(traps.flatten()):
-                d['x'].append(trap.r.x)
-                d['y'].append(trap.r.y)
-                d['frame'].append(1)
-                d['trap'].append(i)
-            stat_df = stat_df.append(pd.DataFrame(data=d))
-            stat_df = tp.link(stat_df, self.vision.linkTol)
-
-            #### Adjust each trap based on the variance of its trapped particle
-            for i, trap in enumerate(traps.flatten()):
-                particle = stat_df[stat_df.trap == i].particle.iloc[0]
-                var = stat_df[(stat_df.frame == 0) & (stat_df.particle == particle)]['var'].iloc[0]
-                trap.alpha = trap.alpha*self.target/var
-
-            self.cdelay = self.ndelay  #### Delay for ndelay frames while traps adjust
-
-        if self.cdelay != 0:
-            self.cdelay -= 1
-            self.count -= 1
diff --git a/tasks/AutoTighten.py b/tasks/AutoTighten.py
new file mode 100644
index 00000000..3cded616
--- /dev/null
+++ b/tasks/AutoTighten.py
@@ -0,0 +1,83 @@
+# -*- coding: utf-8 -*-
+# MENU: Auto-Trap
+# VISION: True
+
+from .Task import Task
+from PyQt5.QtGui import QVector3D
+from collections import deque
+import numpy as np
+import pandas as pd
+import trackpy as tp
+from .Video import Video
+
+
+class AutoTighten(Task):
+    """ """
+
+    def __init__(self, target=1, alpha=[0, 10], nframes=1050, nvar=200, ndelay=10, **kwargs):
+        super(AutoTighten, self).__init__(**kwargs)
+        self.target = target        ## Target variance
+        self.alpha_min = alpha[0]   ## Allowable trap strength (alpha) range
+        self.alpha_max = alpha[1]
+        self.nframes = nframes      ## Number of frames to run 'doprocess'
+        self.nvar = nvar        ## Number of frames to track to compute variance
+        self.ndelay = ndelay    ## Number of frames to delay between iterations
+
+    def initialize(self, frame):  ## pair traps to features
+        self.vision = self.parent.vision
+        self.vision.realTime(True)
+        self.count = 0
+        self.cdelay = 0
+
+    def process(self, frame):
+        self.count += 1
+        if self.count % self.nvar == 0:
+            #### First, send trajectories from the last nvar frames to a dataframe
+            frames = self.vision.video.frames
+            nvar = self.nvar
+            while(frames[-nvar].framenumber < frame.framenumber - self.nvar and index >= -self.nvar - 1):
+                nvar += 1  ## Use nvar CAMERA frames, rather than nvar (detecting) VISION frames
+
+            d = {'x': [], 'y': [], 'frame': []}
+            for frame in frames[-index:]:
+                for feat in frame.features:
+                    d['x'].append(feat.model.particle.x_p)
+                    d['y'].append(feat.model.particle.y_p)
+                    d['frame'].append(frame.framenumber)
+            trajs = tp.link(pd.DataFrame(data=d), self.vision.linkTol, memory=int(self.vision.nskip + 3))
+
+            #### Next, use mean position to pair particle trajectories with trap positions
+            d = {'x': [], 'y': [], 'frame': [], 'val': []}
+            for particle in range(max(trajs.particle) + 1):
+                x = trajs[trajs.particle == particle].x
+                y = trajs[trajs.particle == particle].y
+                d['x'].append(np.mean(x))
+                d['y'].append(np.mean(y))
+                d['val'].append(np.mean((x - np.mean(x))**2 + (y - np.mean(y))**2))  #### val = variance = |dr|^2
+                d['frame'].append(0)  #### trajectories at frame 0
+            stat_df = pd.DataFrame(data=d)
+
+            traps = self.parent.pattern.pattern  #### Now, find trap positions... NOTE: if traps don't move, this can be done in initialize
+            d = {'x': [], 'y': [], 'frame': [], 'val': []}
+            for i, trap in enumerate(traps.flatten()):
+                d['x'].append(trap.r.x)
+                d['y'].append(trap.r.y)
+                d['val'].append(i)    #### val = trap index
+                d['frame'].append(1)  #### traps at frame 1
+            trap_df = pd.DataFrame(data=d)
+
+            #### Match trajectories to traps, and adjust each trap based on variance of trapped particle
+            pair_df = stat_df.append(trap_df)
+            pair_df = tp.link(pair_df, self.vision.linkTol)
+
+            #### Adjust each trap based on variance of its trapped particle
+            for i, trap in enumerate(traps.flatten()):
+                particle = stat_df[stat_df.trap == i].particle
+                var = stat_df[(stat_df.frame == 0) & (stat_df.particle == particle)].var
+                trap.alpha = trap.alpha*self.target/var
+
+            self.cdelay = self.ndelay  #### Delay for ndelay frames while traps adjust
+
+        if self.cdelay != 0:
+            self.cdelay -= 1
+            self.count -= 1
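
The pairing step above leans on a trick: trackpy's linker does not care that the "frames" are synthetic, so placing trajectory means at frame 0 and trap positions at frame 1 makes tp.link perform nearest-neighbor matching between the two sets. A self-contained sketch with made-up coordinates and an illustrative search range (the 'val' column carries the variance for trajectory rows and the trap index for trap rows, as in the patch):

    import pandas as pd
    import trackpy as tp

    # "Frame 0": mean positions of two trajectories; 'val' holds their variances.
    stat_df = pd.DataFrame({'x': [10.0, 50.0], 'y': [20.0, 35.0],
                            'val': [0.15, 0.40], 'frame': [0, 0]})
    # "Frame 1": positions of two traps; 'val' holds the trap index.
    trap_df = pd.DataFrame({'x': [10.5, 49.5], 'y': [19.5, 35.5],
                            'val': [0, 1], 'frame': [1, 1]})

    # Rows that link across the two synthetic frames share a 'particle' label,
    # so each trap inherits the label of its nearest trajectory.
    pair_df = tp.link(stat_df.append(trap_df), 5)
    print(pair_df)
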
From 01347de04f44ddf03f75f06580d91cd35257b23e Mon Sep 17 00:00:00 2001
From: Jackie Sustiel <43007813+sustielz@users.noreply.github.com>
Date: Wed, 27 May 2020 18:17:20 -0400
Subject: [PATCH 3/3] Update AutoTighten.py

---
 tasks/AutoTighten.py | 54 +++++++++++++++++++++++++++-----------------
 1 file changed, 31 insertions(+), 23 deletions(-)

diff --git a/tasks/AutoTighten.py b/tasks/AutoTighten.py
index 3cded616..4b70a0ae 100644
--- a/tasks/AutoTighten.py
+++ b/tasks/AutoTighten.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# MENU: Auto-Trap
+# MENU: Auto-Tighten
 # VISION: True
 
 from .Task import Task
@@ -8,11 +8,10 @@
 import numpy as np
 import pandas as pd
 import trackpy as tp
-from .Video import Video
 
 
 class AutoTighten(Task):
-    """ """
+    """Adjust each trap's power output (alpha) until the trapped particle's variance approaches a target value."""
 
     def __init__(self, target=1, alpha=[0, 10], nframes=1050, nvar=200, ndelay=10, **kwargs):
         super(AutoTighten, self).__init__(**kwargs)
@@ -23,28 +22,28 @@ def __init__(self, target=1, alpha=[0, 10], nframes=1050, nvar=200, ndelay=10, **kwargs):
         self.nvar = nvar        ## Number of frames to track to compute variance
         self.ndelay = ndelay    ## Number of frames to delay between iterations
 
-    def initialize(self, frame):  ## pair traps to features
+    def initialize(self, frame):
         self.vision = self.parent.vision
         self.vision.realTime(True)
         self.count = 0
         self.cdelay = 0
 
-    def process(self, frame):
-        self.count += 1
+    def doprocess(self, frame):
         if self.count % self.nvar == 0:
             #### First, send trajectories from the last nvar frames to a dataframe
             frames = self.vision.video.frames
             nvar = self.nvar
-            while(frames[-nvar].framenumber < frame.framenumber - self.nvar and index >= -self.nvar - 1):
-                nvar += 1  ## Use nvar CAMERA frames, rather than nvar (detecting) VISION frames
+            while frames[-nvar].framenumber < frame.framenumber - self.nvar:
+                nvar -= 1  ## Trim so the window spans nvar CAMERA frames, rather than nvar (detecting) VISION frames
 
             d = {'x': [], 'y': [], 'frame': []}
-            for frame in frames[-index:]:
-                for feat in frame.features:
-                    d['x'].append(feat.model.particle.x_p)
-                    d['y'].append(feat.model.particle.y_p)
+            for frame in frames[-nvar:]:
+                for feature in frame.features:
+                    d['x'].append(feature.model.particle.x_p)
+                    d['y'].append(feature.model.particle.y_p)
                     d['frame'].append(frame.framenumber)
             trajs = tp.link(pd.DataFrame(data=d), self.vision.linkTol, memory=int(self.vision.nskip + 3))
+            #### NOTE: Should everything up to here be done with a modified Trajectory object? Or should we just keep emptying QVision's Video object?
 
             #### Next, use mean position to pair particle trajectories with trap positions
             d = {'x': [], 'y': [], 'frame': [], 'val': []}
@@ -57,9 +56,9 @@ def process(self, frame):
                 d['frame'].append(0)  #### trajectories at frame 0
             stat_df = pd.DataFrame(data=d)
 
-            traps = self.parent.pattern.pattern  #### Now, find trap positions... NOTE: if traps don't move, this can be done in initialize
+            self.traps = self.parent.pattern.pattern  #### Now, find trap positions... NOTE: if traps don't move, this can be done in initialize
             d = {'x': [], 'y': [], 'frame': [], 'val': []}
-            for i, trap in enumerate(traps.flatten()):
+            for i, trap in enumerate(self.traps.flatten()):
                 d['x'].append(trap.r.x)
                 d['y'].append(trap.r.y)
                 d['val'].append(i)    #### val = trap index
@@ -70,14 +69,23 @@ def process(self, frame):
             pair_df = stat_df.append(trap_df)
             pair_df = tp.link(pair_df, self.vision.linkTol)
 
-            #### Adjust each trap based on variance of its trapped particle
-            for i, trap in enumerate(traps.flatten()):
-                particle = stat_df[stat_df.trap == i].particle
-                var = stat_df[(stat_df.frame == 0) & (stat_df.particle == particle)].var
-                trap.alpha = trap.alpha*self.target/var
+            #### Adjust each trap based on the variance of its trapped particle
+            for i, trap in enumerate(self.traps.flatten()):
+                particle = pair_df[(pair_df.frame == 1) & (pair_df.val == i)].particle.iloc[0]    #### traps are frame 1; trap 'val' is the trap index
+                var = pair_df[(pair_df.frame == 0) & (pair_df.particle == particle)].val.iloc[0]  #### trajs are frame 0; traj 'val' is the variance
+
+                alpha_new = trap.alpha*self.target/var
+                if alpha_new > self.alpha_max:
+                    trap.alpha = self.alpha_max
+                elif alpha_new < self.alpha_min:
+                    trap.alpha = self.alpha_min
+                else:
+                    trap.alpha = alpha_new
 
-            self.cdelay = self.ndelay  #### Delay for ndelay frames while traps adjust
+            self.count += 1
+            self.cdelay = self.ndelay  #### Delay for ndelay frames while traps adjust
 
-        if self.cdelay != 0:
-            self.cdelay -= 1
-            self.count -= 1
+        elif self.cdelay == 0:
+            self.count += 1
+        else:
+            self.cdelay -= 1
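
Stripped of the bookkeeping, the per-trap update applied in the final revision is a one-line feedback rule plus clamping. A self-contained sketch (default bounds taken from the class defaults; the numbers in the example calls are made up):

    def next_alpha(alpha, var, target, alpha_min=0.0, alpha_max=10.0):
        # One update of the patch's rule: scale alpha by target/var, then clamp.
        return max(alpha_min, min(alpha_max, alpha * target / var))

    print(next_alpha(2.0, var=4.0, target=1.0))  # 0.5: variance above target lowers alpha
    print(next_alpha(2.0, var=0.1, target=1.0))  # 10.0: raw update (20.0) clamped to alpha_max

Because each adjustment is followed by an ndelay-frame pause, the rule is applied iteratively: traps whose particles have not yet reached the target variance are rescaled again on the next pass.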