diff --git a/.idea/codeStyles/codeStyleConfig.xml b/.idea/codeStyles/codeStyleConfig.xml
old mode 100644
new mode 100755
diff --git a/.idea/dictionaries/dawlat_local.xml b/.idea/dictionaries/dawlat_local.xml
new file mode 100755
index 0000000..e92a3c1
--- /dev/null
+++ b/.idea/dictionaries/dawlat_local.xml
@@ -0,0 +1,3 @@
+
+
+
\ No newline at end of file
diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml
old mode 100644
new mode 100755
index 25bde2c..07fef8e
--- a/.idea/inspectionProfiles/Project_Default.xml
+++ b/.idea/inspectionProfiles/Project_Default.xml
@@ -5,10 +5,19 @@
+
+
+
\ No newline at end of file
diff --git a/.idea/libraries/R_User_Library.xml b/.idea/libraries/R_User_Library.xml
old mode 100644
new mode 100755
diff --git a/.idea/misc.xml b/.idea/misc.xml
old mode 100644
new mode 100755
index 6774f84..3b8b152
--- a/.idea/misc.xml
+++ b/.idea/misc.xml
@@ -1,6 +1,6 @@
-
+
diff --git a/.idea/modules.xml b/.idea/modules.xml
old mode 100644
new mode 100755
index 20376e6..1f86f92
--- a/.idea/modules.xml
+++ b/.idea/modules.xml
@@ -2,7 +2,7 @@
-
+
\ No newline at end of file
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
old mode 100644
new mode 100755
diff --git a/.idea/vmreact.iml b/.idea/vmreact.iml
old mode 100644
new mode 100755
diff --git a/.idea/vmreact_conda.iml b/.idea/vmreact_conda.iml
new file mode 100755
index 0000000..3a4807d
--- /dev/null
+++ b/.idea/vmreact_conda.iml
@@ -0,0 +1,13 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/README.md b/README.md
index a0a82a7..946fb80 100755
--- a/README.md
+++ b/README.md
@@ -1,6 +1,7 @@
# vmreact
These directories contain the vmreact scripts that parse and clean up the raw data output and generate additional measures.
+Check out the wiki page for detailed installation instructions:
**`WIKI PAGE: https://github.com/daelsaid/vmreact/wiki`**
@@ -10,18 +11,19 @@ Relative paths for each directory within "vmreact" are listed below:
# VMREACT WORKFLOW
-MAIN SCRIPT: **vmreact_local_lab_main_workflow_all_sites.sh**
-
-
+MAIN SCRIPT: **`vmreact_local_lab_main_workflow_all_sites.sh`**
Usage: full_inquisit_wkflw_all_sites.sh : numeric subject ID (####)
+
: single numeric digit representing the timepoint (1, 2, 3, 4, ...)
+
: location of administration (either newmex or emoryu)
+
: list form. If timepoint 2, enter a single numeric digit (1, 2, 3, or 4); if timepoint 1, leave BLANK
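+
+For example, a hypothetical invocation for subject 1234 at timepoint 2, administered at emoryu with list form 3:
+
+    bash full_inquisit_wkflw_all_sites.sh 1234 2 emoryu 3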
-* running this script will:
+**Running this script will:**
1. Administer the task
2. Organize the participant data folder
diff --git a/venv/lib/python2.7/site-packages/IPython/core/__init__.py b/build/lib/vmreact-data-visualization/__init__.py
similarity index 100%
rename from venv/lib/python2.7/site-packages/IPython/core/__init__.py
rename to build/lib/vmreact-data-visualization/__init__.py
diff --git a/build/lib/vmreact-data-visualization/vmreact_learning_trajectory_visualization.py b/build/lib/vmreact-data-visualization/vmreact_learning_trajectory_visualization.py
new file mode 100755
index 0000000..da9a26a
--- /dev/null
+++ b/build/lib/vmreact-data-visualization/vmreact_learning_trajectory_visualization.py
@@ -0,0 +1,133 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+"""
+Created on Mon Mar 5 17:09:19 2018
+
+@author: dawlat_local
+"""
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+import pandas
+from matplotlib.backends.backend_pdf import PdfPages
+from prettyplotlib import brewer2mpl
+
+set2 = brewer2mpl.get_map('Set2', 'qualitative', 8).mpl_colors
+set1 = brewer2mpl.get_map('Set1', 'qualitative', 9).mpl_colors
+bmap = brewer2mpl.get_map('Dark2', 'qualitative', 4).mpl_colors
+mpl.rcParams['axes.color_cycle'] = bmap
+
+# scored csv (input path left blank in the source; fill in before running)
+patient_scored = pandas.read_csv('')
+test_df=patient_scored.loc[:,'subj_id':'trial7'].dropna()
+
+pt_all_trials=pandas.DataFrame(data=patient_scored.set_index('subj_id').loc[:,'trial1':'trial7']).dropna().astype(int)
+pt_learning_trials=pandas.DataFrame(data=patient_scored.set_index('subj_id').loc[:,'trial1':'trial5']).dropna().astype(int)
+
+
+
+#tp1 tp2 composite scores
+comp=['total_learning', 'corrected_total_learning','learning_rate','proactive_interference','retroactive_interference','forgetting_and_retention']
+comp_2=['total_learning_2', 'corrected_total_learning_2','learning_rate_2','proactive_interference_2','retroactive_interference_2','forgetting_and_retention_2']
+
+#tp1 tp2 scored data
+y=['trial1', 'trial2', 'trial3', 'trial4', 'trial5', 'listb', 'trial6', 'trial7','total_learning', 'corrected_total_learning','learning_rate','proactive_interference','retroactive_interference','forgetting_and_retention']
+y_2=['trial1_2', 'trial2_2', 'trial3_2', 'trial4_2', 'trial5_2', 'listb_2', 'trial6_2', 'trial7_2']
+
+columns = [c for c in comp]
+columns_2 = [c for c in comp_2]
+
+plt.cla()
+plt.clf()
+plt.close()
+
+
+
+with PdfPages('/Users/lillyel-said/Desktop/tp1_tp2_patient_learning_trajectories.pdf') as pdf:
+ for index,value in test_df.groupby('subj_id'):
+ val=value.loc[:,'trial1':'trial7']
+ fig, axes = plt.subplots(nrows=1, ncols=1)
+ color = 'blue'
+ fig=pt_all_trials.loc[index][y_2].plot\
+ (ax=axes, y=y_2, subplots=True, fontsize=14, grid=True, yticks=range(0,16), ylim=(0,16),xticks=range(0,8), xlim=(-0.20,7.2),color=color,marker='o', linewidth=3.0, markersize=3.5)
+ fig2=pt_all_trials.loc[index][y].plot\
+ (ax=axes, y=y, subplots=True, fontsize=14, grid=True, yticks=range(0,16), ylim=(0,16),xticks=range(0,8), xlim=(-0.20,7.2), marker='o', linewidth=3.0, markersize=3.5)
+ title= 'Learning Trajectory for Pt: ' + str(index.replace("'", "").replace('.0',""))
+ axes.set_title(title, fontsize=15)
+ axes.legend(['tp1','tp2'])
+ pdf.savefig()
+
+
+
+with PdfPages('/Users/lillyel-said/Desktop/patient_specific_learning_trajectories.pdf') as pdf:
+ for index,value in test_df.groupby('subj_id'):
+ print index,value
+ axes = plt.subplot(111)
+        fig=pt_all_trials.loc[index].astype(float).plot(ax=axes, y=y, subplots=False, fontsize=16, grid=True,yticks=range(0,16), ylim=(0,16), figsize=(14,10), xticks=range(0,6),xlim=(-0.2,4.2), marker='o', linewidth=2, markersize=5,color='purple')
+ title= 'Patient Learning Trials'
+ axes.set_title(title, fontsize=20)
+ pdf.savefig()
+ plt.cla()
+ plt.clf()
+ plt.close()
+
+
+
+ for ix, value in test_df.groupby('subj_id'):
+ axes2= plt.subplot(121)
+ learning2=pt_all_trials.loc[(ix)][['trial5','trial7']].astype(float).plot(ax=axes2,fontsize=15, subplots=False, grid=True, yticks=range(0,16), figsize=(14,8), ylim=(0,16), xticks=range(0,16), xlim=(-0.2,1.2), marker='o', linewidth=1.75, markersize=5.0, color='#96D38C')
+        axes2.set_ylabel('# of words remembered')
+ pdf.savefig()
+
+ new_df=pt_all_trials[['trial5','trial7']].astype(int)
+    new_df["diff_7_5"] = pt_all_trials["trial7"].sub(pt_all_trials["trial5"].astype(int), axis=0)
+ sorted_df=new_df.sort_values(['diff_7_5'])
+ test_group5_7=[]
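+    # Group patients by change from trial 5 to delayed recall (trial7 - trial5):
+    # group 1 improved (> 0), group 2 lost 0-5 words, group 3 lost more than 5.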
+
+    for idx,val in sorted_df.groupby(level=0):
+        if (val.loc[:,'diff_7_5'] > 0).bool():
+            print idx, '1'
+            test_group5_7.append([idx,1])
+        elif (val.loc[:,'diff_7_5'] < -5).bool():
+            print idx, '3'
+            test_group5_7.append([idx,3])
+        else:
+            print idx, '2'
+            test_group5_7.append([idx,2])
+
+
+ df_with_groupings=pandas.DataFrame(data=test_group5_7,columns=['subj_id','group'])
+ new_df=new_df.reset_index()
+ merged_df=pandas.merge(new_df, df_with_groupings, on='subj_id',copy=True, indicator=False).reset_index()
+
+ plt.cla()
+ plt.clf()
+ plt.close()
+
+
+#trials 5 scores vs trial 7 delayed scores
+with PdfPages('/Users/lillyel-said/Desktop/patient_specific_learning_trajectories.pdf') as pdf:
+ for index,value in merged_df.groupby('subj_id'):
+ ax2= plt.subplot(121)
+        color = 'coral' if (value['group'] == 1).any() else 'skyblue' if (value['group'] == 2).any() else 'lightgreen'
+        learning_to_delay=pt_all_trials.loc[(index)][['trial5','trial7']].astype(float).plot(ax=ax2,fontsize=15, grid=True, yticks=range(0,16), figsize=(16,10), ylim=(0,16), xticks=range(0,16), xlim=(-0.2,1.2), marker='o', c=color, subplots=False, linewidth=1.75, markersize=5.0)
+        ax2.set_title('Patient performance grouped based on Trial 5 and 7 scores')
+        handles, labels = ax2.get_legend_handles_labels()
+ labels=['group 1: 0+ words remembered', 'group 2: between 0 and -5','group 3: > -5 words remembered' ]
+ display = (0,1,2)
+ ax2.legend([label for i,label in enumerate(labels) if i in display],fontsize=10, bbox_to_anchor=(1.57,1.00), ncol=1)
+ pdf.savefig()
+
+
+
+#trials 5-6 differences
+
+xtick_labels_5_6=['trial5', 'trial6']
+for index,value in test_df.groupby('subj_id'):
+ val=value.loc[:,'trial1':'trial7']
+ axes1 = plt.subplot(121)
+ fig1=pt_all_trials.loc[(index)][['trial5','trial6']].astype(float).plot(ax=axes1, fontsize=15, grid=True, yticks=range(0,16), figsize=(12,6), ylim=(0,16), xticks=range(0,2), xlim=(-0.2,1.2), marker='.', linewidth=1.5, markersize=5.0, legend=False, color='b')
+    axes1.set_ylabel('# of words remembered')
+ ax2= plt.subplot(122)
+ learning_to_delay=pt_all_trials.loc[(index)][['trial5','trial7']].astype(float).plot(ax=ax2,fontsize=15, grid=True, yticks=range(0,16), figsize=(12,6), ylim=(0,16), xticks=range(0,16), xlim=(-0.2,1.2), marker='.', linewidth=1.75, markersize=5.0)
+ ax2.legend(fontsize=10,bbox_to_anchor=(1.37,1.10), ncol=1)
diff --git a/venv/lib/python2.7/site-packages/IPython/core/tests/__init__.py b/build/lib/vmreact-master/scripts/__init__.py
similarity index 100%
rename from venv/lib/python2.7/site-packages/IPython/core/tests/__init__.py
rename to build/lib/vmreact-master/scripts/__init__.py
diff --git a/venv/lib/python2.7/site-packages/IPython/extensions/tests/__init__.py b/build/lib/vmreact-master/scripts/grader/__init__.py
similarity index 100%
rename from venv/lib/python2.7/site-packages/IPython/extensions/tests/__init__.py
rename to build/lib/vmreact-master/scripts/grader/__init__.py
diff --git a/build/lib/vmreact-master/scripts/grader/complete_inquisit_output.py b/build/lib/vmreact-master/scripts/grader/complete_inquisit_output.py
new file mode 100755
index 0000000..12b6a03
--- /dev/null
+++ b/build/lib/vmreact-master/scripts/grader/complete_inquisit_output.py
@@ -0,0 +1,54 @@
+import argparse
+import datetime
+import os
+
+from composite_scores import composite_scores
+from inquisit_demo_summary import demo_and_summary
+from inquisit_demo_summary_newageranges import demo_and_summary_new
+from inquisit_grader import grader
+
+format = "%Y_%m_%d"
+current_date = datetime.datetime.today()
+date = current_date.strftime(format)
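+# e.g. '2018_06_01'; this datestamp is appended to every output filename below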
+
+parser = argparse.ArgumentParser(
+ description='Grades inquisit data, output: frequency counts of responses to demo survey, parsed raw data (all, primacy, recency), scored data (all, primacy, recency), SR responses compiled, subject age ranges and gender, summary ANT scores, word correlations (all, primacy, recency)')
+
+parser.add_argument('-r', dest='raw_data', help='path to raw data', type=str, required=True)
+parser.add_argument('-d', dest='demo_data', help='demo_csv', type=str, required=True)
+parser.add_argument('-s', dest='summary_data', help='summary csv', type=str, required=True)
+parser.add_argument('-o', dest='output_csv_location', help='path to output folder', type=str, default=os.getcwd())
+
+args = parser.parse_args()
+
+if not os.path.isdir(args.output_csv_location):
+ os.mkdir(args.output_csv_location)
+
+all_subj_data_csv = args.raw_data
+demographic_data = args.demo_data
+final_summary_csv = args.summary_data
+
+demo_and_summary(all_subj_data_csv, args.demo_data, args.summary_data,
+ os.path.join(args.output_csv_location, 'frequency_counts' + '_' + date + '.csv'),
+ os.path.join(args.output_csv_location, 'subj_age_agerange_gender' + '_' + date + '.csv'),
+ os.path.join(args.output_csv_location, 'sr_responses' + '_' + date + '.csv'),
+ os.path.join(args.output_csv_location, 'summary_ant_scores' + '_' + date + '.csv'))
+
+demo_and_summary_new(all_subj_data_csv, args.demo_data, os.path.join(args.output_csv_location,
+ 'subj_age_agerange_gender_new_age_bins' + '_' + date + '.csv'))
+
+grader(all_subj_data_csv, os.path.join(args.output_csv_location, 'parsed_raw_data' + '_' + date + '.csv'),
+ os.path.join(args.output_csv_location, 'scored_data' + '_' + date + '.csv'),
+ os.path.join(args.output_csv_location, 'word_correlations' + '_' + date + '.csv'), 0)
+
+grader(all_subj_data_csv, os.path.join(args.output_csv_location, 'parsed_raw_data_primacy' + '_' + date + '.csv'),
+ os.path.join(args.output_csv_location, 'scored_data_primacy' + '_' + date + '.csv'),
+ os.path.join(args.output_csv_location, 'word_correlations_primacy' + '_' + date + '.csv'), 1)
+
+grader(all_subj_data_csv, os.path.join(args.output_csv_location, 'parsed_raw_data_recency' + '_' + date + '.csv'),
+ os.path.join(args.output_csv_location, 'scored_data_recency' + '_' + date + '.csv'),
+ os.path.join(args.output_csv_location, 'word_correlations_recency' + '_' + date + '.csv'), 2)
+
+scored_data = os.path.join(args.output_csv_location, 'scored_data' + '_' + date + '.csv')
+
+composite_scores(1, scored_data, os.path.join(args.output_csv_location, 'composite_scores_vakil' + '_' + date + '.csv'))
diff --git a/build/lib/vmreact-master/scripts/grader/composite_scores.py b/build/lib/vmreact-master/scripts/grader/composite_scores.py
new file mode 100755
index 0000000..48d0848
--- /dev/null
+++ b/build/lib/vmreact-master/scripts/grader/composite_scores.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python2
+
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Feb 27 12:04:33 2018
+
+@author: dawlat_elsaid
+"""
+
+import pandas
+
+def composite_scores(get_comp_scores, input_csv, output_csv):
+ scored_data = pandas.read_csv(input_csv)
+
+ if get_comp_scores == 1:
+ df_trials = scored_data.loc[:, 'subj_id':'trial7']
+ composite_scores = pandas.DataFrame()
+ tmp = pandas.DataFrame()
+
+ composite_scores[['subj_id', 'list_type']] = df_trials[['subj_id', 'list_type']]
+
+ composite_scores['total_learning'] = df_trials[['trial1', 'trial2', 'trial3', 'trial4', 'trial5']].apply(
+ lambda row: pandas.np.sum(row), axis=1)
+
+        tmp['test'] = df_trials['trial1'] * 5
+
+ composite_scores['corrected_total_learning'] = composite_scores['total_learning'].subtract(tmp['test'])
+
+ composite_scores['learning_rate'] = df_trials['trial5'].subtract(df_trials['trial1'], axis='rows')
+ composite_scores['proactive_interference'] = df_trials['trial1'].subtract(scored_data['listb'], axis='rows')
+ composite_scores['retroactive_interference'] = df_trials['trial5'].subtract(df_trials['trial6'], axis='rows')
+ composite_scores['forgetting_and_retention'] = df_trials['trial5'].subtract(df_trials['trial7'], axis='rows')
+ # composite_scores_transposed=composite_scores.transpose()
+ # composite_scores_transposed.to_csv(output_csv,header=True,index=['measure','score'])
+ composite_scores.to_csv(output_csv, header=True, index=['measure', 'score'])
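+
+# Example (hypothetical filenames; get_comp_scores=1 enables the composite measures):
+# composite_scores(1, 'scored_data_2018_06_01.csv', 'composite_scores_vakil_2018_06_01.csv')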
diff --git a/build/lib/vmreact-master/scripts/grader/inquisit_demo_summary.py b/build/lib/vmreact-master/scripts/grader/inquisit_demo_summary.py
new file mode 100755
index 0000000..58a97af
--- /dev/null
+++ b/build/lib/vmreact-master/scripts/grader/inquisit_demo_summary.py
@@ -0,0 +1,170 @@
+import collections
+import csv
+
+
+def demo_and_summary(all_subj_data_csv, demographic_data, final_summary_csv, frequency_count, subj_age_agerange_gender,
+ sr_responses, summary_ant_scores):
+ with open(all_subj_data_csv, 'U') as file:
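+        # csv.reader yields rows; zip(*rows) transposes them so the dict below maps
+        # each column header to the list of values in that column.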
+ input_csv_lines_all_subj = csv.reader(file)
+ input_csv_lines_all_subj = map(list, zip(*input_csv_lines_all_subj))
+ all_subj_csv_lines = dict((rows[0], rows[1:]) for rows in input_csv_lines_all_subj)
+
+ with open(demographic_data, 'U') as file:
+ input_demo_sr_q_csv = csv.reader(file)
+ input_demo_sr_q_csv = map(list, zip(*input_demo_sr_q_csv))
+ demographic_data = dict((rows[0], rows[1:]) for rows in (input_demo_sr_q_csv))
+
+ with open(final_summary_csv, 'U') as file:
+ final_summary_lines = csv.reader(file)
+ final_summary_lines = map(list, zip(*final_summary_lines))
+ rey_summary = dict((rows[0], rows[1:]) for rows in (final_summary_lines))
+
+ age_ranges = {
+ '16-19': range(16, 20, 1),
+ '20-29': range(20, 30, 1),
+ '30-39': range(30, 40, 1),
+ '40-49': range(40, 50, 1),
+ '50-59': range(50, 60, 1),
+ '57-69': range(57, 70, 1),
+ '70-79': range(70, 80, 1),
+ '76-89': range(76, 90, 1)
+ }
+ subj_id_list_demo = []
+ subj_id_only_demo = []
+
+ for subject in sorted(set(all_subj_csv_lines['subject'])):
+ subj_id_only_demo.append(subject)
+ subj_id_list_combined = [demographic_data['subject'][x] for x in range(len(demographic_data['subject'])) if
+ demographic_data['subject'][x] == subject]
+ subj_id_list_demo.append(subj_id_list_combined)
+
+ subj_id_combined = [(idx, val) for idx, val in enumerate(sorted(subj_id_only_demo))]
+
+ subj_val = []
+ key_val_all = []
+ for key in sorted(demographic_data.keys()):
+ for value in sorted(demographic_data[key]):
+ key_val_all.append([key, value])
+ if 'subject' in key:
+ subj_val.append(value)
+ else:
+ continue
+
+ subj_id_with_index = list()
+ for subj_num in subj_val:
+ subj_combined = [[idx, val] for idx, val in enumerate(sorted(subj_id_only_demo)) if val == subj_num]
+ subj_indexvals = [[idx, val] for idx, val in enumerate(sorted(subj_id_only_demo))]
+ subj_id_with_index.append(subj_combined)
+
+ new_demo_dict = dict()
+ for key_var in sorted(demographic_data.keys()):
+ if 'latency' not in key_var and 'group' not in key_var and 'build' not in key_var and 'time' not in key_var and 'date' not in key_var:
+ new_demo_dict[key_var] = []
+
+ for index1, val1 in enumerate(key_val_all):
+ if val1[0] in new_demo_dict.keys():
+ new_demo_dict[val1[0]].append(val1[1])
+
+ counter_demo_dict = dict()
+ for key_q in sorted(new_demo_dict.keys()):
+ answer_count = collections.Counter(new_demo_dict[key_q])
+ counter_demo_dict[key_q] = answer_count
+
+ with open(frequency_count, 'wb') as csvfile:
+ writer = csv.writer(csvfile, delimiter=',')
+ writer.writerow(['survey_question', 'response_counts'])
+ for key, value in sorted(counter_demo_dict.items()):
+ writer.writerow([key, value])
+ csvfile.close()
+
+ subj_age_gender_mem = []
+ x = []
+ for idx2, subj_id in enumerate(subj_id_only_demo):
+ subj_age_gen = [[demographic_data['subject'][x], demographic_data['gender_response'][x].lower(),
+ demographic_data['age_textbox_response'][x]] for x in range(len(demographic_data['subject']))
+ if demographic_data['subject'][x] == subj_id]
+ y = [[demographic_data['subject'][x]] for x in range(len(demographic_data['subject'])) if
+ demographic_data['subject'][x] == subj_id]
+ subj_age_gender_mem.append(subj_age_gen)
+
+ demo_subj_age_gender = [[demographic_data['subject'][x], demographic_data['gender_response'][x].lower(),
+ demographic_data['age_textbox_response'][x]]
+ for x in range(len(demographic_data['subject']))
+ if demographic_data['subject'][x]]
+
+ raw_data_responses = [[all_subj_csv_lines['subject'][x], all_subj_csv_lines['trialcode'][x],
+ all_subj_csv_lines['response'][x].lower()]
+ for x in range(len(all_subj_csv_lines['subject']))
+ if 'recall_response' in all_subj_csv_lines['trialcode'][x]]
+
+ key_val = []
+ for key in age_ranges.keys():
+ for val in age_ranges[key]:
+ key_val.append([key, val])
+
+ id_age_agerange = []
+ with open(subj_age_agerange_gender, 'wb') as csvfile:
+ writer = csv.writer(csvfile, delimiter=',')
+ writer.writerow(['subj_id', 'gender', 'age', 'age_range'])
+ for subj in sorted(demo_subj_age_gender):
+ subj_from_main_raw_list = []
+ ages = subj[2]
+ gender = subj[1]
+ subj_id_raw = [val for val in raw_data_responses if val[0] == subj[0]]
+ for vals in key_val:
+ age_vals = vals[1]
+ age_vals = str(age_vals)
+ if age_vals == ages:
+ complete_list = subj[0] + ',' + gender + "," + age_vals + "," + vals[0]
+ id_age_agerange.append(complete_list)
+ writer.writerow([subj[0], gender, age_vals, vals[0]])
+ csvfile.close()
+
+ subj_id_only = []
+ for subject in sorted(set(all_subj_csv_lines['subject'])):
+ subj_id_only.append(subject)
+
+ subj_id_memory = [subj_mem_trials for subj_mem_trials in subj_id_only]
+
+ subj_ids_summary = [x for x in rey_summary['script.subjectid']]
+ subj_ids_summary = sorted(subj_ids_summary)
+
+ summary_key_val = []
+ for key in sorted(rey_summary.keys()):
+ for value in sorted(rey_summary[key]):
+ summary_key_val.append([key, value])
+
+ new_summary_dict = dict()
+ for sum_key in sorted(rey_summary.keys()):
+ if 'script.starttime' not in sum_key and 'script.startdate' not in sum_key and 'script.elapsedtime' not in sum_key and 'values.trialcount' not in sum_key and 'values.completed' not in sum_key and 'values.trialcount' not in sum_key and 'parameters.min_validlatency' not in sum_key and 'computer.platform' not in sum_key:
+ new_summary_dict[sum_key] = []
+
+ for sum_idx, sum_val in enumerate(summary_key_val):
+ if sum_val[0] in new_summary_dict.keys():
+ new_summary_dict[sum_val[0]].append(sum_val[1])
+
+ subject_summary_sr_responses = [[rey_summary['script.subjectid'][x], rey_summary['expressions.gad_7_total'][x],
+ rey_summary['expressions.phq_total'][x],
+ rey_summary['expressions.pcl_4_total'][x],
+ rey_summary['expressions.pcl_total_hybridscore_corrected'][x]] for x in
+ range(len(rey_summary['script.subjectid'])) if
+ rey_summary['values.end_survey_completed'][x] == '1']
+
+ subject_summary_ant_scores = [
+ [rey_summary['script.subjectid'][x], rey_summary['expressions.overallpercentcorrect'][x],
+ rey_summary['expressions.meanRT'][x], rey_summary['expressions.stdRT'][x]] for x in
+ range(len(rey_summary['script.subjectid'])) if rey_summary['values.end_survey_completed'][x] == '1']
+
+ with open(sr_responses, 'wb') as csvfile:
+ writer = csv.writer(csvfile, delimiter=',')
+ writer.writerow(['subj_id', 'gad_7', 'phq', 'pcl_dsm4', 'pcl_hybrid'])
+ for responses in sorted(subject_summary_sr_responses):
+ writer.writerow(responses)
+ csvfile.close()
+
+ with open(summary_ant_scores, 'wb') as csvfile:
+ writer = csv.writer(csvfile, delimiter=',')
+ writer.writerow(['subj_id', 'percent_correct', 'meanRT', 'stdRT'])
+ for scores in sorted(subject_summary_ant_scores):
+ writer.writerow(scores)
+ csvfile.close()
diff --git a/build/lib/vmreact-master/scripts/grader/inquisit_demo_summary_newageranges.py b/build/lib/vmreact-master/scripts/grader/inquisit_demo_summary_newageranges.py
new file mode 100755
index 0000000..8fb6f04
--- /dev/null
+++ b/build/lib/vmreact-master/scripts/grader/inquisit_demo_summary_newageranges.py
@@ -0,0 +1,91 @@
+import csv
+
+
+def demo_and_summary_new(all_subj_data_csv, demographic_data, subj_age_agerange_gender):
+ with open(all_subj_data_csv, 'U') as file:
+ input_csv_lines_all_subj = csv.reader(file)
+ input_csv_lines_all_subj = map(list, zip(*input_csv_lines_all_subj))
+ all_subj_csv_lines = dict((rows[0], rows[1:]) for rows in input_csv_lines_all_subj)
+
+ with open(demographic_data, 'U') as file:
+ input_demo_sr_q_csv = csv.reader(file)
+ input_demo_sr_q_csv = map(list, zip(*input_demo_sr_q_csv))
+ demographic_data = dict((rows[0], rows[1:]) for rows in (input_demo_sr_q_csv))
+
+ age_ranges = {
+ '20-29': range(20, 30, 1),
+ '30-39': range(30, 40, 1),
+ '40-49': range(40, 50, 1),
+ '50-59': range(50, 60, 1),
+ '60-69': range(60, 70, 1),
+ '70-90': range(70, 90, 1)}
+
+ subj_id_list_demo = []
+ subj_id_only_demo = []
+
+ for subject in sorted(set(all_subj_csv_lines['subject'])):
+ subj_id_only_demo.append(subject)
+ subj_id_list_combined = [demographic_data['subject'][x] for x in range(len(demographic_data['subject'])) if
+ demographic_data['subject'][x] == subject]
+ subj_id_list_demo.append(subj_id_list_combined)
+
+ subj_id_combined = [(idx, val) for idx, val in enumerate(sorted(subj_id_only_demo))]
+
+ subj_val = []
+ key_val_all = []
+ for key in sorted(demographic_data.keys()):
+ for value in sorted(demographic_data[key]):
+ key_val_all.append([key, value])
+ if 'subject' in key:
+ subj_val.append(value)
+ else:
+ continue
+
+ subj_id_with_index = list()
+ for subj_num in subj_val:
+ subj_combined = [[idx, val] for idx, val in enumerate(sorted(subj_id_only_demo)) if val == subj_num]
+ subj_indexvals = [[idx, val] for idx, val in enumerate(sorted(subj_id_only_demo))]
+ subj_id_with_index.append(subj_combined)
+
+ subj_age_gender_mem = []
+ x = []
+ for idx2, subj_id in enumerate(subj_id_only_demo):
+ subj_age_gen = [[demographic_data['subject'][x], demographic_data['gender_response'][x].lower(),
+ demographic_data['age_textbox_response'][x]] for x in range(len(demographic_data['subject']))
+ if demographic_data['subject'][x] == subj_id]
+ y = [[demographic_data['subject'][x]] for x in range(len(demographic_data['subject'])) if
+ demographic_data['subject'][x] == subj_id]
+ subj_age_gender_mem.append(subj_age_gen)
+
+ demo_subj_age_gender = [[demographic_data['subject'][x], demographic_data['gender_response'][x].lower(),
+ demographic_data['age_textbox_response'][x]]
+ for x in range(len(demographic_data['subject']))
+ if demographic_data['subject'][x]]
+
+ raw_data_responses = [[all_subj_csv_lines['subject'][x], all_subj_csv_lines['trialcode'][x],
+ all_subj_csv_lines['response'][x].lower()]
+ for x in range(len(all_subj_csv_lines['subject']))
+ if 'recall_response' in all_subj_csv_lines['trialcode'][x]]
+
+ key_val = []
+ for key in age_ranges.keys():
+ for val in age_ranges[key]:
+ key_val.append([key, val])
+
+ id_age_agerange = []
+ with open(subj_age_agerange_gender, 'wb') as csvfile:
+ writer = csv.writer(csvfile, delimiter=',')
+ writer.writerow(['subj_id', 'age', 'age_range', 'gender'])
+ for subj in sorted(demo_subj_age_gender):
+ subj_from_main_raw_list = []
+ ages = subj[2]
+ gender = subj[1]
+ subj_id_raw = [val for val in raw_data_responses if val[0] == subj[0]]
+ for vals in key_val:
+ age_vals = vals[1]
+ age_vals = str(age_vals)
+ if age_vals == ages:
+ complete_list = subj[0] + ',' + age_vals + "," + vals[0] + "," + gender
+ id_age_agerange.append(complete_list)
+ writer.writerow([subj[0], age_vals, vals[0], gender])
+ csvfile.close()
diff --git a/build/lib/vmreact-master/scripts/grader/inquisit_grader.py b/build/lib/vmreact-master/scripts/grader/inquisit_grader.py
new file mode 100755
index 0000000..6fd21a6
--- /dev/null
+++ b/build/lib/vmreact-master/scripts/grader/inquisit_grader.py
@@ -0,0 +1,209 @@
+import collections
+import csv
+from difflib import SequenceMatcher
+from math import ceil
+
+
+def grader(all_subj_data_csv, data_output_raw_csv, data_output_scored_csv, word_corr, p_r):
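+    # p_r selects the scoring vocabulary: 0 = full 15-word lists,
+    # 1 = primacy (first 5 words of each list), 2 = recency (last 5 words).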
+ with open(all_subj_data_csv, 'U') as file:
+ input_csv_lines_all_subj = csv.reader(file)
+ input_csv_lines_all_subj = map(list, zip(*input_csv_lines_all_subj))
+ all_subj_csv_lines = dict((rows[0], rows[1:]) for rows in input_csv_lines_all_subj)
+
+ subj_listtype = []
+ for idx, row in enumerate(all_subj_csv_lines['subject']):
+ if 'rey_list' in all_subj_csv_lines['trialcode'][idx]:
+ subj_listtype.append([all_subj_csv_lines['subject'][idx], all_subj_csv_lines['trialcode'][idx]])
+
+ set_subj_listtype = []
+ for subj in subj_listtype:
+ if subj not in set_subj_listtype:
+ set_subj_listtype.append(subj)
+
+ ## count per list type
+ index_number_resp = dict()
+ for list_type in sorted([x for x in set(all_subj_csv_lines['trialcode']) if 'rey_list' in x]):
+ index_number_resp[list_type] = []
+
+ for idx, response in enumerate(all_subj_csv_lines['response']):
+ if 'recall_response' in all_subj_csv_lines['trialcode'][idx]:
+ if 'listb' not in all_subj_csv_lines['trialcode'][idx]:
+ index_number_resp[
+ set_subj_listtype[[x[0] for x in set_subj_listtype].index(all_subj_csv_lines['subject'][idx])][
+ 1]].append(response.lower().strip())
+ elif 'listb' in all_subj_csv_lines['trialcode'][idx]:
+ index_number_resp[
+ set_subj_listtype[[x[0] for x in set_subj_listtype].index(all_subj_csv_lines['subject'][idx])][1][
+ :-1] + 'b'].append(response.lower().strip())
+
+ counter_dict = dict()
+ for list_type in sorted(index_number_resp.keys()):
+ rey_recall_word_count = collections.Counter(index_number_resp[list_type])
+ counter_dict[list_type] = rey_recall_word_count
+
+ total_response_for_list = dict()
+ for list_type in sorted(index_number_resp.keys()):
+ total_response_for_list[list_type] = sorted(set(index_number_resp[list_type]))
+
+ if p_r == 0:
+ rey_word_lists = {
+ 'rey_list_presentation_1a': ['drum', 'curtain', 'bell', 'coffee', 'school', 'parent', 'moon', 'garden',
+ 'hat', 'farmer', 'nose', 'turkey', 'color', 'house', 'river'],
+
+ 'rey_list_presentation_2a': ['pipe', 'wall', 'alarm', 'sugar', 'student', 'mother', 'star', 'painting',
+ 'bag', 'wheat', 'mouth', 'chicken', 'sound', 'door', 'stream'],
+
+ 'rey_list_presentation_3a': ['violin', 'tree', 'scarf', 'ham', 'suitcase', 'cousin', 'earth', 'stairs',
+ 'dog', 'banana', 'town', 'radio', 'hunter', 'bucket', 'field'],
+
+ 'rey_list_presentation_4a': ['doll', 'mirror', 'nail', 'sailor', 'heart', 'desert', 'face', 'letter', 'bed',
+ 'machine', 'milk', 'helmet', 'music', 'horse', 'road'],
+ 'rey_list_presentation_1b': ['desk', 'ranger', 'bird', 'shoe', 'stove', 'mountain', 'glasses', 'towel',
+ 'cloud', 'boar', 'lamb', 'gun', 'pencil', 'church', 'fish'],
+ 'rey_list_presentation_2b': ['bench', 'officer', 'cage', 'sock', 'fridge', 'cliff', 'bottle', 'soap',
+ 'sky', 'ship', 'goat', 'bullet', 'paper', 'chapel', 'crab'],
+ 'rey_list_presentation_3b': ['orange', 'table', 'toad', 'corn', 'bus', 'chin', 'bleach', 'soap', 'hotel',
+ 'donkey', 'spider', 'money', 'book', 'soldier', 'padlock'],
+ 'rey_list_presentation_4b': ['dish', 'jester', 'hill', 'coat', 'tool', 'forest', 'perfume', 'ladder',
+ 'girl', 'foot', 'shield', 'pie', 'insect', 'ball', 'car']
+ }
+ elif p_r == 1:
+ rey_word_lists = {'rey_list_presentation_1a': ['drum', 'curtain', 'bell', 'coffee', 'school'],
+ 'rey_list_presentation_2a': ['pipe', 'wall', 'alarm', 'sugar', 'student'],
+ 'rey_list_presentation_3a': ['violin', 'tree', 'scarf', 'ham', 'suitcase'],
+ 'rey_list_presentation_4a': ['doll', 'mirror', 'nail', 'sailor', 'heart'],
+ 'rey_list_presentation_1b': ['desk', 'ranger', 'bird', 'shoe', 'stove'],
+ 'rey_list_presentation_2b': ['bench', 'officer', 'cage', 'sock', 'fridge'],
+ 'rey_list_presentation_3b': ['orange', 'table', 'toad', 'corn', 'bus'],
+ 'rey_list_presentation_4b': ['dish', 'jester', 'hill', 'coat', 'tool']
+ }
+ elif p_r == 2:
+ rey_word_lists = {'rey_list_presentation_1a': ['nose', 'turkey', 'color', 'house', 'river'],
+ 'rey_list_presentation_2a': ['mouth', 'chicken', 'sound', 'door', 'stream'],
+ 'rey_list_presentation_3a': ['town', 'radio', 'hunter', 'bucket', 'field'],
+ 'rey_list_presentation_4a': ['milk', 'helmet', 'music', 'horse', 'road'],
+ 'rey_list_presentation_1b': ['lamb', 'gun', 'pencil', 'church', 'fish'],
+ 'rey_list_presentation_2b': ['goat', 'bullet', 'paper', 'chapel', 'crab'],
+ 'rey_list_presentation_3b': ['spider', 'money', 'book', 'soldier', 'padlock'],
+ 'rey_list_presentation_4b': ['shield', 'pie', 'insect', 'ball', 'car']
+ }
+
+ with open(word_corr, 'wb') as csvfile:
+ writer = csv.writer(csvfile, delimiter=',')
+ for word_list in sorted(total_response_for_list.keys()):
+ word_corrs = []
+ for word in total_response_for_list[word_list]:
+ wordcorrs = [round(SequenceMatcher(None, word, x).ratio(), 3) for x in rey_word_lists[word_list]]
+ word_corrs.append(wordcorrs)
+ writer.writerow([word, max(wordcorrs), rey_word_lists[word_list][wordcorrs.index(max(wordcorrs))]])
+ csvfile.close()
+
+ subj_id_list = []
+ subj_only = []
+ for subj in sorted(set(all_subj_csv_lines['subject'])):
+ try:
+ subj_list_type = [all_subj_csv_lines['trialcode'][x] for x in range(len(all_subj_csv_lines['subject']))
+ if (all_subj_csv_lines['subject'][x] == subj) and (
+ 'rey_list_presentation_' in all_subj_csv_lines['trialcode'][x])][0]
+ subj_id_list.append([subj, subj_list_type])
+ subj_only.append(subj)
+ except:
+ print "%s has an error in their data" % subj
+ continue
+
+ full_raw_data_responses = [[all_subj_csv_lines['subject'][x], all_subj_csv_lines['trialcode'][x],
+ all_subj_csv_lines['response'][x].lower()]
+ for x in range(len(all_subj_csv_lines['subject']))
+ if 'recall_response' in all_subj_csv_lines['trialcode'][x]]
+ all_responses = []
+ repeats = []
+ list_b_all = []
+ list_a_all = []
+ with open(data_output_raw_csv, 'wb') as csvfile:
+ writer = csv.writer(csvfile, delimiter=',')
+ writer.writerow(('subj_id', 'list_type', 'trial', 'response', 'score'))
+ for response in full_raw_data_responses:
+ subj = response[0]
+ list_to_use = [subj_id_list[x][1] for x in range(len(subj_id_list)) if subj_id_list[x][0] == subj][0]
+ list_a_all.append(list_to_use)
+ list_b = list_to_use[:-1] + 'b'
+ list_b_all.append(list_b)
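+            # A response scores 1 on an exact match to its target list, or on a fuzzy
+            # match (SequenceMatcher ratio > 0.8) to tolerate minor misspellings.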
+ if 'listb' in response[1]:
+ if response[2] in rey_word_lists[list_b]:
+ response.append(1)
+ else:
+ if any(n > 0.8 for n in
+ [SequenceMatcher(None, response[2], x).ratio() for x in rey_word_lists[list_b]]):
+ response.append(1)
+ else:
+ response.append(0)
+ new_row = response[0], list_b, response[1].split('_')[0], response[2], response[3]
+ else:
+ if response[2] in rey_word_lists[list_to_use]:
+ response.append(1)
+ else:
+ if any(n > 0.8 for n in
+ [SequenceMatcher(None, response[2], x).ratio() for x in rey_word_lists[list_to_use]]):
+ response.append(1)
+ else:
+ response.append(0)
+ new_row = response[0], list_to_use, response[1].split('_')[0], response[2], response[3]
+ writer.writerow(new_row)
+ all_responses.append(response)
+ rep = new_row
+ repeats.append(rep)
+ csvfile.close()
+
+ trial_breaks = []
+ trial_lines = [all_responses[y][1] for y in range(0, len(all_responses))]
+ trial_breaks = [i for i, x in enumerate(trial_lines[0:])
+ if x.split('_')[0] != trial_lines[i - 1].split('_')[0]]
+
+ trial_breaks = trial_breaks + [len(all_responses)]
+
+ subj_scores = []
+ final = []
+ final_repeats = []
+ for idx, val in enumerate(trial_breaks[:-1]):
+ score = 0
+ word_list = []
+ for line in all_responses[trial_breaks[idx]:trial_breaks[idx + 1]]:
+ if line[3] == 1:
+ score = score + 1
+ word_list.append(line[2])
+ test = []
+ for idx, word in enumerate(word_list):
+ test.append([SequenceMatcher(None, word, x).ratio() for x in
+ [y for idx2, y in enumerate(word_list) if idx != idx2]])
+ repeats = 0
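+        # Count near-duplicate responses within the trial (pairwise similarity > 0.8);
+        # the repeat count is subtracted from the trial score when rows are written out.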
+ for word in test:
+ word_thresholded = [ceil(x) for x in word if x > 0.8]
+ n = sum(word_thresholded)
+ if n != 0:
+ repeats = repeats + (((n * (n + 1)) - 1) / (n + 1))
+ subj_scores.append([line[0], line[1].split('_')[0], score, repeats])
+
+ with open(data_output_scored_csv, 'wb') as csvfile:
+ writer = csv.writer(csvfile, delimiter=',')
+ writer.writerow(
+ ['subj_id', 'list_type', 'listb', 'trial1', 'trial2', 'trial3', 'trial4', 'trial5', 'trial6', 'trial7',
+ 'listb_#_repeats', 'trial1_#_repeats', 'trial2_#_repeats', 'trial3_#_repeats', 'trial4_#_repeats',
+ 'trial5_#_repeats', 'trial6_#_repeats', 'trial7_#_repeats'])
+ subj_scores = subj_scores + ['placeholder']
+ for idx, scores in enumerate(sorted(subj_scores[:-1])):
+ scored = str(scores[2] - scores[3])
+ repeat_nm = scores[3]
+ final.append(scored)
+ final_repeats.append(repeat_nm)
+ subj_id = [scores[0]]
+ for idx2, val in enumerate(subj_id_list):
+ if subj_id[0] == subj_id_list[idx2][0]:
+ subj_list = subj_id_list[idx2][1].split('_')[3]
+ final_row = subj_id + [subj_list] + final + final_repeats
+ if scores[0] != sorted(subj_scores)[idx + 1][0]:
+ writer.writerow(final_row)
+ final_row = []
+ subj_id = []
+ final = []
+ final_repeats = []
+ csvfile.close()
diff --git a/build/lib/vmreact-master/scripts/grader/run_composite_scoring.py b/build/lib/vmreact-master/scripts/grader/run_composite_scoring.py
new file mode 100755
index 0000000..1b8cc9e
--- /dev/null
+++ b/build/lib/vmreact-master/scripts/grader/run_composite_scoring.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Feb 27 12:04:33 2018
+
+@author: dawlat_local
+"""
+
+import datetime
+import os
+import sys
+
+from composite_scores import composite_scores
+
+input_csv = sys.argv[1]
+output_path = sys.argv[2]
+
+format = "%Y_%m_%d"
+current_date = datetime.datetime.today()
+date = current_date.strftime(format)
+
+output_csv = os.path.join(output_path, 'composite_scores_vakil' + '_' + date + '.csv')
+
+composite_scores(1, input_csv, output_csv)
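+
+# Usage (hypothetical paths):
+#   python run_composite_scoring.py scored_data.csv /path/to/output_dir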
diff --git a/build/lib/vmreact-merges/2vmreact_compilation.py b/build/lib/vmreact-merges/2vmreact_compilation.py
new file mode 100755
index 0000000..027da57
--- /dev/null
+++ b/build/lib/vmreact-merges/2vmreact_compilation.py
@@ -0,0 +1,572 @@
+# coding: utf-8
+
+# In[24]:
+
+import collections
+import csv
+import datetime
+import os
+from difflib import SequenceMatcher
+from glob import glob
+from math import ceil
+from shutil import copy, move
+
+import pandas as pd
+from IPython.display import display
+
+
+# #Grading Script
+# In[25]:
+
+
+def grader(all_subj_data_csv, data_output_raw_csv, data_output_scored_csv, word_corr, p_r):
+ with open(all_subj_data_csv, 'U') as file:
+ input_csv_lines_all_subj = csv.reader(file)
+ input_csv_lines_all_subj = map(list, zip(*input_csv_lines_all_subj))
+ all_subj_csv_lines = dict((rows[0], rows[1:]) for rows in input_csv_lines_all_subj)
+
+ subj_listtype = []
+ for idx, row in enumerate(all_subj_csv_lines['subject']):
+ if 'rey_list' in all_subj_csv_lines['trialcode'][idx]:
+ subj_listtype.append([all_subj_csv_lines['subject'][idx], all_subj_csv_lines['trialcode'][idx]])
+
+ set_subj_listtype = []
+ for subj in subj_listtype:
+ if subj not in set_subj_listtype:
+ set_subj_listtype.append(subj)
+
+ ## count per list type
+ index_number_resp = dict()
+ for list_type in sorted([x for x in set(all_subj_csv_lines['trialcode']) if 'rey_list' in x]):
+ index_number_resp[list_type] = []
+
+ for idx, response in enumerate(all_subj_csv_lines['response']):
+ if 'recall_response' in all_subj_csv_lines['trialcode'][idx]:
+ if 'listb' not in all_subj_csv_lines['trialcode'][idx]:
+ index_number_resp[
+ set_subj_listtype[[x[0] for x in set_subj_listtype].index(all_subj_csv_lines['subject'][idx])][
+ 1]].append(response.lower().strip())
+ elif 'listb' in all_subj_csv_lines['trialcode'][idx]:
+ index_number_resp[
+ set_subj_listtype[[x[0] for x in set_subj_listtype].index(all_subj_csv_lines['subject'][idx])][1][
+ :-1] + 'b'].append(response.lower().strip())
+
+ counter_dict = dict()
+ for list_type in sorted(index_number_resp.keys()):
+ rey_recall_word_count = collections.Counter(index_number_resp[list_type])
+ counter_dict[list_type] = rey_recall_word_count
+
+ total_response_for_list = dict()
+ for list_type in sorted(index_number_resp.keys()):
+ total_response_for_list[list_type] = sorted(set(index_number_resp[list_type]))
+
+ if p_r == 0:
+ rey_word_lists = {
+ 'rey_list_presentation_1a': ['drum', 'curtain', 'bell', 'coffee', 'school', 'parent', 'moon', 'garden',
+ 'hat', 'farmer', 'nose', 'turkey', 'color', 'house', 'river'],
+
+ 'rey_list_presentation_2a': ['pipe', 'wall', 'alarm', 'sugar', 'student', 'mother', 'star', 'painting',
+ 'bag', 'wheat', 'mouth', 'chicken', 'sound', 'door', 'stream'],
+
+ 'rey_list_presentation_3a': ['violin', 'tree', 'scarf', 'ham', 'suitcase', 'cousin', 'earth', 'stairs',
+ 'dog', 'banana', 'town', 'radio', 'hunter', 'bucket', 'field'],
+
+ 'rey_list_presentation_4a': ['doll', 'mirror', 'nail', 'sailor', 'heart', 'desert', 'face', 'letter', 'bed',
+ 'machine', 'milk', 'helmet', 'music', 'horse', 'road'],
+ 'rey_list_presentation_1b': ['desk', 'ranger', 'bird', 'shoe', 'stove', 'mountain', 'glasses', 'towel',
+ 'cloud', 'boar', 'lamb', 'gun', 'pencil', 'church', 'fish'],
+ 'rey_list_presentation_2b': ['bench', 'officer', 'cage', 'sock', 'fridge', 'cliff', 'bottle', 'soap',
+ 'sky', 'ship', 'goat', 'bullet', 'paper', 'chapel', 'crab'],
+ 'rey_list_presentation_3b': ['orange', 'table', 'toad', 'corn', 'bus', 'chin', 'bleach', 'soap', 'hotel',
+ 'donkey', 'spider', 'money', 'book', 'soldier', 'padlock'],
+ 'rey_list_presentation_4b': ['dish', 'jester', 'hill', 'coat', 'tool', 'forest', 'perfume', 'ladder',
+ 'girl', 'foot', 'shield', 'pie', 'insect', 'ball', 'car']
+ }
+ elif p_r == 1:
+ rey_word_lists = {'rey_list_presentation_1a': ['drum', 'curtain', 'bell', 'coffee', 'school'],
+ 'rey_list_presentation_2a': ['pipe', 'wall', 'alarm', 'sugar', 'student'],
+ 'rey_list_presentation_3a': ['violin', 'tree', 'scarf', 'ham', 'suitcase'],
+ 'rey_list_presentation_4a': ['doll', 'mirror', 'nail', 'sailor', 'heart'],
+ 'rey_list_presentation_1b': ['desk', 'ranger', 'bird', 'shoe', 'stove'],
+ 'rey_list_presentation_2b': ['bench', 'officer', 'cage', 'sock', 'fridge'],
+ 'rey_list_presentation_3b': ['orange', 'table', 'toad', 'corn', 'bus'],
+ 'rey_list_presentation_4b': ['dish', 'jester', 'hill', 'coat', 'tool']
+ }
+ elif p_r == 2:
+ rey_word_lists = {'rey_list_presentation_1a': ['nose', 'turkey', 'color', 'house', 'river'],
+ 'rey_list_presentation_2a': ['mouth', 'chicken', 'sound', 'door', 'stream'],
+ 'rey_list_presentation_3a': ['town', 'radio', 'hunter', 'bucket', 'field'],
+ 'rey_list_presentation_4a': ['milk', 'helmet', 'music', 'horse', 'road'],
+ 'rey_list_presentation_1b': ['lamb', 'gun', 'pencil', 'church', 'fish'],
+ 'rey_list_presentation_2b': ['goat', 'bullet', 'paper', 'chapel', 'crab'],
+ 'rey_list_presentation_3b': ['spider', 'money', 'book', 'soldier', 'padlock'],
+ 'rey_list_presentation_4b': ['shield', 'pie', 'insect', 'ball', 'car']
+ }
+
+ with open(word_corr, 'wb') as csvfile:
+ writer = csv.writer(csvfile, delimiter=',')
+ for word_list in sorted(total_response_for_list.keys()):
+ word_corrs = []
+ for word in total_response_for_list[word_list]:
+ wordcorrs = [round(SequenceMatcher(None, word, x).ratio(), 3) for x in rey_word_lists[word_list]]
+ word_corrs.append(wordcorrs)
+ writer.writerow([word, max(wordcorrs), rey_word_lists[word_list][wordcorrs.index(max(wordcorrs))]])
+ csvfile.close()
+
+ subj_id_list = []
+ subj_only = []
+ for subj in sorted(set(all_subj_csv_lines['subject'])):
+ try:
+ subj_list_type = [all_subj_csv_lines['trialcode'][x] for x in range(len(all_subj_csv_lines['subject']))
+ if (all_subj_csv_lines['subject'][x] == subj) and (
+ 'rey_list_presentation_' in all_subj_csv_lines['trialcode'][x])][0]
+ subj_id_list.append([subj, subj_list_type])
+ subj_only.append(subj)
+ except:
+ print "%s has an error in their data" % subj
+ continue
+
+ full_raw_data_responses = [[all_subj_csv_lines['subject'][x], all_subj_csv_lines['trialcode'][x],
+ all_subj_csv_lines['response'][x].lower()]
+ for x in range(len(all_subj_csv_lines['subject']))
+ if 'recall_response' in all_subj_csv_lines['trialcode'][x]]
+ all_responses = []
+ repeats = []
+ list_b_all = []
+ list_a_all = []
+ with open(data_output_raw_csv, 'wb') as csvfile:
+ writer = csv.writer(csvfile, delimiter=',')
+ writer.writerow(('subj_id', 'list_type', 'trial', 'response', 'score'))
+ for response in full_raw_data_responses:
+ subj = response[0]
+ list_to_use = [subj_id_list[x][1] for x in range(len(subj_id_list)) if subj_id_list[x][0] == subj][0]
+ list_a_all.append(list_to_use)
+ list_b = list_to_use[:-1] + 'b'
+ list_b_all.append(list_b)
+ if 'listb' in response[1]:
+ if response[2] in rey_word_lists[list_b]:
+ response.append(1)
+ else:
+ if any(n > 0.8 for n in
+ [SequenceMatcher(None, response[2], x).ratio() for x in rey_word_lists[list_b]]):
+ response.append(1)
+ else:
+ response.append(0)
+ new_row = response[0], list_b, response[1].split('_')[0], response[2], response[3]
+ else:
+ if response[2] in rey_word_lists[list_to_use]:
+ response.append(1)
+ else:
+ if any(n > 0.8 for n in
+ [SequenceMatcher(None, response[2], x).ratio() for x in rey_word_lists[list_to_use]]):
+ response.append(1)
+ else:
+ response.append(0)
+ new_row = response[0], list_to_use, response[1].split('_')[0], response[2], response[3]
+ writer.writerow(new_row)
+ all_responses.append(response)
+ rep = new_row
+ repeats.append(rep)
+ csvfile.close()
+
+ trial_breaks = []
+ trial_lines = [all_responses[y][1] for y in range(0, len(all_responses))]
+ trial_breaks = [i for i, x in enumerate(trial_lines[0:])
+ if x.split('_')[0] != trial_lines[i - 1].split('_')[0]]
+
+ trial_breaks = trial_breaks + [len(all_responses)]
+
+ subj_scores = []
+ final = []
+ final_repeats = []
+ for idx, val in enumerate(trial_breaks[:-1]):
+ score = 0
+ word_list = []
+ for line in all_responses[trial_breaks[idx]:trial_breaks[idx + 1]]:
+ if line[3] == 1:
+ score = score + 1
+ word_list.append(line[2])
+ test = []
+ for idx, word in enumerate(word_list):
+ test.append([SequenceMatcher(None, word, x).ratio() for x in
+ [y for idx2, y in enumerate(word_list) if idx != idx2]])
+ repeats = 0
+ for word in test:
+ word_thresholded = [ceil(x) for x in word if x > 0.8]
+ n = sum(word_thresholded)
+ if n != 0:
+ repeats = repeats + (((n * (n + 1)) - 1) / (n + 1))
+ subj_scores.append([line[0], line[1].split('_')[0], score, repeats])
+
+ with open(data_output_scored_csv, 'wb') as csvfile:
+ writer = csv.writer(csvfile, delimiter=',')
+ writer.writerow(
+ ['subj_id', 'list_type', 'listb', 'trial1', 'trial2', 'trial3', 'trial4', 'trial5', 'trial6', 'trial7',
+ 'listb_#_repeats', 'trial1_#_repeats', 'trial2_#_repeats', 'trial3_#_repeats', 'trial4_#_repeats',
+ 'trial5_#_repeats', 'trial6_#_repeats', 'trial7_#_repeats'])
+ subj_scores = subj_scores + ['placeholder']
+ for idx, scores in enumerate(sorted(subj_scores[:-1])):
+ scored = str(scores[2] - scores[3])
+ repeat_nm = scores[3]
+ final.append(scored)
+ final_repeats.append(repeat_nm)
+ subj_id = [scores[0]]
+ for idx2, val in enumerate(subj_id_list):
+ if subj_id[0] == subj_id_list[idx2][0]:
+ subj_list = subj_id_list[idx2][1].split('_')[3]
+ final_row = subj_id + [subj_list] + final + final_repeats
+ if scores[0] != sorted(subj_scores)[idx + 1][0]:
+ writer.writerow(final_row)
+ final_row = []
+ subj_id = []
+ final = []
+ final_repeats = []
+ csvfile.close()
+
+
+# #demo and age range function
+
+# In[26]:
+
+import os
+import csv
+import collections
+
+
+def demo_and_summary_new(all_subj_data_csv, demographic_data, subj_age_agerange_gender):
+ with open(all_subj_data_csv, 'U') as file:
+ input_csv_lines_all_subj = csv.reader(file)
+ input_csv_lines_all_subj = map(list, zip(*input_csv_lines_all_subj))
+ all_subj_csv_lines = dict((rows[0], rows[1:]) for rows in input_csv_lines_all_subj)
+
+ with open(demographic_data, 'U') as file:
+ input_demo_sr_q_csv = csv.reader(file)
+ input_demo_sr_q_csv = map(list, zip(*input_demo_sr_q_csv))
+ demographic_data = dict((rows[0], rows[1:]) for rows in (input_demo_sr_q_csv))
+
+ age_ranges = {
+ '20-29': range(20, 30, 1),
+ '30-39': range(30, 40, 1),
+ '40-49': range(40, 50, 1),
+ '50-59': range(50, 60, 1),
+ '60-69': range(60, 70, 1),
+ '70-90': range(70, 90, 1)}
+
+ subj_id_list_demo = []
+ subj_id_only_demo = []
+
+ for subject in sorted(set(all_subj_csv_lines['subject'])):
+ subj_id_only_demo.append(subject)
+ subj_id_list_combined = [demographic_data['subject'][x] for x in range(len(demographic_data['subject'])) if
+ demographic_data['subject'][x] == subject]
+ subj_id_list_demo.append(subj_id_list_combined)
+
+ subj_id_combined = [(idx, val) for idx, val in enumerate(sorted(subj_id_only_demo))]
+
+ subj_val = []
+ key_val_all = []
+ for key in sorted(demographic_data.keys()):
+ for value in sorted(demographic_data[key]):
+ key_val_all.append([key, value])
+ if 'subject' in key:
+ subj_val.append(value)
+ else:
+ continue
+
+ subj_id_with_index = list()
+ for subj_num in subj_val:
+ subj_combined = [[idx, val] for idx, val in enumerate(sorted(subj_id_only_demo)) if val == subj_num]
+ subj_indexvals = [[idx, val] for idx, val in enumerate(sorted(subj_id_only_demo))]
+ subj_id_with_index.append(subj_combined)
+
+ subj_age_gender_mem = []
+ x = []
+ for idx2, subj_id in enumerate(subj_id_only_demo):
+ subj_age_gen = [[demographic_data['subject'][x], demographic_data['gender_response'][x].lower(),
+ demographic_data['age_textbox_response'][x]] for x in range(len(demographic_data['subject']))
+ if demographic_data['subject'][x] == subj_id]
+ y = [[demographic_data['subject'][x]] for x in range(len(demographic_data['subject'])) if
+ demographic_data['subject'][x] == subj_id]
+ subj_age_gender_mem.append(subj_age_gen)
+
+ demo_subj_age_gender = [[demographic_data['subject'][x], demographic_data['gender_response'][x].lower(),
+ demographic_data['age_textbox_response'][x]]
+ for x in range(len(demographic_data['subject']))
+ if demographic_data['subject'][x]]
+
+ raw_data_responses = [[all_subj_csv_lines['subject'][x], all_subj_csv_lines['trialcode'][x],
+ all_subj_csv_lines['response'][x].lower()]
+ for x in range(len(all_subj_csv_lines['subject']))
+ if 'recall_response' in all_subj_csv_lines['trialcode'][x]]
+
+ key_val = []
+ for key in age_ranges.keys():
+ for val in age_ranges[key]:
+ key_val.append([key, val])
+
+ id_age_agerange = []
+ with open(subj_age_agerange_gender, 'wb') as csvfile:
+ writer = csv.writer(csvfile, delimiter=',')
+ writer.writerow(['subj_id', 'age', 'age_range', 'gender'])
+ for subj in sorted(demo_subj_age_gender):
+ subj_from_main_raw_list = []
+ ages = subj[2]
+ gender = subj[1]
+ subj_id_raw = [val for val in raw_data_responses if val[0] == subj[0]]
+ for vals in key_val:
+ age_vals = vals[1]
+ age_vals = str(age_vals)
+ if age_vals == ages:
+ complete_list = subj[0] + ',' + age_vals + "," + vals[0] + "," + gender
+ id_age_agerange.append(complete_list)
+ writer.writerow([subj[0], age_vals, vals[0], gender])
+ csvfile.close()
+
+
+# In[51]:
+
+format = "%Y_%m_%d"
+current_date = datetime.datetime.today()
+date = current_date.strftime(format)
+
+output_csv_location = '/Users/lillyel-said/Desktop/vmreact/output/'
+
+for raw in glob('/Users/lillyel-said/Desktop/vmreact/vmreact/1_rawdata/*/*raw.csv'):
+ raw_data = raw
+ demo_data = raw.replace('raw.csv', 'demo.csv')
+ summary_data = raw.replace('raw.csv', 'summary.csv')
+ prefix = 'mturk_' + os.path.basename(os.path.dirname(raw_data)).split('_')[1] + '_'
+ grader(raw_data, os.path.join(output_csv_location, prefix + 'parsed_raw_data.csv'),
+ os.path.join(output_csv_location, prefix + 'scored_data.csv'),
+ os.path.join(output_csv_location, prefix + 'word_correlations.csv'), 0)
+ grader(raw_data, os.path.join(output_csv_location, prefix + 'parsed_raw_data_primacy.csv'),
+ os.path.join(output_csv_location, prefix + 'scored_data_primacy.csv'),
+ os.path.join(output_csv_location, prefix + 'word_correlations_primacy.csv'), 1)
+ grader(raw_data, os.path.join(output_csv_location, prefix + 'parsed_raw_data_recency.csv'),
+ os.path.join(output_csv_location, prefix + 'scored_data_recency.csv'),
+ os.path.join(output_csv_location, prefix + 'word_correlations_recency.csv'), 2)
+ demo_and_summary_new(raw_data, demo_data, os.path.join(output_csv_location, prefix + 'age_range_gender.csv'))
+ copy(demo_data, os.path.join(output_csv_location, prefix + 'demo.csv'))
+ copy(summary_data, os.path.join(output_csv_location, prefix + 'summary.csv'))
+
+# In[13]:
+
+scored_dir = '/Users/lillyel-said/Desktop/vmreact/output/'
+for scored_csv in glob(os.path.join(scored_dir, '*scored*')):
+ with open(scored_csv, 'U') as source:
+ rdr = csv.reader(source)
+ with open(os.path.join(scored_dir, 'tmp.csv'), 'wb') as result:
+ wtr = csv.writer(result)
+ for r in rdr:
+ wtr.writerow(r[0:18])
+ move(os.path.join(scored_dir, 'tmp.csv'), scored_csv)
+ print scored_csv
+
+# In[ ]:
+
+# Getting composite scores from scored
+
+
+# In[14]:
+
+
+import numpy as np
+import os
+import pandas
+
+
+def composite_scores(input_csv, output_csv):
+ scored_data = pandas.read_csv(input_csv)
+ print input_csv
+ df_trials = scored_data.loc[:, 'trial1':'trial7']
+ print df_trials.columns.tolist()
+ composite_scores = pandas.DataFrame()
+ tmp = pandas.DataFrame()
+ composite_scores['total_learning'] = df_trials[['trial1', 'trial2', 'trial3', 'trial4', 'trial5']].apply(
+ lambda row: np.sum(row), axis=1)
+    tmp['test'] = df_trials['trial1'] * 5
+ composite_scores['corrected_total_learning'] = composite_scores['total_learning'].subtract(tmp['test'])
+
+ composite_scores['learning_rate'] = df_trials['trial5'].subtract(df_trials['trial1'], axis='rows')
+ composite_scores['proactive_interference'] = df_trials['trial1'].subtract(scored_data['listb'], axis='rows')
+ composite_scores['retroactive_interference'] = df_trials['trial5'].subtract(df_trials['trial6'], axis='rows')
+
+ composite_scores['forgetting_and_retention'] = df_trials['trial5'].subtract(df_trials['trial7'], axis='rows')
+
+    # composite_scores_transposed = composite_scores.transpose()
+    # composite_scores_transposed.to_csv(output_csv, header=True, index=['measure', 'score'])
+    composite_scores.to_csv(output_csv, header=True, index=['measure', 'score'])
+
+
+for scored in glob('/Users/lillyel-said/Desktop/vmreact/output/*_scored_data.csv'):
+ composite_scores(scored, scored.replace('_scored_data.csv', '_composite_scores.csv'))
+
+# In[7]:
+
+scored_dir = '/Users/lillyel-said/Desktop/vmreact/output/'
+
+demo_cols = []
+clin_raw_cols = []
+sum_cols = ['script.startdate', 'script.starttime', 'subject',
+ 'expressions.gad_7_total', 'expressions.phq_total', 'expressions.pcl_4_total',
+ 'expressions.pcl_total_hybridscore_corrected', 'expressions.pcl_total_hybridscore_uncorrected']
+scored_cols = ['subj_id', 'list_type', 'listb', 'trial1', 'trial2', 'trial3',
+ 'trial4', 'trial5', 'trial6', 'trial7', 'listb_#_repeats', 'trial1_#_repeats', 'trial2_#_repeats',
+ 'trial3_#_repeats', 'trial4_#_repeats', 'trial5_#_repeats', 'trial6_#_repeats', 'trial7_#_repeats']
+composite_cols = ['subject', 'total_learning', 'corrected_total_learning', 'learning_rate',
+ 'proactive_interference', 'retroactive_interference', 'forgetting_and_retention']
+
+age_range_gender_cols = ['age_range']
+
+for batch in range(1, 9):
+ batch = str(batch)
+ demo = os.path.join(scored_dir, 'mturk_batch' + batch + '_demo.csv')
+ clin_raw = os.path.join(scored_dir, 'mturk_batch' + batch + '_end.csv')
+ summ = os.path.join(scored_dir, 'mturk_batch' + batch + '_summary.csv')
+ scored = os.path.join(scored_dir, 'mturk_batch' + batch + '_scored_data.csv')
+ composite = os.path.join(scored_dir, 'mturk_batch' + batch + '_composite_scores.csv')
+ age_range_gender_csv = os.path.join(scored_dir, 'mturk_batch' + batch + '_age_range_gender.csv')
+
+ demo_df = pd.read_csv(demo, dtype=str)
+ # demo_cols.extend([x for x in demo_df.columns.tolist() if ('latency' not in x and 'online' not in x and 'Unnamed' not in x and 'time_comp' not in x and 'subj_id' not in x)])
+    demo_cols.extend([x for x in demo_df.columns.tolist() if
+                      ('latency' not in x and 'Unnamed' not in x and 'subj_id' not in x)])
+ print batch
+ age_range_df = pd.read_csv(age_range_gender_csv)
+ age_range_gender_cols.extend(
+ [x for x in age_range_df.columns.tolist() if ('age' not in x and 'subj_id' not in x and 'gender' not in x)])
+ clin_raw_df = pd.read_csv(clin_raw, dtype=str)
+ clin_raw_cols.extend(
+ [x for x in clin_raw_df.columns.tolist() if 'latency' not in x and 'end' not in x and 'Unnamed' not in x])
+ sum_df = pd.read_csv(summ, dtype=str)
+ scored_df = pd.read_csv(scored, dtype=str)
+ comp_df = pd.read_csv(composite, dtype=str).rename(index=str, columns={'Unnamed: 0': 'subject'})
+ age_range_gender = pd.read_csv(age_range_gender_csv, dtype=str)
+
+demo_cols = list(set(demo_cols))
+clin_raw_cols = list(set(clin_raw_cols))
+
+print demo_cols
+print clin_raw_cols
+
+# need to get latency values,
+# use the scored to set the subject ids.
+# append composite to scored_cols since they're in the same order and composite doesn't have subject ids
+# summary - use script.subjectid
+# demo - use subject
+# clin_raw - use subject
+
+# In[95]:
+
+import numpy as np
+
+scored_dir = '/Users/lillyel-said/Desktop/vmreact/vmreact/2_vmreact/'
+latency_csv = os.path.join(scored_dir, 'vmreact_latency_summary.csv')
+
+for batch in range(1, 9):
+ # for batch in [8]:
+ batch_df = pd.DataFrame()
+ batch = str(batch)
+ print 'mturk_batch' + batch
+
+ demo = os.path.join(scored_dir, 'mturk_batch' + batch + '_demo.csv')
+ clin_raw = os.path.join(scored_dir, 'mturk_batch' + batch + '_end.csv')
+    summ = os.path.join(scored_dir, 'mturk_batch' + batch + '_summary.csv')
+ scored = os.path.join(scored_dir, 'mturk_batch' + batch + '_scored_data.csv')
+ primacy = os.path.join(scored_dir, 'mturk_batch' + batch + '_scored_data_primacy.csv')
+ recency = os.path.join(scored_dir, 'mturk_batch' + batch + '_scored_data_recency.csv')
+ composite = os.path.join(scored_dir, 'mturk_batch' + batch + '_composite_scores.csv')
+
+ demo_df = pd.read_csv(demo, dtype=str)
+ clin_raw_df = pd.read_csv(clin_raw, dtype=str)
+    sum_df = pd.read_csv(summ, dtype=str).rename(index=str, columns={'script.subjectid': 'subject'})
+ scored_df = pd.read_csv(scored)
+
+ primacy_df = pd.read_csv(primacy, dtype=str)
+ recency_df = pd.read_csv(recency, dtype=str)
+
+    extra_measures = primacy_df.merge(recency_df, on='subj_id', how='left',
+                                      suffixes=('_primacy', '_recency')).rename(columns={'subj_id': 'subject'})
+ comp_df = pd.read_csv(composite).rename(index=str, columns={'Unnamed: 0': 'subject'})
+ comp_df['subject'] = comp_df['subject'].apply(int)
+
+ vmreact_df = pd.merge(scored_df, comp_df, left_index=True, right_on='subject', how='left').drop('subject', axis=1)
+ vmreact_df['subj_id'] = vmreact_df['subj_id'].astype(str)
+
+ # vmreact_df['subj_id']=vmreact_df['subj_id'].apply(pd.to_numeric)
+ latency_df = pd.read_csv(latency_csv, dtype=str)
+ latency_df = latency_df.drop_duplicates().reset_index()
+
+ subject_ids = vmreact_df['subj_id'].tolist()
+
+ vmreact_df = vmreact_df.merge(extra_measures, left_on='subj_id', right_on='subject').drop('subject', axis=1)
+
+ batch_demo_cols = [x for x in demo_df.columns.tolist() if x in demo_cols]
+ append_demo_cols = [x for x in demo_cols if x not in demo_df.columns.tolist()]
+ demo_df = demo_df[demo_df['subject'].astype(str).isin(subject_ids)][batch_demo_cols]
+
+ for col in append_demo_cols:
+ demo_df[col] = np.nan
+ # print demo_df
+ # demo_df['subject']=demo_df['subject'].apply(pd.to_numeric)
+
+ batch_clin_cols = [x for x in clin_raw_df.columns.tolist() if x in clin_raw_cols]
+ append_clin_cols = [x for x in clin_raw_cols if x not in clin_raw_df.columns.tolist()]
+ clin_raw_df = clin_raw_df[clin_raw_df['subject'].astype(str).isin(subject_ids)][batch_clin_cols]
+ for col in sorted(append_clin_cols):
+ clin_raw_df[col] = np.nan
+ # clin_raw_df['subject']=clin_raw_df['subject'].apply(pd.to_numeric)
+
+ batch_sum_cols = [x for x in sum_df.columns.tolist() if x in sum_cols]
+ append_sum_cols = [x for x in sum_cols if x not in sum_df.columns.tolist()]
+ sum_df = sum_df[sum_df['subject'].astype(str).isin(subject_ids)][batch_sum_cols]
+ for col in sorted(append_sum_cols):
+ sum_df[col] = np.nan
+ # sum_df['subject']=sum_df['subject'].apply(pd.to_numeric)
+
+ batch_df = demo_df.merge(sum_df, left_on='subject', right_on='subject').drop(
+ ['script.startdate', 'script.starttime'], axis=1)
+ batch_df = batch_df.merge(clin_raw_df, left_on='subject', right_on='subject').drop(
+ ['date_y', 'time_y', 'group_y', 'build_y'], axis=1)
+ batch_df = batch_df.merge(vmreact_df, left_on='subject', right_on='subj_id').drop('subj_id', axis=1)
+ batch_df = batch_df.rename(columns={'date_x': 'date', 'time_x': 'time', 'group_x': 'group', 'build_x': 'build'})
+ # print batch_df
+
+ print subject_ids
+ latency_df['subjid'] = latency_df['subjid'].astype(str)
+ latency_df['date'] = latency_df['date'].astype(int)
+ batch_df['date'] = batch_df['date'].astype(int)
+
+    latency_df = latency_df.loc[
+        latency_df['subjid'].isin(batch_df['subject'].astype(str).tolist())
+        & latency_df['date'].isin(batch_df['date'].tolist())]
+
+ batch_df['subject'] = batch_df['subject'].astype(str)
+ batch_df = batch_df.merge(latency_df, left_on='subject', right_on='subjid')
+
+ batch_df.to_csv(os.path.join(scored_dir, 'mturk_batch' + batch + '_compiled.csv'))
+# os.system('open /Users/cdla/Desktop/scratch/vmreact/2_vmreact/'+'mturk_batch'+batch+'_compiled.csv')
+
+
+# In[97]:
+
+dataframes_to_concat = []
+result = []
+for compiled_csv in glob(os.path.join(scored_dir, '*compiled.csv')):
+ df = pd.read_csv(compiled_csv, dtype=str)
+ dataframes_to_concat.append(df)
+
+result = pd.concat(dataframes_to_concat).reindex(columns=df.columns.tolist()).drop(
+    ['index', 'date_y', 'subjid', 'Unnamed: 0'], axis=1).dropna(how='all', axis=1).drop_duplicates()
+
+# print result.subject
+result = result[~result.subject.isin(['XXX', 'AVD6HMIO1HLFI', 'A5EU1AQJNC7F2'])]
+result.drop_duplicates(['date_x', 'subject'], inplace=True)
+display(result)
+result.to_csv(os.path.join(scored_dir, 'mturk_vmreact_complete_compilation.csv'), index=False)
+
+# In[76]:
+
+
+# In[ ]:
diff --git a/venv/lib/python2.7/site-packages/IPython/lib/tests/__init__.py b/build/lib/vmreact-merges/__init__.py
similarity index 100%
rename from venv/lib/python2.7/site-packages/IPython/lib/tests/__init__.py
rename to build/lib/vmreact-merges/__init__.py
diff --git a/build/lib/vmreact-merges/edits.py b/build/lib/vmreact-merges/edits.py
new file mode 100755
index 0000000..c9e27ba
--- /dev/null
+++ b/build/lib/vmreact-merges/edits.py
@@ -0,0 +1,448 @@
+# coding: utf-8
+
+# In[3]:
+
+import collections
+import csv
+import datetime
+from difflib import SequenceMatcher
+from glob import glob
+from math import ceil
+from shutil import copy, move
+
+import pandas as pd
+from IPython.display import display
+
+
+# #Grading Script
+# In[2]:
+
+
+def grader(all_subj_data_csv, data_output_raw_csv, data_output_scored_csv, word_corr, p_r):
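+    """Score recall responses against the Rey word lists.
+
+    p_r selects the word window: 0 scores against the full 15-word lists,
+    1 against the first five words (primacy), 2 against the last five
+    (recency). A response counts as correct when it matches a list word
+    exactly or with a SequenceMatcher ratio above 0.8.
+    """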
+ with open(all_subj_data_csv, 'U') as file:
+ input_csv_lines_all_subj = csv.reader(file)
+ input_csv_lines_all_subj = map(list, zip(*input_csv_lines_all_subj))
+ all_subj_csv_lines = dict((rows[0], rows[1:]) for rows in input_csv_lines_all_subj)
+
+ subj_listtype = []
+ for idx, row in enumerate(all_subj_csv_lines['subject']):
+ if 'rey_list' in all_subj_csv_lines['trialcode'][idx]:
+ subj_listtype.append([all_subj_csv_lines['subject'][idx], all_subj_csv_lines['trialcode'][idx]])
+
+ set_subj_listtype = []
+ for subj in subj_listtype:
+ if subj not in set_subj_listtype:
+ set_subj_listtype.append(subj)
+
+ ## count per list type
+ index_number_resp = dict()
+ for list_type in sorted([x for x in set(all_subj_csv_lines['trialcode']) if 'rey_list' in x]):
+ index_number_resp[list_type] = []
+
+ for idx, response in enumerate(all_subj_csv_lines['response']):
+ if 'recall_response' in all_subj_csv_lines['trialcode'][idx]:
+ if 'listb' not in all_subj_csv_lines['trialcode'][idx]:
+ index_number_resp[
+ set_subj_listtype[[x[0] for x in set_subj_listtype].index(all_subj_csv_lines['subject'][idx])][
+ 1]].append(response.lower().strip())
+ elif 'listb' in all_subj_csv_lines['trialcode'][idx]:
+ index_number_resp[
+ set_subj_listtype[[x[0] for x in set_subj_listtype].index(all_subj_csv_lines['subject'][idx])][1][
+ :-1] + 'b'].append(response.lower().strip())
+
+ counter_dict = dict()
+ for list_type in sorted(index_number_resp.keys()):
+ rey_recall_word_count = collections.Counter(index_number_resp[list_type])
+ counter_dict[list_type] = rey_recall_word_count
+
+ total_response_for_list = dict()
+ for list_type in sorted(index_number_resp.keys()):
+ total_response_for_list[list_type] = sorted(set(index_number_resp[list_type]))
+
+ if p_r == 0:
+ rey_word_lists = {
+ 'rey_list_presentation_1a': ['drum', 'curtain', 'bell', 'coffee', 'school', 'parent', 'moon', 'garden',
+ 'hat', 'farmer', 'nose', 'turkey', 'color', 'house', 'river'],
+
+ 'rey_list_presentation_2a': ['pipe', 'wall', 'alarm', 'sugar', 'student', 'mother', 'star', 'painting',
+ 'bag', 'wheat', 'mouth', 'chicken', 'sound', 'door', 'stream'],
+
+ 'rey_list_presentation_3a': ['violin', 'tree', 'scarf', 'ham', 'suitcase', 'cousin', 'earth', 'stairs',
+ 'dog', 'banana', 'town', 'radio', 'hunter', 'bucket', 'field'],
+
+ 'rey_list_presentation_4a': ['doll', 'mirror', 'nail', 'sailor', 'heart', 'desert', 'face', 'letter', 'bed',
+ 'machine', 'milk', 'helmet', 'music', 'horse', 'road'],
+ 'rey_list_presentation_1b': ['desk', 'ranger', 'bird', 'shoe', 'stove', 'mountain', 'glasses', 'towel',
+ 'cloud', 'boar', 'lamb', 'gun', 'pencil', 'church', 'fish'],
+ 'rey_list_presentation_2b': ['bench', 'officer', 'cage', 'sock', 'fridge', 'cliff', 'bottle', 'soap',
+ 'sky', 'ship', 'goat', 'bullet', 'paper', 'chapel', 'crab'],
+ 'rey_list_presentation_3b': ['orange', 'table', 'toad', 'corn', 'bus', 'chin', 'bleach', 'soap', 'hotel',
+ 'donkey', 'spider', 'money', 'book', 'soldier', 'padlock'],
+ 'rey_list_presentation_4b': ['dish', 'jester', 'hill', 'coat', 'tool', 'forest', 'perfume', 'ladder',
+ 'girl', 'foot', 'shield', 'pie', 'insect', 'ball', 'car']
+ }
+ elif p_r == 1:
+ rey_word_lists = {'rey_list_presentation_1a': ['drum', 'curtain', 'bell', 'coffee', 'school'],
+ 'rey_list_presentation_2a': ['pipe', 'wall', 'alarm', 'sugar', 'student'],
+ 'rey_list_presentation_3a': ['violin', 'tree', 'scarf', 'ham', 'suitcase'],
+ 'rey_list_presentation_4a': ['doll', 'mirror', 'nail', 'sailor', 'heart'],
+ 'rey_list_presentation_1b': ['desk', 'ranger', 'bird', 'shoe', 'stove'],
+ 'rey_list_presentation_2b': ['bench', 'officer', 'cage', 'sock', 'fridge'],
+ 'rey_list_presentation_3b': ['orange', 'table', 'toad', 'corn', 'bus'],
+ 'rey_list_presentation_4b': ['dish', 'jester', 'hill', 'coat', 'tool']
+ }
+ elif p_r == 2:
+ rey_word_lists = {'rey_list_presentation_1a': ['nose', 'turkey', 'color', 'house', 'river'],
+ 'rey_list_presentation_2a': ['mouth', 'chicken', 'sound', 'door', 'stream'],
+ 'rey_list_presentation_3a': ['town', 'radio', 'hunter', 'bucket', 'field'],
+ 'rey_list_presentation_4a': ['milk', 'helmet', 'music', 'horse', 'road'],
+ 'rey_list_presentation_1b': ['lamb', 'gun', 'pencil', 'church', 'fish'],
+ 'rey_list_presentation_2b': ['goat', 'bullet', 'paper', 'chapel', 'crab'],
+ 'rey_list_presentation_3b': ['spider', 'money', 'book', 'soldier', 'padlock'],
+ 'rey_list_presentation_4b': ['shield', 'pie', 'insect', 'ball', 'car']
+ }
+
+ with open(word_corr, 'wb') as csvfile:
+ writer = csv.writer(csvfile, delimiter=',')
+ for word_list in sorted(total_response_for_list.keys()):
+ word_corrs = []
+ for word in total_response_for_list[word_list]:
+ wordcorrs = [round(SequenceMatcher(None, word, x).ratio(), 3) for x in rey_word_lists[word_list]]
+ word_corrs.append(wordcorrs)
+ writer.writerow([word, max(wordcorrs), rey_word_lists[word_list][wordcorrs.index(max(wordcorrs))]])
+ csvfile.close()
+
+ subj_id_list = []
+ subj_only = []
+ for subj in sorted(set(all_subj_csv_lines['subject'])):
+ try:
+ subj_list_type = [all_subj_csv_lines['trialcode'][x] for x in range(len(all_subj_csv_lines['subject']))
+ if (all_subj_csv_lines['subject'][x] == subj) and (
+ 'rey_list_presentation_' in all_subj_csv_lines['trialcode'][x])][0]
+ subj_id_list.append([all_subj_csv_lines['date'][x] + '_' + subj, subj_list_type])
+ subj_only.append(subj)
+        except IndexError:  # subject has no rey_list_presentation trial
+ print "%s has an error in their data" % subj
+ continue
+
+ full_raw_data_responses = [
+ [all_subj_csv_lines['date'][x] + '_' + all_subj_csv_lines['subject'][x], all_subj_csv_lines['trialcode'][x],
+ all_subj_csv_lines['response'][x].lower()]
+ for x in range(len(all_subj_csv_lines['subject']))
+ if 'recall_response' in all_subj_csv_lines['trialcode'][x]]
+ all_responses = []
+ repeats = []
+ list_b_all = []
+ list_a_all = []
+ with open(data_output_raw_csv, 'wb') as csvfile:
+ writer = csv.writer(csvfile, delimiter=',')
+ writer.writerow(('subj_id', 'list_type', 'trial', 'response', 'score'))
+ for response in full_raw_data_responses:
+ subj = response[0]
+ list_to_use = [subj_id_list[x][1] for x in range(len(subj_id_list)) if subj_id_list[x][0] == subj][0]
+ list_a_all.append(list_to_use)
+ list_b = list_to_use[:-1] + 'b'
+ list_b_all.append(list_b)
+ if 'listb' in response[1]:
+ if response[2] in rey_word_lists[list_b]:
+ response.append(1)
+ else:
+ if any(n > 0.8 for n in
+ [SequenceMatcher(None, response[2], x).ratio() for x in rey_word_lists[list_b]]):
+ response.append(1)
+ else:
+ response.append(0)
+ new_row = response[0], list_b, response[1].split('_')[0], response[2], response[3]
+ else:
+ if response[2] in rey_word_lists[list_to_use]:
+ response.append(1)
+ else:
+ if any(n > 0.8 for n in
+ [SequenceMatcher(None, response[2], x).ratio() for x in rey_word_lists[list_to_use]]):
+ response.append(1)
+ else:
+ response.append(0)
+ new_row = response[0], list_to_use, response[1].split('_')[0], response[2], response[3]
+ writer.writerow(new_row)
+ all_responses.append(response)
+ rep = new_row
+ repeats.append(rep)
+ csvfile.close()
+
+ trial_breaks = []
+ trial_lines = [all_responses[y][1] for y in range(0, len(all_responses))]
+ trial_breaks = [i for i, x in enumerate(trial_lines[0:])
+ if x.split('_')[0] != trial_lines[i - 1].split('_')[0]]
+
+ trial_breaks = trial_breaks + [len(all_responses)]
+
+ subj_scores = []
+ final = []
+ final_repeats = []
+ for idx, val in enumerate(trial_breaks[:-1]):
+ score = 0
+ word_list = []
+ for line in all_responses[trial_breaks[idx]:trial_breaks[idx + 1]]:
+ if line[3] == 1:
+ score = score + 1
+ word_list.append(line[2])
+ test = []
+ for idx, word in enumerate(word_list):
+ test.append([SequenceMatcher(None, word, x).ratio() for x in
+ [y for idx2, y in enumerate(word_list) if idx != idx2]])
+ repeats = 0
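+        # each response was fuzzy-matched against every other response in the
+        # trial; matches with ratio > 0.8 count as near-duplicates. A word typed
+        # twice produces two mutual hits of ((1*2)-1)/(1+1) = 0.5 each, i.e. one
+        # repeat in total.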
+ for word in test:
+ word_thresholded = [ceil(x) for x in word if x > 0.8]
+ n = sum(word_thresholded)
+ if n != 0:
+ repeats = repeats + (((n * (n + 1)) - 1) / (n + 1))
+ subj_scores.append([line[0], line[1].split('_')[0], score, repeats])
+
+ with open(data_output_scored_csv, 'wb') as csvfile:
+ writer = csv.writer(csvfile, delimiter=',')
+ writer.writerow(
+ ['subj_id', 'list_type', 'listb', 'trial1', 'trial2', 'trial3', 'trial4', 'trial5', 'trial6', 'trial7',
+ 'listb_#_repeats', 'trial1_#_repeats', 'trial2_#_repeats', 'trial3_#_repeats', 'trial4_#_repeats',
+ 'trial5_#_repeats', 'trial6_#_repeats', 'trial7_#_repeats'])
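+        # 'placeholder' is a sentinel so the look-ahead comparison on the last
+        # subject (sorted(subj_scores)[idx + 1]) never runs off the end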
+ subj_scores = subj_scores + ['placeholder']
+ for idx, scores in enumerate(sorted(subj_scores[:-1])):
+ scored = str(scores[2] - scores[3])
+ repeat_nm = scores[3]
+ final.append(scored)
+ final_repeats.append(repeat_nm)
+ subj_id = [scores[0]]
+ for idx2, val in enumerate(subj_id_list):
+ if subj_id[0] == subj_id_list[idx2][0]:
+ subj_list = subj_id_list[idx2][1].split('_')[3]
+ final_row = subj_id + [subj_list] + final + final_repeats
+ if scores[0] != sorted(subj_scores)[idx + 1][0]:
+ writer.writerow(final_row)
+ final_row = []
+ subj_id = []
+ final = []
+ final_repeats = []
+ csvfile.close()
+
+
+# In[63]:
+
+# Getting composite scores from scored
+
+
+# In[ ]:
+
+import numpy as np
+import pandas
+import os
+
+
+def composite_scores(input_csv, output_csv):
+ scored_data = pandas.read_csv(input_csv)
+ print input_csv
+ df_trials = scored_data.loc[:, 'trial1':'trial7']
+ print df_trials.columns.tolist()
+ composite_scores = pandas.DataFrame()
+ tmp = pandas.DataFrame()
+ composite_scores['total_learning'] = df_trials[['trial1', 'trial2', 'trial3', 'trial4', 'trial5']].apply(
+ lambda row: np.sum(row), axis=1)
+    tmp['test'] = df_trials['trial1'] * 5  # five times trial 1, aligned row-by-row
+ composite_scores['corrected_total_learning'] = composite_scores['total_learning'].subtract(tmp['test'])
+
+ composite_scores['learning_rate'] = df_trials['trial5'].subtract(df_trials['trial1'], axis='rows')
+ composite_scores['proactive_interference'] = df_trials['trial1'].subtract(scored_data['listb'], axis='rows')
+ composite_scores['retroactive_interference'] = df_trials['trial5'].subtract(df_trials['trial6'], axis='rows')
+
+ composite_scores['forgetting_and_retention'] = df_trials['trial5'].subtract(df_trials['trial7'], axis='rows')
+
+    # one row per subject, with the subject index in the first column, which is
+    # what the downstream batch merge expects
+    composite_scores.to_csv(output_csv, header=True, index=True)
+
+
+for scored in glob('/Users/cdla/Desktop/scratch/vmreact/2_vmreact/*_scored_data.csv'):
+ composite_scores(scored, scored.replace('_scored_data.csv', '_composite_scores.csv'))
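+
+# Worked example of the composites, using hypothetical trial scores
+# (trials 1-5 = [5, 7, 9, 10, 11], listb = 4, trial6 = 9, trial7 = 8):
+#   total_learning           = 5+7+9+10+11 = 42
+#   corrected_total_learning = 42 - 5*5    = 17
+#   learning_rate            = 11 - 5      = 6
+#   proactive_interference   = 5 - 4       = 1
+#   retroactive_interference = 11 - 9      = 2
+#   forgetting_and_retention = 11 - 8      = 3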
+
+# In[ ]:
+
+format = "%Y_%m_%d"
+current_date = datetime.datetime.today()
+date = current_date.strftime(format)
+
+output_csv_location = '/Users/cdla/Desktop/scratch/vmreact/2_vmreact/'
+
+for raw in glob('/Users/cdla/Desktop/scratch/vmreact/1_rawdata/*/*raw.csv'):
+ raw_data = raw
+ demo_data = raw.replace('raw.csv', 'demo.csv')
+ summary_data = raw.replace('raw.csv', 'summary.csv')
+ prefix = 'mturk_' + os.path.basename(os.path.dirname(raw_data)).split('_')[1] + '_'
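+    # three grading passes per raw file: full lists (p_r=0), primacy words
+    # (p_r=1), and recency words (p_r=2)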
+ grader(raw_data, os.path.join(output_csv_location, prefix + 'parsed_raw_data.csv'),
+ os.path.join(output_csv_location, prefix + 'scored_data.csv'),
+ os.path.join(output_csv_location, prefix + 'word_correlations.csv'), 0)
+ grader(raw_data, os.path.join(output_csv_location, prefix + 'parsed_raw_data_primacy.csv'),
+ os.path.join(output_csv_location, prefix + 'scored_data_primacy.csv'),
+ os.path.join(output_csv_location, prefix + 'word_correlations_primacy.csv'), 1)
+ grader(raw_data, os.path.join(output_csv_location, prefix + 'parsed_raw_data_recency.csv'),
+ os.path.join(output_csv_location, prefix + 'scored_data_recency.csv'),
+ os.path.join(output_csv_location, prefix + 'word_correlations_recency.csv'), 2)
+ copy(demo_data, os.path.join(output_csv_location, prefix + 'demo.csv'))
+ copy(summary_data, os.path.join(output_csv_location, prefix + 'summary.csv'))
+
+# In[4]:
+
+scored_dir = '/Users/cdla/Desktop/scratch/vmreact/2_vmreact/'
+for scored_csv in glob(os.path.join(scored_dir, '*parsed*')):
+ with open(scored_csv, 'rb') as source:
+ rdr = csv.reader(source)
+ with open(os.path.join(scored_dir, 'tmp.csv'), 'wb') as result:
+ wtr = csv.writer(result)
+ for r in rdr:
+ wtr.writerow(r[0:18])
+ move(os.path.join(scored_dir, 'tmp.csv'), scored_csv)
+ print scored_csv
+
+# In[174]:
+
+scored_dir = '/Users/cdla/Desktop/scratch/vmreact/2_vmreact/'
+
+demo_cols = []
+clin_raw_cols = []
+sum_cols = ['script.startdate', 'script.starttime', 'subject',
+ 'expressions.gad_7_total', 'expressions.phq_total', 'expressions.pcl_4_total',
+ 'expressions.pcl_total_hybridscore_corrected', 'expressions.pcl_total_hybridscore_uncorrected']
+scored_cols = ['subj_id', 'list_type', 'listb', 'trial1', 'trial2', 'trial3',
+ 'trial4', 'trial5', 'trial6', 'trial7', 'listb_#_repeats', 'trial1_#_repeats', 'trial2_#_repeats',
+ 'trial3_#_repeats', 'trial4_#_repeats', 'trial5_#_repeats', 'trial6_#_repeats', 'trial7_#_repeats']
+composite_cols = ['subject', 'total_learning', 'corrected_total_learning', 'learning_rate',
+ 'proactive_interference', 'retroactive_interference', 'forgetting_and_retention']
+
+for batch in range(1, 9):
+ batch = str(batch)
+ demo = os.path.join(scored_dir, 'mturk_batch' + batch + '_demo.csv')
+ clin_raw = os.path.join(scored_dir, 'mturk_batch' + batch + '_end.csv')
+    summ = os.path.join(scored_dir, 'mturk_batch' + batch + '_summary.csv')  # avoid shadowing sum()
+ scored = os.path.join(scored_dir, 'mturk_batch' + batch + '_scored_data.csv')
+ composite = os.path.join(scored_dir, 'mturk_batch' + batch + '_composite_scores.csv')
+
+ demo_df = pd.read_csv(demo, dtype=str)
+ demo_cols.extend([x for x in demo_df.columns.tolist() if (
+ 'latency' not in x and 'online' not in x and 'Unnamed' not in x and 'time_comp' not in x and 'subj_id' not in x)])
+ print batch
+ clin_raw_df = pd.read_csv(clin_raw, dtype=str)
+ clin_raw_cols.extend(
+ [x for x in clin_raw_df.columns.tolist() if 'latency' not in x and 'end' not in x and 'Unnamed' not in x])
+    sum_df = pd.read_csv(summ, dtype=str)
+ scored_df = pd.read_csv(scored, dtype=str)
+ comp_df = pd.read_csv(composite, dtype=str).rename(index=str, columns={'Unnamed: 0': 'subject'})
+
+demo_cols = list(set(demo_cols))
+clin_raw_cols = list(set(clin_raw_cols))
+
+print demo_cols
+print clin_raw_cols
+
+# need to get latency values,
+# use the scored to set the subject ids.
+# append composite to scored_cols since they're in the same order and composite doesn't have subject ids
+# summary - use script.subjectid
+# demo - use subject
+# clin_raw - use subject
+
+# In[281]:
+
+scored_dir = '/Users/cdla/Desktop/scratch/vmreact/2_vmreact/'
+latency_csv = os.path.join(scored_dir, 'vmreact_latency_summary.csv')
+
+# for batch in range(1,9):
+for batch in [8]:
+ batch_df = pd.DataFrame()
+ batch = str(batch)
+ print 'mturk_batch' + batch
+
+ demo = os.path.join(scored_dir, 'mturk_batch' + batch + '_demo.csv')
+ clin_raw = os.path.join(scored_dir, 'mturk_batch' + batch + '_end.csv')
+    summ = os.path.join(scored_dir, 'mturk_batch' + batch + '_summary.csv')  # avoid shadowing sum()
+ scored = os.path.join(scored_dir, 'mturk_batch' + batch + '_scored_data.csv')
+ composite = os.path.join(scored_dir, 'mturk_batch' + batch + '_composite_scores.csv')
+
+ demo_df = pd.read_csv(demo, dtype=str)
+ clin_raw_df = pd.read_csv(clin_raw, dtype=str)
+    sum_df = pd.read_csv(summ, dtype=str).rename(index=str, columns={'script.subjectid': 'subject'})
+ scored_df = pd.read_csv(scored)
+ comp_df = pd.read_csv(composite).rename(index=str, columns={'Unnamed: 0': 'subject'})
+ comp_df['subject'] = comp_df['subject'].apply(int)
+ vmreact_df = pd.merge(scored_df, comp_df, left_index=True, right_on='subject', how='left').drop('subject', axis=1)
+ vmreact_df['subj_id'] = vmreact_df['subj_id'].astype(str)
+ # vmreact_df['subj_id']=vmreact_df['subj_id'].apply(pd.to_numeric)
+ latency_df = pd.read_csv(latency_csv, dtype=str)
+
+ latency_df = latency_df.drop_duplicates().reset_index()
+
+ subject_ids = vmreact_df['subj_id'].tolist()
+
+ batch_demo_cols = [x for x in demo_df.columns.tolist() if x in demo_cols]
+ append_demo_cols = [x for x in demo_cols if x not in demo_df.columns.tolist()]
+ demo_df = demo_df[demo_df['subject'].astype(str).isin(subject_ids)][batch_demo_cols]
+ for col in append_demo_cols:
+ demo_df[col] = np.nan
+ # print demo_df
+ # demo_df['subject']=demo_df['subject'].apply(pd.to_numeric)
+
+ batch_clin_cols = [x for x in clin_raw_df.columns.tolist() if x in clin_raw_cols]
+ append_clin_cols = [x for x in clin_raw_cols if x not in clin_raw_df.columns.tolist()]
+ clin_raw_df = clin_raw_df[clin_raw_df['subject'].astype(str).isin(subject_ids)][batch_clin_cols]
+ for col in sorted(append_clin_cols):
+ clin_raw_df[col] = np.nan
+ # clin_raw_df['subject']=clin_raw_df['subject'].apply(pd.to_numeric)
+
+ batch_sum_cols = [x for x in sum_df.columns.tolist() if x in sum_cols]
+ append_sum_cols = [x for x in sum_cols if x not in sum_df.columns.tolist()]
+ sum_df = sum_df[sum_df['subject'].astype(str).isin(subject_ids)][batch_sum_cols]
+ for col in sorted(append_sum_cols):
+ sum_df[col] = np.nan
+ # sum_df['subject']=sum_df['subject'].apply(pd.to_numeric)
+
+ batch_df = demo_df.merge(sum_df, left_on='subject', right_on='subject').drop(
+ ['script.startdate', 'script.starttime'], axis=1)
+ batch_df = batch_df.merge(clin_raw_df, left_on='subject', right_on='subject').drop(
+ ['date_y', 'time_y', 'group_y', 'build_y'], axis=1)
+ batch_df = batch_df.merge(vmreact_df, left_on='subject', right_on='subj_id').drop('subj_id', axis=1)
+ batch_df = batch_df.rename(columns={'date_x': 'date', 'time_x': 'time', 'group_x': 'group', 'build_x': 'build'})
+ # print batch_df
+
+ print subject_ids
+ latency_df['subjid'] = latency_df['subjid'].astype(str)
+ latency_df['date'] = latency_df['date'].astype(int)
+ batch_df['date'] = batch_df['date'].astype(int)
+
+    latency_df = latency_df.loc[
+        latency_df['subjid'].isin(batch_df['subject'].astype(str).tolist())
+        & latency_df['date'].isin(batch_df['date'].tolist())]
+
+ batch_df['subject'] = batch_df['subject'].astype(str)
+ batch_df = batch_df.merge(latency_df, left_on='subject', right_on='subjid')
+
+ batch_df.to_csv(os.path.join(scored_dir, 'mturk_batch' + batch + '_compiled.csv'))
+ os.system('open /Users/cdla/Desktop/scratch/vmreact/2_vmreact/' + 'mturk_batch' + batch + '_compiled.csv')
+
+# In[318]:
+
+dataframes_to_concat = []
+result = []
+for compiled_csv in glob(os.path.join(scored_dir, '*compiled.csv')):
+ df = pd.read_csv(compiled_csv, dtype=str)
+ dataframes_to_concat.append(df)
+
+result = pd.concat(dataframes_to_concat).reindex(columns=df.columns.tolist()).drop(
+    ['index', 'date_y', 'subjid', 'Unnamed: 0'], axis=1).dropna(how='all', axis=1).drop_duplicates()
+
+# print result.subject
+result = result[~result.subject.isin(['XXX', 'AVD6HMIO1HLFI', 'A5EU1AQJNC7F2'])]
+result.drop_duplicates(['date_x', 'subject'], inplace=True)
+display(result)
+result.to_csv(os.path.join(scored_dir, 'mturk_vmreact_complete_compilation.csv'), index=False)
+
+# In[ ]:
diff --git a/build/lib/vmreact-merges/morevmreact.py b/build/lib/vmreact-merges/morevmreact.py
new file mode 100755
index 0000000..fc1d5ee
--- /dev/null
+++ b/build/lib/vmreact-merges/morevmreact.py
@@ -0,0 +1,122 @@
+# coding: utf-8
+
+# In[40]:
+
+import os
+from glob import glob
+
+import pandas as pd
+
+# In[41]:
+
+scored_dir = '/Users/lillyel-said/Desktop/vmreact/vmreact/2_vmreact/'
+parsed_list = []
+
+for batch in range(1, 9):
+ batch = str(batch)
+ parsed = os.path.join(scored_dir, 'mturk_batch' + batch + '_parsed_raw_data.csv')
+ parsed_df = pd.read_csv(parsed, dtype=str)
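+    # parsed.split('_')[-4] recovers the 'batchN' token from the filename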
+    parsed_df['identifier'] = parsed_df['subj_id'] + '_' + parsed.split('_')[-4]
+    parsed_list.append(parsed_df)
+
+# In[42]:
+
+all_parsed = pd.concat(parsed_list, axis=0)
+all_parsed_df = pd.DataFrame(data=all_parsed)
+
+cols = ['subj_id', 'identifier', 'list_type', 'trial', 'response', 'score']
+final_csv = pd.DataFrame(data=all_parsed_df, columns=cols)
+# final_csv.to_csv(os.path.join(scored_dir,'parsed_raw_with_errors.csv'))
+
+
+# In[43]:
+
+zero = final_csv.loc[final_csv['score'] == '0'].dropna()
+incorrect_df = pd.DataFrame(data=zero)
+
+# incorrect_df.to_csv(os.path.join(scored_dir,'incorrect_response.csv'))
+
+
+# In[56]:
+
+rey_word_lists = {
+ 'rey_list_presentation_1a': ['drum', 'curtain', 'bell', 'coffee', 'school', 'parent', 'moon', 'garden',
+ 'hat', 'farmer', 'nose', 'turkey', 'color', 'house', 'river'],
+
+ 'rey_list_presentation_2a': ['pipe', 'wall', 'alarm', 'sugar', 'student', 'mother', 'star', 'painting',
+ 'bag', 'wheat', 'mouth', 'chicken', 'sound', 'door', 'stream'],
+
+ 'rey_list_presentation_3a': ['violin', 'tree', 'scarf', 'ham', 'suitcase', 'cousin', 'earth', 'stairs',
+ 'dog', 'banana', 'town', 'radio', 'hunter', 'bucket', 'field'],
+
+ 'rey_list_presentation_4a': ['doll', 'mirror', 'nail', 'sailor', 'heart', 'desert', 'face', 'letter', 'bed',
+ 'machine', 'milk', 'helmet', 'music', 'horse', 'road'],
+ 'rey_list_presentation_1b': ['desk', 'ranger', 'bird', 'shoe', 'stove', 'mountain', 'glasses', 'towel',
+ 'cloud', 'boar', 'lamb', 'gun', 'pencil', 'church', 'fish'],
+ 'rey_list_presentation_2b': ['bench', 'officer', 'cage', 'sock', 'fridge', 'cliff', 'bottle', 'soap',
+ 'sky', 'ship', 'goat', 'bullet', 'paper', 'chapel', 'crab'],
+ 'rey_list_presentation_3b': ['orange', 'table', 'toad', 'corn', 'bus', 'chin', 'bleach', 'soap', 'hotel',
+ 'donkey', 'spider', 'money', 'book', 'soldier', 'padlock'],
+ 'rey_list_presentation_4b': ['dish', 'jester', 'hill', 'coat', 'tool', 'forest', 'perfume', 'ladder',
+ 'girl', 'foot', 'shield', 'pie', 'insect', 'ball', 'car']
+}
+word_lists_df = pd.DataFrame.from_dict(rey_word_lists)
+
+# In[ ]:
+
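+# group incorrect responses by word list; the frame has a flat index, so group
+# on the 'list_type' column rather than an index level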
+for lists, response in incorrect_df.groupby('list_type'):
+ print response
+
+# In[55]:
+
+
+# In[45]:
+
+cols = ['typing_test_openended_sentence1', 'typing_speed_next_trial', 'typing_test_openended_sentence2',
+        'typing_test_error2', 'typing_speed_next_trial_2',
+        'typing_test_openended_sentence3']
+scored_dir = '/Users/lillyel-said/Desktop/vmreact/vmreact/1_rawdata/'
+
+typing_test_list = []
+for scored_csv in glob(os.path.join(scored_dir, '*mturk*', '*raw.csv')):
+ raw_csv = pd.read_csv(scored_csv, dtype=str)
+ typing_test = raw_csv.loc[raw_csv['blockcode'] == 'typing_test']
+ typing_test_list.append(typing_test)
+
+# typingtest=df.loc[df['trialcode'].str.contains(trial),'trialcode']
+
+combined = pd.concat(typing_test_list, axis=0)
+
+# In[46]:
+
+typing_test_cols = combined.columns.tolist()
+final_cols = ['subject', 'date', 'blockcode', 'trialcode', 'response', 'latency']
+typing_test_only = combined[final_cols]
+unique_cols = combined['trialcode'].unique().tolist()
+
+# In[47]:
+
+typing_test_only.to_csv(os.path.join(scored_dir, 'typing_test_raw.csv'))
+
+# In[48]:
+
+# for ix, response in typing_test_only.groupby('subject'):
+# if (response.response != 57).any():
+# print response['trialcode'][0] ==
+# print response.subject.head(),response.response.unique()
+
+
+# In[38]:
+
+sentence_1 = ['typing_test_openended_sentence1', 'typing_speed_next_trial']
+sentence_2 = ['typing_test_openended_sentence2', 'typing_speed_next_trial_2']
+sentence_3 = ['typing_test_openended_sentence3', 'typing_speed_next_trial_3']
+
+for i, sentence in typing_test_only.groupby('subject'):
+ print sentence
+
+# In[ ]:
+
+
+# In[ ]:
diff --git a/build/lib/vmreact-merges/numbers_figures_vmreact.py b/build/lib/vmreact-merges/numbers_figures_vmreact.py
new file mode 100755
index 0000000..da47291
--- /dev/null
+++ b/build/lib/vmreact-merges/numbers_figures_vmreact.py
@@ -0,0 +1,213 @@
+# coding: utf-8
+
+# In[ ]:
+
+import os
+
+import matplotlib.pyplot as plt
+import numpy as np
+import pandas as pd
+
+# In[ ]:
+
+data_dir = '/Users/lillyel-said/Desktop/vmreact/vmreact/2_vmreact/'
+all_data = pd.read_csv(os.path.join(data_dir, 'mturk_vmreact_complete_compilation.csv'))
+indexed_df = pd.DataFrame(data=all_data.set_index(['gender_response', 'age_range']))
+
+# In[ ]:
+
+
+print indexed_df['time_comp_response'].value_counts()
+print indexed_df['online_sr_q2option1_response'].value_counts()
+print indexed_df['online_sr_q2option2_response'].value_counts()
+print indexed_df['online_sr_q2option3_response'].value_counts()
+
+indexed_df.hist(
+ column=['expressions.pcl_total_hybridscore_corrected', 'expressions.phq_total', 'expressions.pcl_4_total',
+ 'gad_7_q2_response'])
+# indexed_df.hist(column=['online_sr_q2option1_response','online_sr_q2option2_response','online_sr_q2option3_response'])
+
+
+# In[ ]:
+
+for idx, data in indexed_df.groupby(level=[0, 1]):
+ print idx
+ print data['education_response'].T.value_counts()
+
+# data.hist(column=['expressions.pcl_total_hybridscore_corrected','expressions.phq_total','expressions.pcl_4_total','gad_7_q2_response'])
+
+
+# In[ ]:
+
+y = ['trial1', 'trial2', 'trial3', 'trial4', 'trial5', 'listb', 'trial6', 'trial7']
+colors = ['lightskyblue', 'lightskyblue', 'lightskyblue', 'lightskyblue', 'lightskyblue', '#96D38C', '#E1396C',
+ '#E1396C']
+
+columns = [c for c in y]
+bins = range(0, 17)
+
+for idx, val in indexed_df.groupby(level=[0, 1]):
+ if len(val[columns]) > 3:
+ trials = val.loc[:, 'listb':'trial7']
+ fig, axes = plt.subplots(nrows=4, ncols=2, figsize=(14, 12))
+ fig = trials.plot.hist(y=y, bins=bins, subplots=True, ax=axes, legend=True, title=idx, xticks=range(0, 16),
+ xlim=(0, 16), fontsize=10, color=colors)
+ for (m, n), subplot in np.ndenumerate(axes):
+ ymin, ymax = subplot.get_ylim()
+ subplot.set_ylim(0, (ymax + 1))
+ else:
+ print idx, 'n too small'
+
+# In[ ]:
+
+# all_data.rename(columns={'gender_response':'gender','age_textbox_response':'age','date_x':'date'},inplace=True)
+
+
+# In[ ]:
+
+trial_latency_cols = ['subject', 'date']
+
+trial_latency_cols.extend(
+ [col for col in indexed_df.columns.tolist() if 'firstcharlatency' in col or 'response_latency' in col])
+
+first_char_df = pd.DataFrame(data=all_data,
+ columns=[c for c in trial_latency_cols if 'firstchar' in c or 'subject' in c])
+recall_df = pd.DataFrame(data=all_data, columns=[r for r in trial_latency_cols if 'response' in r or 'subject' in r])
+
+first_char_df = first_char_df.set_index('subject').dropna()
+recall_df = recall_df.set_index('subject').dropna()
+
+# In[ ]:
+
+trials = ['trial1', 'trial2', 'trial3', 'trial4', 'trial5', 'listb', 'trial6', 'trial7']
+new = pd.DataFrame()
+
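+# per-trial initiation latency: total response latency minus the latency of the
+# first typed character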
+for t in trials:
+ new[t + '_initiation_latency'] = all_data[t + '_values.response_latency'].subtract(
+ all_data[t + '_values.recall_firstcharlatency'])
+
+# In[ ]:
+
+pd.concat([all_data, new], axis=1).to_csv(os.path.join(data_dir, 'mturk_vmreact_complete_compilation_initiation.csv'))
+
+# In[ ]:
+
+import os
+import numpy as np
+import pandas as pd
+from glob import glob
+
+# In[ ]:
+
+data_dir = '/Users/lillyel-said/Desktop/vmreact/final_inquisit_launches/launches/broken_up_by_each_launch/0612217_reyravlt_antr_pilot4'
+output_dir = '/Users/lillyel-said/Desktop/vmreact/final_inquisit_launches/launches/broken_up_by_each_launch/0612217_reyravlt_antr_pilot4/test'
+
+# In[ ]:
+
+trials = ['trial1', 'trial2', 'trial3', 'trial4', 'trial5', 'listb', 'trial6', 'trial7']
+
+cols = ['values.response_latency', 'expressions.trial_recall_word_latency',
+ 'values.recall_firstcharlatency', 'values.recall_lastcharlatency']
+
+column_titles = ['subjid', 'date']
+
+for trial in trials:
+ for meas in cols:
+ column_titles.append(trial + "_" + meas)
+
+final_csv = [column_titles]
+print cols
+
+# In[ ]:
+
+total_columns = []
+for data_file in glob(os.path.join(data_dir, '*raw.csv')):
+ data_df = pd.read_csv(data_file, dtype=str)
+ data_df.loc[data_df['response'] == ' ', 'trialcode'] = 'trial_confirmation'
+ for trial in ['trial1', 'trial2', 'trial3', 'trial4', 'trial5', 'trial6', 'trial7', 'trial8', 'listb']:
+ data_df.loc[data_df['trialcode'].str.contains(trial), 'trialcode'] = trial
+
+    data_df.rename(columns={'latency': 'values.response_latency'}, inplace=True)
+
+ for col in data_df.columns.tolist():
+ if col not in total_columns:
+ total_columns.append(col)
+print sorted(total_columns)
+for t in trials:
+ new[t + '_initiation_latency'] = all_data[t + '_values.response_latency'].subtract(
+ all_data[t + '_values.recall_firstcharlatency'])
+
+# In[ ]:
+
+cols = ['typing_test_openended_sentence1', 'typing_speed_next_trial', 'typing_test_openended_sentence2',
+        'typing_test_error2', 'typing_speed_next_trial_2',
+        'typing_test_openended_sentence3']
+
+for data_file in glob(os.path.join(data_dir, '*raw.csv')):
+ df = pd.read_csv(data_file, dtype=str)
+ typing_test = df.loc[df['blockcode'] == 'typing_test']
+
+# for trial in cols:
+#
+# typingtest=df.loc[df['trialcode'].str.contains(trial),'trialcode']
+# print df.loc[df['trialcode']==trial]
+
+
+# In[ ]:
+
+for data_file in glob(os.path.join(data_dir, '*raw.csv')):
+ data_df = pd.read_csv(data_file, dtype=str)
+ data_df.loc[data_df['response'] == ' ', 'trialcode'] = 'trial_confirmation'
+
+ for trial in ['trial1', 'trial2', 'trial3', 'trial4', 'trial5', 'trial6', 'trial7', 'trial8', 'listb']:
+ data_df.loc[data_df['trialcode'].str.contains(trial), 'trialcode'] = trial
+
+ data_df.rename(columns={'latency': 'values.response_latency'}, inplace=True)
+
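+    # sessions that logged a 'trial8' code used a shifted layout: their 'trial6'
+    # was actually list B, and 'trial7'/'trial8' were trials 6 and 7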
+ subj_list = data_df.loc[data_df['trialcode'] == 'trial8', 'subject'].unique()
+ if len(subj_list) > 0:
+ data_df.loc[(data_df['trialcode'] == 'trial6') & (data_df['subject'].isin(subj_list)), 'trialcode'] = 'listb'
+ data_df.loc[(data_df['trialcode'] == 'trial7') & (data_df['subject'].isin(subj_list)), 'trialcode'] = 'trial6'
+ data_df.loc[(data_df['trialcode'] == 'trial8') & (data_df['subject'].isin(subj_list)), 'trialcode'] = 'trial7'
+
+ for subj, subj_df in data_df.groupby(['subject']):
+ measures = []
+ for trial, trial_df in subj_df.groupby(['trialcode']):
+ if trial in ['trial1', 'trial2', 'trial3', 'trial4', 'trial5', 'listb', 'trial6', 'trial7']:
+ trial_measures = [np.nan] * 4
+ for idx, meas in enumerate(cols):
+ if 'values.recall' in meas:
+ print subj, meas, trial_df['values.recall_lastcharlatency'].astype(float).subtract(
+ trial_df['values.recall_firstcharlatency'].astype(float))
+                    try:
+                        trial_measures[idx] = round(trial_df[meas].astype('float').mean(), 4)
+                        # print meas,trial_measures[idx]
+                        # subj_df[meas].subtract(subj_df[trial+'_values.recall_firstcharlatency'])
+                    except:
+                        trial_measures[idx] = np.nan
+                        continue
+
+ measures.append([trial] + trial_measures)
+ elif trial == 'trial_confirmation':
+ confirmation_mean = trial_df['values.response_latency'].astype(float).mean()
+ confirmation_vals = trial_df['values.response_latency'].astype(float)
+ subj_line = [subj, subj_df['date'].unique().astype(str)[0]]
+# for trial in ['trial1','trial2','trial3','trial4','trial5','listb','trial6','trial7']:
+# try:
+# trial_idx=[meas[0] for meas in measures].index(trial)
+# subj_line.extend(measures[int(trial_idx)][1:])
+# except:
+# subj_line.exteend(4*np.nan)
+# continue
+
+# print confirmation_mean,confirmation_vals
+# final_csv.append(subj_line)
+
+
+# In[ ]:
+
+
+# In[ ]:
diff --git a/build/lib/vmreact-merges/single_inq_subj.py b/build/lib/vmreact-merges/single_inq_subj.py
new file mode 100755
index 0000000..6904796
--- /dev/null
+++ b/build/lib/vmreact-merges/single_inq_subj.py
@@ -0,0 +1,104 @@
+# coding: utf-8
+
+# In[ ]:
+
+# import os
+# import numpy as np
+# import pandas as pd
+# import csv
+# from glob import glob
+
+
+# In[ ]:
+
+# data_dir='/Users/lillyel-said/Desktop/vmreact/vmreact/1_rawdata/data/'
+# output_dir='/Users/lillyel-said/Desktop/vmreact/vmreact/1_rawdata/data/'
+
+
+# In[ ]:
+
+# trials=['trial1','trial2','trial3','trial4','trial5','listb','trial6','trial7']
+
+# cols=['values.response_latency', 'expressions.trial_recall_word_latency',
+# 'values.recall_firstcharlatency', 'values.recall_lastcharlatency']
+
+# column_titles=['subjid','date']
+
+# for trial in trials:
+# for meas in cols:
+# column_titles.append(trial+"_"+meas)
+
+# final_csv=[column_titles]
+
+
+# In[ ]:
+
+# total_columns=[]
+# for data_file in glob(os.path.join(data_dir,'*')):
+
+# data_df=pd.read_csv(data_file,dtype=str)
+# data_df.loc[data_df['response']==' ','trialcode'] = 'trial_confirmation'
+
+# for trial in ['trial1','trial2','trial3','trial4','trial5','trial6','trial7','trial8','listb']:
+# data_df.loc[data_df['trialcode'].str.contains(trial),'trialcode']=trial
+
+# data_df.rename(columns
+# ={'latency':'values.response_latency'},inplace=True)
+
+# for col in data_df.columns.tolist():
+# if col not in total_columns:
+# total_columns.append(col)
+# print sorted(total_columns)
+
+
+# In[ ]:
+
+# for data_file in glob(os.path.join(data_dir,'*.csv')):
+
+# data_df=pd.read_csv(data_file,dtype=str)
+# data_df.loc[data_df['response']==' ','trialcode'] = 'trial_confirmation'
+
+# for trial in ['trial1','trial2','trial3','trial4','trial5','trial6','trial7','trial8','listb']:
+# data_df.loc[data_df['trialcode'].str.contains(trial),'trialcode']=trial
+
+# data_df.rename(columns={'latency':'values.response_latency'},inplace=True)
+
+# subj_list=data_df.loc[data_df['trialcode'] == 'trial8','subject'].unique()
+# if len(subj_list) > 0:
+# data_df.loc[(data_df['trialcode'] == 'trial6') & (data_df['subject'].isin(subj_list)),'trialcode']='listb'
+# data_df.loc[(data_df['trialcode'] == 'trial7') & (data_df['subject'].isin(subj_list)),'trialcode']='trial6'
+# data_df.loc[(data_df['trialcode'] == 'trial8') & (data_df['subject'].isin(subj_list)),'trialcode']='trial7'
+
+# for subj,subj_df in data_df.groupby(['subject']):
+# measures=[]
+# for trial,trial_df in subj_df.groupby(['trialcode']):
+# if trial in ['trial1','trial2','trial3','trial4','trial5','listb','trial6','trial7']:
+# trial_measures=[np.nan]*4
+# for idx,meas in enumerate(cols):
+# try:
+# trial_measures[idx]=round(trial_df[meas].astype('float').mean(),4)
+# xnew[trial+'_'+meas]=trial_df['values.response_latency']
+
+# for t in trials:
+# [t+'_initiation_latency']=all_data[t+'_values.response_latency'].subtract(all_data[t+'_values.recall_firstcharlatency'])
+
+# except:
+# trial_measures[idx]=np.nan
+# continue
+# break
+# measures.append([trial] + trial_measures)
+# elif trial == 'trial_confirmation':
+# confirmation_mean=trial_df['values.response_latency'].astype(float).mean()
+# confirmation_vals=trial_df['values.response_latency'].astype(float)
+# subj_line=[subj,subj_df['date'].unique().astype(str)[0]]
+
+# for trial in ['trial1','trial2','trial3','trial4','trial5','listb','trial6','trial7']:
+# try:
+# trial_idx=[meas[0] for meas in measures].index(trial)
+# subj_line.extend(measures[int(trial_idx)][1:])
+# except:
+# subj_line.exteend(4*np.nan)
+# continue
+
+# print confirmation_mean,confirmation_vals
+# final_csv.append(subj_line)
diff --git a/build/lib/vmreact-merges/vmreact_compilation.py b/build/lib/vmreact-merges/vmreact_compilation.py
new file mode 100755
index 0000000..fbd2ef9
--- /dev/null
+++ b/build/lib/vmreact-merges/vmreact_compilation.py
@@ -0,0 +1,810 @@
+# coding: utf-8
+
+# In[2]:
+
+import collections
+import csv
+import datetime
+from difflib import SequenceMatcher
+from glob import glob
+from math import ceil
+from shutil import copy, move
+
+import pandas as pd
+from IPython.display import display
+
+
+# #Grading Script
+
+# In[102]:
+
+
+def grader(all_subj_data_csv, data_output_raw_csv, data_output_scored_csv, word_corr, p_r):
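+    """Score recall responses against the Rey word lists.
+
+    p_r selects the word window: 0 scores against the full 15-word lists,
+    1 against the first five words (primacy), 2 against the last five
+    (recency). A response counts as correct when it matches a list word
+    exactly or with a SequenceMatcher ratio above 0.8.
+    """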
+ with open(all_subj_data_csv, 'U') as file:
+ input_csv_lines_all_subj = csv.reader(file)
+ input_csv_lines_all_subj = map(list, zip(*input_csv_lines_all_subj))
+ all_subj_csv_lines = dict((rows[0], rows[1:]) for rows in input_csv_lines_all_subj)
+
+ subj_listtype = []
+ for idx, row in enumerate(all_subj_csv_lines['subject']):
+ if 'rey_list' in all_subj_csv_lines['trialcode'][idx]:
+ subj_listtype.append([all_subj_csv_lines['subject'][idx], all_subj_csv_lines['trialcode'][idx]])
+
+ set_subj_listtype = []
+ for subj in subj_listtype:
+ if subj not in set_subj_listtype:
+ set_subj_listtype.append(subj)
+
+ ## count per list type
+ index_number_resp = dict()
+ for list_type in sorted([x for x in set(all_subj_csv_lines['trialcode']) if 'rey_list' in x]):
+ index_number_resp[list_type] = []
+
+ for idx, response in enumerate(all_subj_csv_lines['response']):
+ if 'recall_response' in all_subj_csv_lines['trialcode'][idx]:
+ if 'listb' not in all_subj_csv_lines['trialcode'][idx]:
+ index_number_resp[
+ set_subj_listtype[[x[0] for x in set_subj_listtype].index(all_subj_csv_lines['subject'][idx])][
+ 1]].append(response.lower().strip())
+ elif 'listb' in all_subj_csv_lines['trialcode'][idx]:
+ index_number_resp[
+ set_subj_listtype[[x[0] for x in set_subj_listtype].index(all_subj_csv_lines['subject'][idx])][1][
+ :-1] + 'b'].append(response.lower().strip())
+
+ counter_dict = dict()
+ for list_type in sorted(index_number_resp.keys()):
+ rey_recall_word_count = collections.Counter(index_number_resp[list_type])
+ counter_dict[list_type] = rey_recall_word_count
+
+ total_response_for_list = dict()
+ for list_type in sorted(index_number_resp.keys()):
+ total_response_for_list[list_type] = sorted(set(index_number_resp[list_type]))
+
+ if p_r == 0:
+ rey_word_lists = {
+ 'rey_list_presentation_1a': ['drum', 'curtain', 'bell', 'coffee', 'school', 'parent', 'moon', 'garden',
+ 'hat', 'farmer', 'nose', 'turkey', 'color', 'house', 'river'],
+
+ 'rey_list_presentation_2a': ['pipe', 'wall', 'alarm', 'sugar', 'student', 'mother', 'star', 'painting',
+ 'bag', 'wheat', 'mouth', 'chicken', 'sound', 'door', 'stream'],
+
+ 'rey_list_presentation_3a': ['violin', 'tree', 'scarf', 'ham', 'suitcase', 'cousin', 'earth', 'stairs',
+ 'dog', 'banana', 'town', 'radio', 'hunter', 'bucket', 'field'],
+
+ 'rey_list_presentation_4a': ['doll', 'mirror', 'nail', 'sailor', 'heart', 'desert', 'face', 'letter', 'bed',
+ 'machine', 'milk', 'helmet', 'music', 'horse', 'road'],
+ 'rey_list_presentation_1b': ['desk', 'ranger', 'bird', 'shoe', 'stove', 'mountain', 'glasses', 'towel',
+ 'cloud', 'boar', 'lamb', 'gun', 'pencil', 'church', 'fish'],
+ 'rey_list_presentation_2b': ['bench', 'officer', 'cage', 'sock', 'fridge', 'cliff', 'bottle', 'soap',
+ 'sky', 'ship', 'goat', 'bullet', 'paper', 'chapel', 'crab'],
+ 'rey_list_presentation_3b': ['orange', 'table', 'toad', 'corn', 'bus', 'chin', 'bleach', 'soap', 'hotel',
+ 'donkey', 'spider', 'money', 'book', 'soldier', 'padlock'],
+ 'rey_list_presentation_4b': ['dish', 'jester', 'hill', 'coat', 'tool', 'forest', 'perfume', 'ladder',
+ 'girl', 'foot', 'shield', 'pie', 'insect', 'ball', 'car']
+ }
+ elif p_r == 1:
+ rey_word_lists = {'rey_list_presentation_1a': ['drum', 'curtain', 'bell', 'coffee', 'school'],
+ 'rey_list_presentation_2a': ['pipe', 'wall', 'alarm', 'sugar', 'student'],
+ 'rey_list_presentation_3a': ['violin', 'tree', 'scarf', 'ham', 'suitcase'],
+ 'rey_list_presentation_4a': ['doll', 'mirror', 'nail', 'sailor', 'heart'],
+ 'rey_list_presentation_1b': ['desk', 'ranger', 'bird', 'shoe', 'stove'],
+ 'rey_list_presentation_2b': ['bench', 'officer', 'cage', 'sock', 'fridge'],
+ 'rey_list_presentation_3b': ['orange', 'table', 'toad', 'corn', 'bus'],
+ 'rey_list_presentation_4b': ['dish', 'jester', 'hill', 'coat', 'tool']
+ }
+ elif p_r == 2:
+ rey_word_lists = {'rey_list_presentation_1a': ['nose', 'turkey', 'color', 'house', 'river'],
+ 'rey_list_presentation_2a': ['mouth', 'chicken', 'sound', 'door', 'stream'],
+ 'rey_list_presentation_3a': ['town', 'radio', 'hunter', 'bucket', 'field'],
+ 'rey_list_presentation_4a': ['milk', 'helmet', 'music', 'horse', 'road'],
+ 'rey_list_presentation_1b': ['lamb', 'gun', 'pencil', 'church', 'fish'],
+ 'rey_list_presentation_2b': ['goat', 'bullet', 'paper', 'chapel', 'crab'],
+ 'rey_list_presentation_3b': ['spider', 'money', 'book', 'soldier', 'padlock'],
+ 'rey_list_presentation_4b': ['shield', 'pie', 'insect', 'ball', 'car']
+ }
+
+ with open(word_corr, 'wb') as csvfile:
+ writer = csv.writer(csvfile, delimiter=',')
+ for word_list in sorted(total_response_for_list.keys()):
+ word_corrs = []
+ for word in total_response_for_list[word_list]:
+ wordcorrs = [round(SequenceMatcher(None, word, x).ratio(), 3) for x in rey_word_lists[word_list]]
+ word_corrs.append(wordcorrs)
+ writer.writerow([word, max(wordcorrs), rey_word_lists[word_list][wordcorrs.index(max(wordcorrs))]])
+ csvfile.close()
+
+ subj_id_list = []
+ subj_only = []
+ for subj in sorted(set(all_subj_csv_lines['subject'])):
+ try:
+ subj_list_type = [all_subj_csv_lines['trialcode'][x] for x in range(len(all_subj_csv_lines['subject']))
+ if (all_subj_csv_lines['subject'][x] == subj) and (
+ 'rey_list_presentation_' in all_subj_csv_lines['trialcode'][x])][0]
+ subj_id_list.append([subj, subj_list_type])
+ subj_only.append(subj)
+        except IndexError:  # subject has no rey_list_presentation trial
+ print "%s has an error in their data" % subj
+ continue
+
+ full_raw_data_responses = [[all_subj_csv_lines['subject'][x], all_subj_csv_lines['trialcode'][x],
+ all_subj_csv_lines['response'][x].lower()]
+ for x in range(len(all_subj_csv_lines['subject']))
+ if 'recall_response' in all_subj_csv_lines['trialcode'][x]]
+ all_responses = []
+ repeats = []
+ list_b_all = []
+ list_a_all = []
+ with open(data_output_raw_csv, 'wb') as csvfile:
+ writer = csv.writer(csvfile, delimiter=',')
+ writer.writerow(('subj_id', 'list_type', 'trial', 'response', 'score'))
+ for response in full_raw_data_responses:
+ subj = response[0]
+ list_to_use = [subj_id_list[x][1] for x in range(len(subj_id_list)) if subj_id_list[x][0] == subj][0]
+ list_a_all.append(list_to_use)
+ list_b = list_to_use[:-1] + 'b'
+ list_b_all.append(list_b)
+ if 'listb' in response[1]:
+ if response[2] in rey_word_lists[list_b]:
+ response.append(1)
+ else:
+ if any(n > 0.8 for n in
+ [SequenceMatcher(None, response[2], x).ratio() for x in rey_word_lists[list_b]]):
+ response.append(1)
+ else:
+ response.append(0)
+ new_row = response[0], list_b, response[1].split('_')[0], response[2], response[3]
+ else:
+ if response[2] in rey_word_lists[list_to_use]:
+ response.append(1)
+ else:
+ if any(n > 0.8 for n in
+ [SequenceMatcher(None, response[2], x).ratio() for x in rey_word_lists[list_to_use]]):
+ response.append(1)
+ else:
+ response.append(0)
+ new_row = response[0], list_to_use, response[1].split('_')[0], response[2], response[3]
+ writer.writerow(new_row)
+ all_responses.append(response)
+ rep = new_row
+ repeats.append(rep)
+ csvfile.close()
+
+ trial_breaks = []
+ trial_lines = [all_responses[y][1] for y in range(0, len(all_responses))]
+ trial_breaks = [i for i, x in enumerate(trial_lines[0:])
+ if x.split('_')[0] != trial_lines[i - 1].split('_')[0]]
+
+ trial_breaks = trial_breaks + [len(all_responses)]
+
+ subj_scores = []
+ final = []
+ final_repeats = []
+ for idx, val in enumerate(trial_breaks[:-1]):
+ score = 0
+ word_list = []
+ for line in all_responses[trial_breaks[idx]:trial_breaks[idx + 1]]:
+ if line[3] == 1:
+ score = score + 1
+ word_list.append(line[2])
+ test = []
+ for idx, word in enumerate(word_list):
+ test.append([SequenceMatcher(None, word, x).ratio() for x in
+ [y for idx2, y in enumerate(word_list) if idx != idx2]])
+ repeats = 0
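+        # each response was fuzzy-matched against every other response in the
+        # trial; matches with ratio > 0.8 count as near-duplicates. A word typed
+        # twice produces two mutual hits of ((1*2)-1)/(1+1) = 0.5 each, i.e. one
+        # repeat in total.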
+ for word in test:
+ word_thresholded = [ceil(x) for x in word if x > 0.8]
+ n = sum(word_thresholded)
+ if n != 0:
+ repeats = repeats + (((n * (n + 1)) - 1) / (n + 1))
+ subj_scores.append([line[0], line[1].split('_')[0], score, repeats])
+
+ with open(data_output_scored_csv, 'wb') as csvfile:
+ writer = csv.writer(csvfile, delimiter=',')
+ writer.writerow(
+ ['subj_id', 'list_type', 'listb', 'trial1', 'trial2', 'trial3', 'trial4', 'trial5', 'trial6', 'trial7',
+ 'listb_#_repeats', 'trial1_#_repeats', 'trial2_#_repeats', 'trial3_#_repeats', 'trial4_#_repeats',
+ 'trial5_#_repeats', 'trial6_#_repeats', 'trial7_#_repeats'])
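+        # 'placeholder' is a sentinel so the look-ahead comparison on the last
+        # subject (sorted(subj_scores)[idx + 1]) never runs off the end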
+ subj_scores = subj_scores + ['placeholder']
+ for idx, scores in enumerate(sorted(subj_scores[:-1])):
+ scored = str(scores[2] - scores[3])
+ repeat_nm = scores[3]
+ final.append(scored)
+ final_repeats.append(repeat_nm)
+ subj_id = [scores[0]]
+ for idx2, val in enumerate(subj_id_list):
+ if subj_id[0] == subj_id_list[idx2][0]:
+ subj_list = subj_id_list[idx2][1].split('_')[3]
+ final_row = subj_id + [subj_list] + final + final_repeats
+ if scores[0] != sorted(subj_scores)[idx + 1][0]:
+ writer.writerow(final_row)
+ final_row = []
+ subj_id = []
+ final = []
+ final_repeats = []
+ csvfile.close()
+
+
+# #demo and age range function
+#
+
+# In[103]:
+
+
+def demo_and_summary(all_subj_data_csv, demographic_data, final_summary_csv, frequency_count, subj_age_agerange_gender,
+ sr_responses, summary_ant_scores):
+ with open(all_subj_data_csv, 'U') as file:
+ input_csv_lines_all_subj = csv.reader(file)
+ input_csv_lines_all_subj = map(list, zip(*input_csv_lines_all_subj))
+ all_subj_csv_lines = dict((rows[0], rows[1:]) for rows in input_csv_lines_all_subj)
+
+ with open(demographic_data, 'U') as file:
+ input_demo_sr_q_csv = csv.reader(file)
+ input_demo_sr_q_csv = map(list, zip(*input_demo_sr_q_csv))
+ demographic_data = dict((rows[0], rows[1:]) for rows in (input_demo_sr_q_csv))
+
+ with open(final_summary_csv, 'U') as file:
+ final_summary_lines = csv.reader(file)
+ final_summary_lines = map(list, zip(*final_summary_lines))
+ rey_summary = dict((rows[0], rows[1:]) for rows in (final_summary_lines))
+
+ age_ranges = {
+ '16-19': range(16, 20, 1),
+ '20-29': range(20, 30, 1),
+ '30-39': range(30, 40, 1),
+ '40-49': range(40, 50, 1),
+ '50-59': range(50, 60, 1),
+ '57-69': range(57, 70, 1),
+ '70-79': range(70, 80, 1),
+ '76-89': range(76, 90, 1)
+ }
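+    # note: '50-59'/'57-69' and '70-79'/'76-89' overlap, so a subject whose age
+    # falls in an overlap matches (and is written) once per matching range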
+ subj_id_list_demo = []
+ subj_id_only_demo = []
+
+ for subject in sorted(set(all_subj_csv_lines['subject'])):
+ subj_id_only_demo.append(subject)
+ subj_id_list_combined = [demographic_data['subject'][x] for x in range(len(demographic_data['subject'])) if
+ demographic_data['subject'][x] == subject]
+ subj_id_list_demo.append(subj_id_list_combined)
+
+ subj_id_combined = [(idx, val) for idx, val in enumerate(sorted(subj_id_only_demo))]
+
+ subj_val = []
+ key_val_all = []
+ for key in sorted(demographic_data.keys()):
+ for value in sorted(demographic_data[key]):
+ key_val_all.append([key, value])
+ if 'subject' in key:
+ subj_val.append(value)
+ else:
+ continue
+
+ subj_id_with_index = list()
+ for subj_num in subj_val:
+ subj_combined = [[idx, val] for idx, val in enumerate(sorted(subj_id_only_demo)) if val == subj_num]
+ subj_indexvals = [[idx, val] for idx, val in enumerate(sorted(subj_id_only_demo))]
+ subj_id_with_index.append(subj_combined)
+
+ new_demo_dict = dict()
+ for key_var in sorted(demographic_data.keys()):
+        if not any(tag in key_var for tag in ('latency', 'group', 'build', 'time', 'date')):
+ new_demo_dict[key_var] = []
+
+ for index1, val1 in enumerate(key_val_all):
+ if val1[0] in new_demo_dict.keys():
+ new_demo_dict[val1[0]].append(val1[1])
+
+ counter_demo_dict = dict()
+ for key_q in sorted(new_demo_dict.keys()):
+ answer_count = collections.Counter(new_demo_dict[key_q])
+ print answer_count
+ counter_demo_dict[key_q] = answer_count
+
+ with open(frequency_count, 'wb') as csvfile:
+ writer = csv.writer(csvfile, delimiter=',')
+ writer.writerow(['survey_question', 'response_counts'])
+ for key, value in sorted(counter_demo_dict.items()):
+ writer.writerow([key, value])
+ csvfile.close()
+
+ subj_age_gender_mem = []
+ x = []
+ for idx2, subj_id in enumerate(subj_id_only_demo):
+ subj_age_gen = [[demographic_data['subject'][x], demographic_data['gender_response'][x].lower(),
+ demographic_data['age_textbox_response'][x]] for x in range(len(demographic_data['subject']))
+ if demographic_data['subject'][x] == subj_id]
+ y = [[demographic_data['subject'][x]] for x in range(len(demographic_data['subject'])) if
+ demographic_data['subject'][x] == subj_id]
+ subj_age_gender_mem.append(subj_age_gen)
+
+ demo_subj_age_gender = [[demographic_data['subject'][x], demographic_data['gender_response'][x].lower(),
+ demographic_data['age_textbox_response'][x]]
+ for x in range(len(demographic_data['subject']))
+ if demographic_data['subject'][x]]
+
+ raw_data_responses = [[all_subj_csv_lines['subject'][x], all_subj_csv_lines['trialcode'][x],
+ all_subj_csv_lines['response'][x].lower()]
+ for x in range(len(all_subj_csv_lines['subject']))
+ if 'recall_response' in all_subj_csv_lines['trialcode'][x]]
+
+ key_val = []
+ for key in age_ranges.keys():
+ for val in age_ranges[key]:
+ key_val.append([key, val])
+
+ id_age_agerange = []
+ with open(subj_age_agerange_gender, 'wb') as csvfile:
+ writer = csv.writer(csvfile, delimiter=',')
+ writer.writerow(['subj_id', 'gender', 'age', 'age_range'])
+ for subj in sorted(demo_subj_age_gender):
+ subj_from_main_raw_list = []
+ ages = subj[2]
+ gender = subj[1]
+ subj_id_raw = [val for val in raw_data_responses if val[0] == subj[0]]
+ for vals in key_val:
+ age_vals = vals[1]
+ age_vals = str(age_vals)
+ if age_vals == ages:
+ complete_list = subj[0] + ',' + gender + "," + age_vals + "," + vals[0]
+ id_age_agerange.append(complete_list)
+ writer.writerow([subj[0], gender, age_vals, vals[0]])
+ csvfile.close()
+
+ subj_id_only = []
+ for subject in sorted(set(all_subj_csv_lines['subject'])):
+ subj_id_only.append(subject)
+
+ subj_id_memory = [subj_mem_trials for subj_mem_trials in subj_id_only]
+
+ subj_ids_summary = [x for x in rey_summary['script.subjectid']]
+ subj_ids_summary = sorted(subj_ids_summary)
+
+ summary_key_val = []
+ for key in sorted(rey_summary.keys()):
+ for value in sorted(rey_summary[key]):
+ summary_key_val.append([key, value])
+
+ new_summary_dict = dict()
+ for sum_key in sorted(rey_summary.keys()):
+        if not any(tag in sum_key for tag in (
+                'script.starttime', 'script.startdate', 'script.elapsedtime',
+                'values.trialcount', 'values.completed',
+                'parameters.min_validlatency', 'computer.platform')):
+ new_summary_dict[sum_key] = []
+
+ for sum_idx, sum_val in enumerate(summary_key_val):
+ if sum_val[0] in new_summary_dict.keys():
+ new_summary_dict[sum_val[0]].append(sum_val[1])
+
+ subject_summary_sr_responses = [[rey_summary['script.subjectid'][x], rey_summary['expressions.gad_7_total'][x],
+ rey_summary['expressions.phq_total'][x],
+ rey_summary['expressions.pcl_4_total'][x],
+ rey_summary['expressions.pcl_total_hybridscore_corrected'][x]] for x in
+ range(len(rey_summary['script.subjectid'])) if
+ rey_summary['values.end_survey_completed'][x] == '1']
+
+ subject_summary_ant_scores = [
+ [rey_summary['script.subjectid'][x], rey_summary['expressions.overallpercentcorrect'][x],
+ rey_summary['expressions.meanRT'][x], rey_summary['expressions.stdRT'][x]] for x in
+ range(len(rey_summary['script.subjectid'])) if rey_summary['values.end_survey_completed'][x] == '1']
+
+ with open(sr_responses, 'wb') as csvfile:
+ writer = csv.writer(csvfile, delimiter=',')
+ writer.writerow(['subj_id', 'gad_7', 'phq', 'pcl_dsm4', 'pcl_hybrid'])
+ for responses in sorted(subject_summary_sr_responses):
+ writer.writerow(responses)
+ csvfile.close()
+
+ with open(summary_ant_scores, 'wb') as csvfile:
+ writer = csv.writer(csvfile, delimiter=',')
+ writer.writerow(['subj_id', 'percent_correct', 'meanRT', 'stdRT'])
+ for scores in sorted(subject_summary_ant_scores):
+ writer.writerow(scores)
+ csvfile.close()
+
+
+# In[105]:
+
+
+def demo_and_summary_new(all_subj_data_csv, demographic_data, subj_age_agerange_gender):
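+    """Write subj_id/age/age_range/gender rows for subjects found in the raw
+    task data, using the age bins defined below."""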
+ with open(all_subj_data_csv, 'U') as file:
+ input_csv_lines_all_subj = csv.reader(file)
+ input_csv_lines_all_subj = map(list, zip(*input_csv_lines_all_subj))
+ all_subj_csv_lines = dict((rows[0], rows[1:]) for rows in input_csv_lines_all_subj)
+
+ with open(demographic_data, 'U') as file:
+ input_demo_sr_q_csv = csv.reader(file)
+ input_demo_sr_q_csv = map(list, zip(*input_demo_sr_q_csv))
+ demographic_data = dict((rows[0], rows[1:]) for rows in (input_demo_sr_q_csv))
+
+ age_ranges = {
+ '20-29': range(20, 30, 1),
+ '30-39': range(30, 40, 1),
+ '40-49': range(40, 50, 1),
+ '50-59': range(50, 60, 1),
+ '60-69': range(60, 70, 1),
+        '70-90': range(70, 91, 1)}  # range end bumped so age 90 falls inside the labeled bin
+
+ subj_id_list_demo = []
+ subj_id_only_demo = []
+
+ for subject in sorted(set(all_subj_csv_lines['subject'])):
+ subj_id_only_demo.append(subject)
+ subj_id_list_combined = [demographic_data['subject'][x] for x in range(len(demographic_data['subject'])) if
+ demographic_data['subject'][x] == subject]
+ subj_id_list_demo.append(subj_id_list_combined)
+
+ subj_id_combined = [(idx, val) for idx, val in enumerate(sorted(subj_id_only_demo))]
+
+ subj_val = []
+ key_val_all = []
+ for key in sorted(demographic_data.keys()):
+ for value in sorted(demographic_data[key]):
+ key_val_all.append([key, value])
+ if 'subject' in key:
+ subj_val.append(value)
+ else:
+ continue
+
+ subj_id_with_index = list()
+ for subj_num in subj_val:
+ subj_combined = [[idx, val] for idx, val in enumerate(sorted(subj_id_only_demo)) if val == subj_num]
+ subj_indexvals = [[idx, val] for idx, val in enumerate(sorted(subj_id_only_demo))]
+ subj_id_with_index.append(subj_combined)
+
+ subj_age_gender_mem = []
+ x = []
+ for idx2, subj_id in enumerate(subj_id_only_demo):
+ subj_age_gen = [[demographic_data['subject'][x], demographic_data['gender_response'][x].lower(),
+ demographic_data['age_textbox_response'][x]] for x in range(len(demographic_data['subject']))
+ if demographic_data['subject'][x] == subj_id]
+ y = [[demographic_data['subject'][x]] for x in range(len(demographic_data['subject'])) if
+ demographic_data['subject'][x] == subj_id]
+ subj_age_gender_mem.append(subj_age_gen)
+
+ demo_subj_age_gender = [[demographic_data['subject'][x], demographic_data['gender_response'][x].lower(),
+ demographic_data['age_textbox_response'][x]]
+ for x in range(len(demographic_data['subject']))
+ if demographic_data['subject'][x]]
+
+ raw_data_responses = [[all_subj_csv_lines['subject'][x], all_subj_csv_lines['trialcode'][x],
+ all_subj_csv_lines['response'][x].lower()]
+ for x in range(len(all_subj_csv_lines['subject']))
+ if 'recall_response' in all_subj_csv_lines['trialcode'][x]]
+
+ key_val = []
+ for key in age_ranges.keys():
+ for val in age_ranges[key]:
+ key_val.append([key, val])
+
+ id_age_agerange = []
+ with open(subj_age_agerange_gender, 'wb') as csvfile:
+ writer = csv.writer(csvfile, delimiter=',')
+ writer.writerow(['subj_id', 'age', 'age_range', 'gender'])
+ for subj in sorted(demo_subj_age_gender):
+ subj_from_main_raw_list = []
+ ages = subj[2]
+ gender = subj[1]
+ subj_id_raw = [val for val in raw_data_responses if val[0] == subj[0]]
+ for vals in key_val:
+ age_vals = vals[1]
+ age_vals = str(age_vals)
+ if age_vals == ages:
+ complete_list = subj[0] + ',' + age_vals + "," + vals[0] + "," + gender
+ id_age_agerange.append(complete_list)
+ writer.writerow([subj[0], age_vals, vals[0], gender])
+
+
+# In[104]:
+
+
+import pandas
+
+
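+# Vakil-style composite measures derived from the recall trials:
+# total learning (sum of trials 1-5), corrected total learning (total minus 5x trial 1),
+# learning rate (trial 5 - trial 1), proactive interference (trial 1 - list B),
+# retroactive interference (trial 5 - trial 6), forgetting/retention (trial 5 - trial 7)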
+def composite_scores(input_csv, output_csv):
+ scored_data = pandas.read_csv(input_csv)
+ print input_csv
+ df_trials = scored_data.loc[:, 'trial1':'trial7']
+ print df_trials.columns.tolist()
+ composite_scores = pandas.DataFrame()
+ tmp = pandas.DataFrame()
+    composite_scores['total_learning'] = df_trials[
+        ['trial1', 'trial2', 'trial3', 'trial4', 'trial5']].sum(axis=1)
+    # trial 1 score scaled by the five learning trials; the original multiplied the
+    # *list* by 5 (replicating it to 5x length) instead of multiplying the values
+    tmp['test'] = df_trials['trial1'] * 5
+    composite_scores['corrected_total_learning'] = composite_scores['total_learning'].subtract(tmp['test'])
+
+ composite_scores['learning_rate'] = df_trials['trial5'].subtract(df_trials['trial1'], axis='rows')
+ composite_scores['proactive_interference'] = df_trials['trial1'].subtract(scored_data['listb'], axis='rows')
+ composite_scores['retroactive_interference'] = df_trials['trial5'].subtract(df_trials['trial6'], axis='rows')
+
+ composite_scores['forgetting_and_retention'] = df_trials['trial5'].subtract(df_trials['trial7'], axis='rows')
+
+    # only one copy is written; the original wrote a transposed frame and then
+    # immediately overwrote it with the untransposed one at the same path
+    composite_scores.to_csv(output_csv, header=True, index=True)
+
+
+# for scored in glob('/Users/lillyel-said/Desktop/vmreact/output/*_scored_data.csv'):
+# composite_scores(scored,scored.replace('_scored_data.csv','_composite_scores.csv'))
+
+
+# In[119]:
+
+import os
+import csv
+import collections
+import datetime
+from difflib import SequenceMatcher
+from glob import glob
+from math import ceil
+
+format = "%Y_%m_%d"
+current_date = datetime.datetime.today()
+date = current_date.strftime(format)
+
+output = '/Users/lillyel-said/Desktop/data_transfer/demo'
+os.chdir('/Users/lillyel-said/Desktop/stanford/scripts/inquisit/final/grader_inq_to_edit')
+
+
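+# re-run the grader over every *raw.csv under `output`, writing dated parsed/scored/
+# word-correlation CSVs (plus primacy- and recency-weighted variants) into each
+# subject's out/ directory, then regenerate composites and demographic summaries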
+def restructure_and_regrade_all_data(output):
+ for raw in glob(os.path.join(output, '*raw.csv')):
+        all_subj_data_csv = raw
+        # subject prefix: the first three underscore-separated fields of the raw filename
+        subj_prefix = '_'.join(os.path.basename(raw).split('_')[0:3]) + '_inquisit'
+        out_dir = os.path.join('/Users/lillyel-said/Desktop/data_transfer/', subj_prefix, 'out')
+        demo_data = glob(
+            os.path.join('/Users/lillyel-said/Desktop/data_transfer/', subj_prefix, 'csv', '*demographics_survey.csv'))
+        summary_data = glob(
+            os.path.join('/Users/lillyel-said/Desktop/data_transfer/', subj_prefix, 'csv', '*summary.csv'))
+        print summary_data[0]
+        grader(all_subj_data_csv, os.path.join(out_dir, 'parsed_raw_data' + '_' + date + '.csv'),
+               os.path.join(out_dir, 'scored_data' + '_' + date + '.csv'),
+               os.path.join(out_dir, 'word_correlations' + '_' + date + '.csv'), 0)
+        grader(all_subj_data_csv, os.path.join(out_dir, 'parsed_raw_data_primacy' + '_' + date + '.csv'),
+               os.path.join(out_dir, 'scored_data_primacy' + '_' + date + '.csv'),
+               os.path.join(out_dir, 'word_correlations_primacy' + '_' + date + '.csv'), 1)
+        grader(all_subj_data_csv, os.path.join(out_dir, 'parsed_raw_data_recency' + '_' + date + '.csv'),
+               os.path.join(out_dir, 'scored_data_recency' + '_' + date + '.csv'),
+               os.path.join(out_dir, 'word_correlations_recency' + '_' + date + '.csv'), 2)
+        composite_scores(os.path.join(out_dir, 'scored_data' + '_' + date + '.csv'),
+                         os.path.join(out_dir, 'composite_scores_vakil' + '_' + date + '.csv'))
+        try:
+            demo_and_summary(all_subj_data_csv, demo_data[0], summary_data[0],
+                             os.path.join(out_dir, 'frequency_counts' + '_' + date + '.csv'),
+                             os.path.join(out_dir, 'subj_age_agerange_gender' + '_' + date + '.csv'),
+                             os.path.join(out_dir, 'sr_responses' + '_' + date + '.csv'),
+                             os.path.join(out_dir, 'summary_ant_scores' + '_' + date + '.csv'))
+            demo_and_summary_new(all_subj_data_csv, demo_data[0],
+                                 os.path.join(out_dir, 'subj_age_agerange_gender_new_age_bins' + '_' + date + '.csv'))
+        except:
+            # bare except kept from the original: silently skips subjects whose
+            # demographic or summary CSVs are missing or malformed
+            continue
+
+# In[86]:
+
+
+# rename
+from shutil import copy, move  # copy/move are used below but were never imported
+
+output_csv_location = '/Users/cdla/Desktop/scratch/vmreact/2_vmreact/'
+raw_data_csvs = '/Users/cdla/Desktop/scratch/vmreact/1_rawdata/*/*raw.csv'
+
+
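+# regrade every mturk batch's raw CSV (standard, primacy, recency passes) and copy
+# its demo/summary files into one output directory under a 'mturk_<batch>_' prefix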
+def standardize_and_rename_scored_csvs(output_csv_location, raw_data_path):
+    date_format = "%Y_%m_%d"
+    current_date = datetime.datetime.today()
+    date = current_date.strftime(date_format)
+
+    # the original shadowed both arguments with hardcoded paths; the parameters
+    # are now actually used
+    for raw in glob(raw_data_path):
+ raw_data = raw
+ demo_data = raw.replace('raw.csv', 'demo.csv')
+ summary_data = raw.replace('raw.csv', 'summary.csv')
+ prefix = 'mturk_' + os.path.basename(os.path.dirname(raw_data)).split('_')[1] + '_'
+ grader(raw_data, os.path.join(output_csv_location, prefix + 'parsed_raw_data.csv'),
+ os.path.join(output_csv_location, prefix + 'scored_data.csv'),
+ os.path.join(output_csv_location, prefix + 'word_correlations.csv'), 0)
+ grader(raw_data, os.path.join(output_csv_location, prefix + 'parsed_raw_data_primacy.csv'),
+ os.path.join(output_csv_location, prefix + 'scored_data_primacy.csv'),
+ os.path.join(output_csv_location, prefix + 'word_correlations_primacy.csv'), 1)
+ grader(raw_data, os.path.join(output_csv_location, prefix + 'parsed_raw_data_recency.csv'),
+ os.path.join(output_csv_location, prefix + 'scored_data_recency.csv'),
+ os.path.join(output_csv_location, prefix + 'word_correlations_recency.csv'), 2)
+ copy(demo_data, os.path.join(output_csv_location, prefix + 'demo.csv'))
+ copy(summary_data, os.path.join(output_csv_location, prefix + 'summary.csv'))
+
+
+# In[ ]:
+
+
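+# trim every scored CSV in place to its first 18 columns
+# (subj_id through trial7_#_repeats), via a temporary file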
+scored_dir = '/Users/lillyel-said/Desktop/vmreact/output/'
+for scored_csv in glob(os.path.join(scored_dir, '*scored*')):
+ with open(scored_csv, 'U') as source:
+ rdr = csv.reader(source)
+ with open(os.path.join(scored_dir, 'tmp.csv'), 'wb') as result:
+ wtr = csv.writer(result)
+ for r in rdr:
+ wtr.writerow(r[0:18])
+ move(os.path.join(scored_dir, 'tmp.csv'), scored_csv)
+ print scored_csv
+
+# In[ ]:
+
+# Getting composite scores from scored
+
+
+# In[ ]:
+
+
+import pandas as pd  # pd is used throughout the cells below but was never imported
+
+scored_dir = '/Users/lillyel-said/Desktop/vmreact/output/'
+
+
+# define the column sets and dataframes pulled from each batch's scored data
+
+def set_scored_to_df(scored_dir):
+ demo_cols = []
+ clin_raw_cols = []
+ sum_cols = ['script.startdate', 'script.starttime', 'subject',
+ 'expressions.gad_7_total', 'expressions.phq_total', 'expressions.pcl_4_total',
+ 'expressions.pcl_total_hybridscore_corrected', 'expressions.pcl_total_hybridscore_uncorrected']
+ scored_cols = ['subj_id', 'list_type', 'listb', 'trial1', 'trial2', 'trial3',
+ 'trial4', 'trial5', 'trial6', 'trial7', 'listb_#_repeats', 'trial1_#_repeats', 'trial2_#_repeats',
+ 'trial3_#_repeats', 'trial4_#_repeats', 'trial5_#_repeats', 'trial6_#_repeats', 'trial7_#_repeats']
+ composite_cols = ['subject', 'total_learning', 'corrected_total_learning', 'learning_rate',
+ 'proactive_interference', 'retroactive_interference', 'forgetting_and_retention']
+
+ age_range_gender_cols = ['age_range']
+
+ for batch in range(1, 9):
+ batch = str(batch)
+ demo = os.path.join(scored_dir, 'mturk_batch' + batch + '_demo.csv')
+ clin_raw = os.path.join(scored_dir, 'mturk_batch' + batch + '_end.csv')
+ summ = os.path.join(scored_dir, 'mturk_batch' + batch + '_summary.csv')
+ scored = os.path.join(scored_dir, 'mturk_batch' + batch + '_scored_data.csv')
+ composite = os.path.join(scored_dir, 'mturk_batch' + batch + '_composite_scores.csv')
+ age_range_gender_csv = os.path.join(scored_dir, 'mturk_batch' + batch + '_age_range_gender.csv')
+
+ demo_df = pd.read_csv(demo, dtype=str)
+ # demo_cols.extend([x for x in demo_df.columns.tolist() if ('latency' not in x and 'online' not in x and 'Unnamed' not in x and 'time_comp' not in x and 'subj_id' not in x)])
+        # the trailing "'age_textbox'" literal in the original filter was always truthy
+        # (a no-op, possibly intended as "'age_textbox' not in x"); dropped here
+        # without changing behavior
+        demo_cols.extend([x for x in demo_df.columns.tolist() if
+                          ('latency' not in x and 'Unnamed' not in x and 'subj_id' not in x)])
+ print batch
+ age_range_df = pd.read_csv(age_range_gender_csv)
+ age_range_gender_cols.extend(
+ [x for x in age_range_df.columns.tolist() if ('age' not in x and 'subj_id' not in x and 'gender' not in x)])
+ clin_raw_df = pd.read_csv(clin_raw, dtype=str)
+ clin_raw_cols.extend(
+ [x for x in clin_raw_df.columns.tolist() if 'latency' not in x and 'end' not in x and 'Unnamed' not in x])
+ sum_df = pd.read_csv(summ, dtype=str)
+ scored_df = pd.read_csv(scored, dtype=str)
+ comp_df = pd.read_csv(composite, dtype=str).rename(index=str, columns={'Unnamed: 0': 'subject'})
+ age_range_gender = pd.read_csv(age_range_gender_csv, dtype=str)
+
+ demo_cols = list(set(demo_cols))
+ clin_raw_cols = list(set(clin_raw_cols))
+
+ return demo_cols, clin_raw_cols
+
+
+# need to get latency values,
+# use the scored to set the subject ids.
+# append composite to scored_cols since they're in the same order and composite doesn't have subject ids
+# summary - use script.subjectid
+# demo - use subject
+# clin_raw - use subject
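+#
+# A minimal sketch of that merge plan (toy dataframes; batch_merge below is the
+# real implementation):
+#
+#   import pandas as pd
+#
+#   scored = pd.DataFrame({'subj_id': ['101', '102'], 'trial1': [7, 9]})
+#   summary = pd.DataFrame({'script.subjectid': ['101', '102'], 'expressions.phq_total': [3, 5]})
+#   demo = pd.DataFrame({'subject': ['101', '102'], 'age_textbox_response': ['24', '31']})
+#
+#   # normalize every source to a common 'subject' key, then chain left merges
+#   merged = (scored.rename(columns={'subj_id': 'subject'})
+#             .merge(summary.rename(columns={'script.subjectid': 'subject'}), on='subject', how='left')
+#             .merge(demo, on='subject', how='left'))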
+
+# In[ ]:
+
+
+import numpy as np
+import pandas as pd
+
+scored_dir = '/Users/lillyel-said/Desktop/vmreact/vmreact/2_vmreact/'
+latency_csv = os.path.join(scored_dir, 'vmreact_latency_summary.csv')
+
+
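+# merge each batch's demo, summary, clinical, scored, primacy/recency, composite,
+# and typing-latency tables into a single per-batch compiled CSV, keyed on subject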
+def batch_merge(scored_dir, latency_csv):
+ for batch in range(1, 9):
+ # for batch in [8]:
+ batch_df = pd.DataFrame()
+ batch = str(batch)
+ print 'mturk_batch' + batch
+
+ demo = os.path.join(scored_dir, 'mturk_batch' + batch + '_demo.csv')
+ clin_raw = os.path.join(scored_dir, 'mturk_batch' + batch + '_end.csv')
+        summ = os.path.join(scored_dir, 'mturk_batch' + batch + '_summary.csv')  # renamed from sum, which shadowed the builtin
+ scored = os.path.join(scored_dir, 'mturk_batch' + batch + '_scored_data.csv')
+ primacy = os.path.join(scored_dir, 'mturk_batch' + batch + '_scored_data_primacy.csv')
+ recency = os.path.join(scored_dir, 'mturk_batch' + batch + '_scored_data_recency.csv')
+ composite = os.path.join(scored_dir, 'mturk_batch' + batch + '_composite_scores.csv')
+
+ demo_df = pd.read_csv(demo, dtype=str)
+ clin_raw_df = pd.read_csv(clin_raw, dtype=str)
+        sum_df = pd.read_csv(summ, dtype=str).rename(index=str, columns={'script.subjectid': 'subject'})
+ scored_df = pd.read_csv(scored)
+
+ primacy_df = pd.read_csv(primacy, dtype=str)
+ recency_df = pd.read_csv(recency, dtype=str)
+
+        # the original passed both on= and left_index=, which pandas rejects
+        extra_measures = primacy_df.merge(recency_df, on='subj_id', how='left',
+                                          suffixes=('_primacy', '_recency')).rename(columns={'subj_id': 'subject'})
+ comp_df = pd.read_csv(composite).rename(index=str, columns={'Unnamed: 0': 'subject'})
+ comp_df['subject'] = comp_df['subject'].apply(int)
+
+ vmreact_df = pd.merge(scored_df, comp_df, left_index=True, right_on='subject', how='left').drop('subject',
+ axis=1)
+ vmreact_df['subj_id'] = vmreact_df['subj_id'].astype(str)
+
+ # vmreact_df['subj_id']=vmreact_df['subj_id'].apply(pd.to_numeric)
+ latency_df = pd.read_csv(latency_csv, dtype=str)
+ latency_df = latency_df.drop_duplicates().reset_index()
+
+ subject_ids = vmreact_df['subj_id'].tolist()
+
+ vmreact_df = vmreact_df.merge(extra_measures, left_on='subj_id', right_on='subject').drop('subject', axis=1)
+
+ batch_demo_cols = [x for x in demo_df.columns.tolist() if x in demo_cols]
+ append_demo_cols = [x for x in demo_cols if x not in demo_df.columns.tolist()]
+ demo_df = demo_df[demo_df['subject'].astype(str).isin(subject_ids)][batch_demo_cols]
+
+ for col in append_demo_cols:
+ demo_df[col] = np.nan
+ # print demo_df
+ # demo_df['subject']=demo_df['subject'].apply(pd.to_numeric)
+
+ batch_clin_cols = [x for x in clin_raw_df.columns.tolist() if x in clin_raw_cols]
+ append_clin_cols = [x for x in clin_raw_cols if x not in clin_raw_df.columns.tolist()]
+ clin_raw_df = clin_raw_df[clin_raw_df['subject'].astype(str).isin(subject_ids)][batch_clin_cols]
+ for col in sorted(append_clin_cols):
+ clin_raw_df[col] = np.nan
+ # clin_raw_df['subject']=clin_raw_df['subject'].apply(pd.to_numeric)
+
+ batch_sum_cols = [x for x in sum_df.columns.tolist() if x in sum_cols]
+ append_sum_cols = [x for x in sum_cols if x not in sum_df.columns.tolist()]
+ sum_df = sum_df[sum_df['subject'].astype(str).isin(subject_ids)][batch_sum_cols]
+ for col in sorted(append_sum_cols):
+ sum_df[col] = np.nan
+ # sum_df['subject']=sum_df['subject'].apply(pd.to_numeric)
+
+ batch_df = demo_df.merge(sum_df, left_on='subject', right_on='subject').drop(
+ ['script.startdate', 'script.starttime'], axis=1)
+ batch_df = batch_df.merge(clin_raw_df, left_on='subject', right_on='subject').drop(
+ ['date_y', 'time_y', 'group_y', 'build_y'], axis=1)
+ batch_df = batch_df.merge(vmreact_df, left_on='subject', right_on='subj_id').drop('subj_id', axis=1)
+ batch_df = batch_df.rename(columns={'date_x': 'date', 'time_x': 'time', 'group_x': 'group', 'build_x': 'build'})
+ # print batch_df
+
+ print subject_ids
+ latency_df['subjid'] = latency_df['subjid'].astype(str)
+ latency_df['date'] = latency_df['date'].astype(int)
+ batch_df['date'] = batch_df['date'].astype(int)
+
+        # keep only the latency rows matching this batch's subjects and dates
+        # (the original filtered twice, with the first pass made redundant by the second)
+        latency_df = latency_df.loc[
+            latency_df['subjid'].isin(batch_df['subject'].astype(str).tolist())
+            & latency_df['date'].isin(batch_df['date'].tolist())]
+
+ batch_df['subject'] = batch_df['subject'].astype(str)
+ batch_df = batch_df.merge(latency_df, left_on='subject', right_on='subjid')
+
+ batch_df.to_csv(os.path.join(scored_dir, 'mturk_batch' + batch + '_compiled.csv'))
+ # os.system('open /Users/cdla/Desktop/scratch/vmreact/2_vmreact/'+'mturk_batch'+batch+'_compiled.csv')
+ return batch_df
+
+
+# In[ ]:
+
+# concat all merged per-batch csvs into one final compiled csv
+def conc_all_merged_df(scored_dir):
+    # the original parameter (compiled_csv) was immediately shadowed by the loop
+    # variable; the function now takes the directory it actually reads from
+    dataframes_to_concat = []
+    result = []
+    for compiled_csv in glob(os.path.join(scored_dir, '*compiled.csv')):
+ df = pd.read_csv(compiled_csv, dtype=str)
+ dataframes_to_concat.append(df)
+
+ result = pd.concat(dataframes_to_concat).reindex_axis(df.columns.tolist(), axis=1).drop(
+ ['index', 'date_y', 'subjid', 'Unnamed: 0'], axis=1).dropna(how='all', axis=1).drop_duplicates()
+
+ # print result.subject
+ result = result[~result.subject.isin(['XXX', 'AVD6HMIO1HLFI', 'A5EU1AQJNC7F2'])]
+ result.drop_duplicates(['date_x', 'subject'], inplace=True)
+ display(result)
+ result = result.drop_duplicates()
+ result.to_csv(os.path.join(scored_dir, 'mturk_vmreact_complete_compilation.csv'), index=False)
+ return result
+
+# In[ ]:
+
+
+# In[ ]:
diff --git a/build/lib/vmreact-mturk/post_scoring_compiled_csv/vmreact_extra_measures_combined.py b/build/lib/vmreact-mturk/post_scoring_compiled_csv/vmreact_extra_measures_combined.py
new file mode 100755
index 0000000..7760a4c
--- /dev/null
+++ b/build/lib/vmreact-mturk/post_scoring_compiled_csv/vmreact_extra_measures_combined.py
@@ -0,0 +1,31 @@
+import os
+
+import pandas as pd
+
+data_dir = '/Users/lillyel-said/Desktop/vmreact/vmreact/2_vmreact/'
+compiled = '/Users/lillyel-said/Desktop/vmreact/vmreact/2_vmreact/mturk_vmreact_complete_compilation_initiation.csv'
+av_typing = '/Users/lillyel-said/Desktop/vmreact/vmreact/2_vmreact/typing_test_averages.csv'
+trials = ['trial1', 'trial2', 'trial3', 'trial4', 'trial5', 'listb', 'trial6', 'trial7']
+cols = ['values.response_latency', 'expressions.trial_recall_word_latency',
+ 'values.recall_firstcharlatency', 'values.recall_lastcharlatency']
+column_titles = ['subjid', 'date']
+
+incorrect = pd.read_csv(os.path.join(data_dir, 'incorrect_response.csv'), dtype=str)
+vmreact_df = pd.read_csv(compiled, dtype='str')
+
+avg_typing_df = pd.read_csv(av_typing, dtype='str')
+
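+# join the compiled data to the typing-test averages on a combined subject_date key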
+vmreact_df['unique_identifier'] = vmreact_df['subject'] + '_' + vmreact_df['date']
+df2 = vmreact_df.merge(avg_typing_df, left_on='unique_identifier', right_on='unique_identifier', how='outer')
+new_compiled = pd.DataFrame(data=df2, dtype=str)
+new_compiled["total_average_repeats"] = new_compiled.loc[:, 'listb_#_repeats':'trial7_#_repeats'].astype(float).mean(
+ axis=1)
+new_compiled["total_incorrect"] = new_compiled.loc[:, 'listb':'trial7'].astype(float).subtract(15, axis=0)
+new_compiled[['listb_errors', 'trial1_errors', 'trial2_errors', 'trial3_errors', 'trial4_errors', 'trial5_errors',
+ 'trial6_errors', 'trial7_errors']] = new_compiled.loc[:, 'listb':'trial7'].astype(float).subtract(15,
+ axis=0).abs()
+
+# for x,y in incorrect.groupby(['subj_id','trial']):
+# print x,y.score.value_counts().T
+
+new_compiled.to_csv('updated_mturk_vmreact_complete_compilation_initiation.csv')
diff --git a/build/lib/vmreact-mturk/post_scoring_compiled_csv/vmreact_gen_normed_tables.py b/build/lib/vmreact-mturk/post_scoring_compiled_csv/vmreact_gen_normed_tables.py
new file mode 100755
index 0000000..bed799e
--- /dev/null
+++ b/build/lib/vmreact-mturk/post_scoring_compiled_csv/vmreact_gen_normed_tables.py
@@ -0,0 +1,51 @@
+import os
+from glob import glob
+
+import pandas as pd
+
+data_dir = '/Users/lillyel-said/Desktop/vmreact/vmreact/2_vmreact/'
+
+cols = ['date', 'subject', 'trial1', 'trial2', 'trial3', 'trial4', 'trial5', 'listb', 'trial6', 'trial7',
+ 'total_learning', 'corrected_total_learning', 'learning_rate', 'proactive_interference',
+ 'retroactive_interference', 'forgetting_and_retention']
+trials = ['trial1', 'trial2', 'trial3', 'trial4', 'trial5', 'listb', 'trial6', 'trial7']
+
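+# build normed tables: for each (gender, age_range) group with more than three
+# subjects, compute mean, SD, and N of each trial's score, latencies, repeats,
+# and primacy/recency counts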
+for compiled_file in glob(os.path.join(data_dir, 'mturk_vmreact_complete_compilation_initiation.csv')):
+ vmreact_compiled = pd.read_csv(compiled_file, dtype=str, index_col=['gender', 'age_range'])
+ bysubj = pd.read_csv(compiled_file, dtype='str')
+ test_df = vmreact_compiled.loc[:, 'list_type':'trial7_values.recall_lastcharlatency']
+ for t in trials:
+ for subj, subj_df in vmreact_compiled.groupby(level=[0, 1]):
+ if len(subj_df) > 3:
+ try:
+ response_latency = subj[0], subj[1], t + '_values.response_latency', round(
+ subj_df[t + '_values.response_latency'].astype(float).mean(axis=0), 4), round(
+ subj_df[t + '_values.response_latency'].astype(float).std(axis=0), 4), subj_df[
+ t + '_values.response_latency'].count()
+                    # std/count below previously read the _recency column (copy-paste slip)
+                    initiation = subj[0], subj[1], t + '_values.recall_firstcharlatency', round(
+                        subj_df[t + '_values.recall_firstcharlatency'].astype(float).mean(axis=0), 4), round(
+                        subj_df[t + '_values.recall_firstcharlatency'].astype(float).std(axis=0), 4), subj_df[
+                        t + '_values.recall_firstcharlatency'].count()
+ repeats = subj[0], subj[1], t + '_#_repeats', round(
+ subj_df[t + '_#_repeats'].astype(float).mean(axis=0), 4), round(
+ subj_df[t + '_#_repeats'].astype(float).std(axis=0), 4), subj_df[t + '_#_repeats'].count()
+                    # renamed from trials, which shadowed the list being iterated
+                    trial_scores = subj[0], subj[1], t, round(subj_df[t].astype(float).mean(axis=0), 4), round(
+                        subj_df[t].astype(float).std(axis=0), 4), subj_df[t].count()
+ primacy = subj[0], subj[1], t + '_primacy', round(
+ subj_df[t + '_primacy'].astype(float).mean(axis=0), 4), round(
+ subj_df[t + '_primacy'].astype(float).std(axis=0), 4), subj_df[t + '_primacy'].count()
+ recency = subj[0], subj[1], t + '_recency', round(
+ subj_df[t + '_recency'].astype(float).mean(axis=0), 4), round(
+ subj_df[t + '_recency'].astype(float).std(axis=0), 4), subj_df[t + '_recency'].count()
+                    composite = subj_df.loc[:, 'total_learning':'forgetting_and_retention'].astype(float)
+                    composite_vals = composite.mean(axis=0), composite.std(axis=0), composite.count()
+                    # # print composite_vals[2].T
+ # print repeats
+ # print response_latency
+ # print initiation
+                    # print trial_scores
+ # print primacy
+ # print recency
+            except:
+                # skips (gender, age_range) groups missing one of the expected columns
+                continue
+ # firstcharaverages
diff --git a/build/lib/vmreact-mturk/post_scoring_compiled_csv/vmreact_typing_latency_by_subject.py b/build/lib/vmreact-mturk/post_scoring_compiled_csv/vmreact_typing_latency_by_subject.py
new file mode 100755
index 0000000..f6eb1e6
--- /dev/null
+++ b/build/lib/vmreact-mturk/post_scoring_compiled_csv/vmreact_typing_latency_by_subject.py
@@ -0,0 +1,14 @@
+import os
+from glob import glob
+
+import pandas as pd
+
+def typing_test_extraction(data_dir, output_csv_path):
+    # average typing latency per (subject, date), collected and written once;
+    # the original called .to_csv() on a tuple (invalid) and rewrote the output
+    # file on every iteration
+    rows = []
+    for x in glob(os.path.join(data_dir, 'filtered_typing_test.csv')):
+        df = pd.read_csv(x, dtype='str')
+        for i, val in df.groupby(['subject', 'date']):
+            print i[0] + '_' + i[1], i[0], i[1], val['latency'].astype(int).mean()
+            rows.append((i[0] + '_' + i[1], i[0], i[1], val['latency'].astype(int).mean()))
+    # column names are assumed, chosen to match the unique_identifier convention
+    # used by the merge script above
+    pd.DataFrame(rows, columns=['unique_identifier', 'subject', 'date', 'average_latency']).to_csv(
+        os.path.join(output_csv_path, 'typing_test_average_latency.csv'), index=False)
\ No newline at end of file
diff --git a/dist/vmreact-0.1-py2.7.egg b/dist/vmreact-0.1-py2.7.egg
new file mode 100755
index 0000000..5eebb9b
Binary files /dev/null and b/dist/vmreact-0.1-py2.7.egg differ
diff --git a/installation.txt b/installation.txt
new file mode 100755
index 0000000..a1a8af4
--- /dev/null
+++ b/installation.txt
@@ -0,0 +1,3 @@
+To install dependencies, run the following command in a terminal window:
+
+1. python setup.py install
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
old mode 100644
new mode 100755
index e5d81aa..0004ef6
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,19 +1,39 @@
-IPython
-pandas
-altgraph==0.10.2
-bdist-mpkg==0.5.0
-bonjour-py==0.3
-macholib==1.5.1
-matplotlib==1.3.1
-modulegraph==0.10.4
-numpy==1.8.0rc1
-py2app==0.7.3
-pyobjc-core==2.5.1
-pyOpenSSL==0.13.1
-pyparsing==2.0.1
-python-dateutil==1.5
-pytz==2013.7
-scipy==0.13.0b1
-six==1.4.1
-xattr==0.6.4
-zope.interface==4.1.1
+prettyplotlib
+appnope==0.1.0
+backports-abc==0.5
+backports.functools-lru-cache==1.5
+backports.shutil-get-terminal-size==1.0.0
+certifi==2018.8.13
+cycler==0.10.0
+decorator==4.3.0
+enum34==1.1.6
+functools32==3.2.3.post2
+futures==3.2.0
+ipython==5.8.0
+ipython-genutils==0.2.0
+kiwisolver==1.0.1
+matplotlib==2.2.3
+mkl-fft==1.0.5
+mkl-random==1.0.1
+numpy==1.15.0
+pandas==0.23.4
+pathlib2==2.3.2
+pexpect==4.6.0
+pickleshare==0.7.4
+pipenv==2018.7.1
+prompt-toolkit==1.0.15
+ptyprocess==0.6.0
+Pygments==2.2.0
+pyparsing==2.2.0
+python-dateutil==2.7.3
+pytz==2018.5
+scandir==1.9.0
+simplegeneric==0.8.1
+singledispatch==3.4.0.3
+six==1.11.0
+subprocess32==3.5.2
+tornado==5.1
+traitlets==4.3.2
+virtualenv==16.0.0
+virtualenv-clone==0.3.0
+wcwidth==0.1.7
diff --git a/setup.py b/setup.py
old mode 100644
new mode 100755
index 2b5717d..e2e0fdf
--- a/setup.py
+++ b/setup.py
@@ -1,188 +1,39 @@
-from setuptools import setup
+try:
+ from setuptools import setup
+except ImportError:
+ from distutils.core import setup
+
+# pip install pandas
+# pip install matplotlib
+# pip install IPython
+# (glob is part of the Python standard library; it is not a pip package)
+
+required = [
+ 'certifi==2018.8.13',
+ 'numpy==1.14.3',
+ 'pandas==0.23.4',
+ 'python-dateutil==2.7.3',
+ 'pytz==2018.5',
+ 'six==1.11.0',
+ 'virtualenv==16.0.0'
+]
+
+long_desc = (open('README.md').read() + '\n\n' + open('requirements.txt').read()
+             + '\n\n' + open('installation.txt').read())
setup(
name='vmreact',
- version='0',
- packages=['venv.lib.python2.7.distutils', 'venv.lib.python2.7.encodings', 'venv.lib.python2.7.site-packages.pip',
- 'venv.lib.python2.7.site-packages.pip._vendor', 'venv.lib.python2.7.site-packages.pip._vendor.idna',
- 'venv.lib.python2.7.site-packages.pip._vendor.pytoml',
- 'venv.lib.python2.7.site-packages.pip._vendor.certifi',
- 'venv.lib.python2.7.site-packages.pip._vendor.chardet',
- 'venv.lib.python2.7.site-packages.pip._vendor.chardet.cli',
- 'venv.lib.python2.7.site-packages.pip._vendor.distlib',
- 'venv.lib.python2.7.site-packages.pip._vendor.distlib._backport',
- 'venv.lib.python2.7.site-packages.pip._vendor.msgpack',
- 'venv.lib.python2.7.site-packages.pip._vendor.urllib3',
- 'venv.lib.python2.7.site-packages.pip._vendor.urllib3.util',
- 'venv.lib.python2.7.site-packages.pip._vendor.urllib3.contrib',
- 'venv.lib.python2.7.site-packages.pip._vendor.urllib3.contrib._securetransport',
- 'venv.lib.python2.7.site-packages.pip._vendor.urllib3.packages',
- 'venv.lib.python2.7.site-packages.pip._vendor.urllib3.packages.backports',
- 'venv.lib.python2.7.site-packages.pip._vendor.urllib3.packages.ssl_match_hostname',
- 'venv.lib.python2.7.site-packages.pip._vendor.colorama',
- 'venv.lib.python2.7.site-packages.pip._vendor.html5lib',
- 'venv.lib.python2.7.site-packages.pip._vendor.html5lib._trie',
- 'venv.lib.python2.7.site-packages.pip._vendor.html5lib.filters',
- 'venv.lib.python2.7.site-packages.pip._vendor.html5lib.treewalkers',
- 'venv.lib.python2.7.site-packages.pip._vendor.html5lib.treeadapters',
- 'venv.lib.python2.7.site-packages.pip._vendor.html5lib.treebuilders',
- 'venv.lib.python2.7.site-packages.pip._vendor.lockfile',
- 'venv.lib.python2.7.site-packages.pip._vendor.progress',
- 'venv.lib.python2.7.site-packages.pip._vendor.requests',
- 'venv.lib.python2.7.site-packages.pip._vendor.packaging',
- 'venv.lib.python2.7.site-packages.pip._vendor.cachecontrol',
- 'venv.lib.python2.7.site-packages.pip._vendor.cachecontrol.caches',
- 'venv.lib.python2.7.site-packages.pip._vendor.webencodings',
- 'venv.lib.python2.7.site-packages.pip._vendor.pkg_resources',
- 'venv.lib.python2.7.site-packages.pip._internal', 'venv.lib.python2.7.site-packages.pip._internal.req',
- 'venv.lib.python2.7.site-packages.pip._internal.vcs',
- 'venv.lib.python2.7.site-packages.pip._internal.utils',
- 'venv.lib.python2.7.site-packages.pip._internal.models',
- 'venv.lib.python2.7.site-packages.pip._internal.commands',
- 'venv.lib.python2.7.site-packages.pip._internal.operations', 'venv.lib.python2.7.site-packages.enum',
- 'venv.lib.python2.7.site-packages.numpy', 'venv.lib.python2.7.site-packages.numpy.ma',
- 'venv.lib.python2.7.site-packages.numpy.ma.tests', 'venv.lib.python2.7.site-packages.numpy.doc',
- 'venv.lib.python2.7.site-packages.numpy.fft', 'venv.lib.python2.7.site-packages.numpy.fft.tests',
- 'venv.lib.python2.7.site-packages.numpy.lib', 'venv.lib.python2.7.site-packages.numpy.lib.tests',
- 'venv.lib.python2.7.site-packages.numpy.core', 'venv.lib.python2.7.site-packages.numpy.core.tests',
- 'venv.lib.python2.7.site-packages.numpy.f2py', 'venv.lib.python2.7.site-packages.numpy.f2py.tests',
- 'venv.lib.python2.7.site-packages.numpy.tests', 'venv.lib.python2.7.site-packages.numpy.compat',
- 'venv.lib.python2.7.site-packages.numpy.compat.tests', 'venv.lib.python2.7.site-packages.numpy.linalg',
- 'venv.lib.python2.7.site-packages.numpy.linalg.tests', 'venv.lib.python2.7.site-packages.numpy.random',
- 'venv.lib.python2.7.site-packages.numpy.random.tests', 'venv.lib.python2.7.site-packages.numpy.testing',
- 'venv.lib.python2.7.site-packages.numpy.testing.tests',
- 'venv.lib.python2.7.site-packages.numpy.testing._private',
- 'venv.lib.python2.7.site-packages.numpy.distutils',
- 'venv.lib.python2.7.site-packages.numpy.distutils.tests',
- 'venv.lib.python2.7.site-packages.numpy.distutils.command',
- 'venv.lib.python2.7.site-packages.numpy.distutils.fcompiler',
- 'venv.lib.python2.7.site-packages.numpy.matrixlib',
- 'venv.lib.python2.7.site-packages.numpy.matrixlib.tests',
- 'venv.lib.python2.7.site-packages.numpy.polynomial',
- 'venv.lib.python2.7.site-packages.numpy.polynomial.tests', 'venv.lib.python2.7.site-packages.wheel',
- 'venv.lib.python2.7.site-packages.wheel.tool', 'venv.lib.python2.7.site-packages.wheel.signatures',
- 'venv.lib.python2.7.site-packages.pandas', 'venv.lib.python2.7.site-packages.pandas.io',
- 'venv.lib.python2.7.site-packages.pandas.io.sas', 'venv.lib.python2.7.site-packages.pandas.io.json',
- 'venv.lib.python2.7.site-packages.pandas.io.formats',
- 'venv.lib.python2.7.site-packages.pandas.io.msgpack',
- 'venv.lib.python2.7.site-packages.pandas.io.clipboard', 'venv.lib.python2.7.site-packages.pandas.api',
- 'venv.lib.python2.7.site-packages.pandas.api.types',
- 'venv.lib.python2.7.site-packages.pandas.api.extensions', 'venv.lib.python2.7.site-packages.pandas.core',
- 'venv.lib.python2.7.site-packages.pandas.core.util', 'venv.lib.python2.7.site-packages.pandas.core.tools',
- 'venv.lib.python2.7.site-packages.pandas.core.arrays',
- 'venv.lib.python2.7.site-packages.pandas.core.dtypes',
- 'venv.lib.python2.7.site-packages.pandas.core.sparse',
- 'venv.lib.python2.7.site-packages.pandas.core.groupby',
- 'venv.lib.python2.7.site-packages.pandas.core.indexes',
- 'venv.lib.python2.7.site-packages.pandas.core.reshape',
- 'venv.lib.python2.7.site-packages.pandas.core.computation',
- 'venv.lib.python2.7.site-packages.pandas.util', 'venv.lib.python2.7.site-packages.pandas._libs',
- 'venv.lib.python2.7.site-packages.pandas._libs.tslibs', 'venv.lib.python2.7.site-packages.pandas.tests',
- 'venv.lib.python2.7.site-packages.pandas.tests.io',
- 'venv.lib.python2.7.site-packages.pandas.tests.io.sas',
- 'venv.lib.python2.7.site-packages.pandas.tests.io.json',
- 'venv.lib.python2.7.site-packages.pandas.tests.io.parser',
- 'venv.lib.python2.7.site-packages.pandas.tests.io.formats',
- 'venv.lib.python2.7.site-packages.pandas.tests.io.msgpack',
- 'venv.lib.python2.7.site-packages.pandas.tests.api', 'venv.lib.python2.7.site-packages.pandas.tests.util',
- 'venv.lib.python2.7.site-packages.pandas.tests.frame',
- 'venv.lib.python2.7.site-packages.pandas.tests.tools',
- 'venv.lib.python2.7.site-packages.pandas.tests.dtypes',
- 'venv.lib.python2.7.site-packages.pandas.tests.scalar',
- 'venv.lib.python2.7.site-packages.pandas.tests.scalar.period',
- 'venv.lib.python2.7.site-packages.pandas.tests.scalar.interval',
- 'venv.lib.python2.7.site-packages.pandas.tests.scalar.timedelta',
- 'venv.lib.python2.7.site-packages.pandas.tests.scalar.timestamp',
- 'venv.lib.python2.7.site-packages.pandas.tests.series',
- 'venv.lib.python2.7.site-packages.pandas.tests.series.indexing',
- 'venv.lib.python2.7.site-packages.pandas.tests.sparse',
- 'venv.lib.python2.7.site-packages.pandas.tests.sparse.frame',
- 'venv.lib.python2.7.site-packages.pandas.tests.sparse.series',
- 'venv.lib.python2.7.site-packages.pandas.tests.tslibs',
- 'venv.lib.python2.7.site-packages.pandas.tests.generic',
- 'venv.lib.python2.7.site-packages.pandas.tests.groupby',
- 'venv.lib.python2.7.site-packages.pandas.tests.groupby.aggregate',
- 'venv.lib.python2.7.site-packages.pandas.tests.indexes',
- 'venv.lib.python2.7.site-packages.pandas.tests.indexes.period',
- 'venv.lib.python2.7.site-packages.pandas.tests.indexes.interval',
- 'venv.lib.python2.7.site-packages.pandas.tests.indexes.datetimes',
- 'venv.lib.python2.7.site-packages.pandas.tests.indexes.timedeltas',
- 'venv.lib.python2.7.site-packages.pandas.tests.reshape',
- 'venv.lib.python2.7.site-packages.pandas.tests.reshape.merge',
- 'venv.lib.python2.7.site-packages.pandas.tests.tseries',
- 'venv.lib.python2.7.site-packages.pandas.tests.tseries.offsets',
- 'venv.lib.python2.7.site-packages.pandas.tests.indexing',
- 'venv.lib.python2.7.site-packages.pandas.tests.indexing.interval',
- 'venv.lib.python2.7.site-packages.pandas.tests.plotting',
- 'venv.lib.python2.7.site-packages.pandas.tests.extension',
- 'venv.lib.python2.7.site-packages.pandas.tests.extension.base',
- 'venv.lib.python2.7.site-packages.pandas.tests.extension.json',
- 'venv.lib.python2.7.site-packages.pandas.tests.extension.decimal',
- 'venv.lib.python2.7.site-packages.pandas.tests.extension.category',
- 'venv.lib.python2.7.site-packages.pandas.tests.internals',
- 'venv.lib.python2.7.site-packages.pandas.tests.categorical',
- 'venv.lib.python2.7.site-packages.pandas.tests.computation',
- 'venv.lib.python2.7.site-packages.pandas.tools', 'venv.lib.python2.7.site-packages.pandas.types',
- 'venv.lib.python2.7.site-packages.pandas.compat', 'venv.lib.python2.7.site-packages.pandas.compat.numpy',
- 'venv.lib.python2.7.site-packages.pandas.errors', 'venv.lib.python2.7.site-packages.pandas.formats',
- 'venv.lib.python2.7.site-packages.pandas.tseries', 'venv.lib.python2.7.site-packages.pandas.plotting',
- 'venv.lib.python2.7.site-packages.pandas.computation', 'venv.lib.python2.7.site-packages.appnope',
- 'venv.lib.python2.7.site-packages.IPython', 'venv.lib.python2.7.site-packages.IPython.lib',
- 'venv.lib.python2.7.site-packages.IPython.lib.tests', 'venv.lib.python2.7.site-packages.IPython.core',
- 'venv.lib.python2.7.site-packages.IPython.core.tests',
- 'venv.lib.python2.7.site-packages.IPython.core.magics', 'venv.lib.python2.7.site-packages.IPython.utils',
- 'venv.lib.python2.7.site-packages.IPython.utils.tests', 'venv.lib.python2.7.site-packages.IPython.kernel',
- 'venv.lib.python2.7.site-packages.IPython.testing',
- 'venv.lib.python2.7.site-packages.IPython.testing.tests',
- 'venv.lib.python2.7.site-packages.IPython.testing.plugin',
- 'venv.lib.python2.7.site-packages.IPython.external',
- 'venv.lib.python2.7.site-packages.IPython.external.decorators',
- 'venv.lib.python2.7.site-packages.IPython.terminal',
- 'venv.lib.python2.7.site-packages.IPython.terminal.tests',
- 'venv.lib.python2.7.site-packages.IPython.terminal.pt_inputhooks',
- 'venv.lib.python2.7.site-packages.IPython.sphinxext',
- 'venv.lib.python2.7.site-packages.IPython.extensions',
- 'venv.lib.python2.7.site-packages.IPython.extensions.tests', 'venv.lib.python2.7.site-packages.pexpect',
- 'venv.lib.python2.7.site-packages.wcwidth', 'venv.lib.python2.7.site-packages.wcwidth.tests',
- 'venv.lib.python2.7.site-packages.dateutil', 'venv.lib.python2.7.site-packages.dateutil.zoneinfo',
- 'venv.lib.python2.7.site-packages.pathlib2', 'venv.lib.python2.7.site-packages.pygments',
- 'venv.lib.python2.7.site-packages.pygments.lexers', 'venv.lib.python2.7.site-packages.pygments.styles',
- 'venv.lib.python2.7.site-packages.pygments.filters',
- 'venv.lib.python2.7.site-packages.pygments.formatters', 'venv.lib.python2.7.site-packages.backports',
- 'venv.lib.python2.7.site-packages.backports.shutil_get_terminal_size',
- 'venv.lib.python2.7.site-packages.traitlets', 'venv.lib.python2.7.site-packages.traitlets.tests',
- 'venv.lib.python2.7.site-packages.traitlets.utils',
- 'venv.lib.python2.7.site-packages.traitlets.utils.tests',
- 'venv.lib.python2.7.site-packages.traitlets.config',
- 'venv.lib.python2.7.site-packages.traitlets.config.tests', 'venv.lib.python2.7.site-packages.ptyprocess',
- 'venv.lib.python2.7.site-packages.setuptools', 'venv.lib.python2.7.site-packages.setuptools.extern',
- 'venv.lib.python2.7.site-packages.setuptools._vendor',
- 'venv.lib.python2.7.site-packages.setuptools._vendor.packaging',
- 'venv.lib.python2.7.site-packages.setuptools.command', 'venv.lib.python2.7.site-packages.pkg_resources',
- 'venv.lib.python2.7.site-packages.pkg_resources.extern',
- 'venv.lib.python2.7.site-packages.pkg_resources._vendor',
- 'venv.lib.python2.7.site-packages.pkg_resources._vendor.packaging',
- 'venv.lib.python2.7.site-packages.prompt_toolkit',
- 'venv.lib.python2.7.site-packages.prompt_toolkit.layout',
- 'venv.lib.python2.7.site-packages.prompt_toolkit.styles',
- 'venv.lib.python2.7.site-packages.prompt_toolkit.contrib',
- 'venv.lib.python2.7.site-packages.prompt_toolkit.contrib.telnet',
- 'venv.lib.python2.7.site-packages.prompt_toolkit.contrib.completers',
- 'venv.lib.python2.7.site-packages.prompt_toolkit.contrib.validators',
- 'venv.lib.python2.7.site-packages.prompt_toolkit.contrib.regular_languages',
- 'venv.lib.python2.7.site-packages.prompt_toolkit.filters',
- 'venv.lib.python2.7.site-packages.prompt_toolkit.terminal',
- 'venv.lib.python2.7.site-packages.prompt_toolkit.clipboard',
- 'venv.lib.python2.7.site-packages.prompt_toolkit.eventloop',
- 'venv.lib.python2.7.site-packages.prompt_toolkit.key_binding',
- 'venv.lib.python2.7.site-packages.prompt_toolkit.key_binding.bindings',
- 'venv.lib.python2.7.site-packages.ipython_genutils',
- 'venv.lib.python2.7.site-packages.ipython_genutils.tests',
- 'venv.lib.python2.7.site-packages.ipython_genutils.testing',
- 'venv.lib.python2.7.site-packages.shutil_backports'],
- url='',
+ version='0.1',
+ packages=[
+ 'vmreact-master.scripts', 'vmreact-master.scripts.grader', 'vmreact-data-visualization', 'vmreact-merges',
+ 'vmreact-mturk.post_scoring_compiled_csv',
+ ],
+ install_requires=required,
+    platforms='Mac OS X',
+ url='https://github.com/daelsaid/vmreact',
license='',
+    # 'wiki' is not a recognized setup() keyword; carried via project_urls instead
+    project_urls={'Wiki': 'https://github.com/daelsaid/vmreact/wiki'},
author='dawlat el-said',
- author_email='daelsaid@stanford.edu',
- description='vmreact package'
+ author_email='daelsaid@gmail.com',
+ description='Etkin lab VMREACT package',
+ long_description=long_desc
)
diff --git a/venv/.Python b/venv/.Python
deleted file mode 120000
index cc24a1e..0000000
--- a/venv/.Python
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/Python
\ No newline at end of file
diff --git a/venv/bin/activate b/venv/bin/activate
deleted file mode 100644
index 770c846..0000000
--- a/venv/bin/activate
+++ /dev/null
@@ -1,78 +0,0 @@
-# This file must be used with "source bin/activate" *from bash*
-# you cannot run it directly
-
-deactivate () {
- unset -f pydoc >/dev/null 2>&1
-
- # reset old environment variables
- # ! [ -z ${VAR+_} ] returns true if VAR is declared at all
- if ! [ -z "${_OLD_VIRTUAL_PATH+_}" ] ; then
- PATH="$_OLD_VIRTUAL_PATH"
- export PATH
- unset _OLD_VIRTUAL_PATH
- fi
- if ! [ -z "${_OLD_VIRTUAL_PYTHONHOME+_}" ] ; then
- PYTHONHOME="$_OLD_VIRTUAL_PYTHONHOME"
- export PYTHONHOME
- unset _OLD_VIRTUAL_PYTHONHOME
- fi
-
- # This should detect bash and zsh, which have a hash command that must
- # be called to get it to forget past commands. Without forgetting
- # past commands the $PATH changes we made may not be respected
- if [ -n "${BASH-}" ] || [ -n "${ZSH_VERSION-}" ] ; then
- hash -r 2>/dev/null
- fi
-
- if ! [ -z "${_OLD_VIRTUAL_PS1+_}" ] ; then
- PS1="$_OLD_VIRTUAL_PS1"
- export PS1
- unset _OLD_VIRTUAL_PS1
- fi
-
- unset VIRTUAL_ENV
- if [ ! "${1-}" = "nondestructive" ] ; then
- # Self destruct!
- unset -f deactivate
- fi
-}
-
-# unset irrelevant variables
-deactivate nondestructive
-
-VIRTUAL_ENV="/Users/lillyel-said/Desktop/stanford/scripts/projects/vmreact/venv"
-export VIRTUAL_ENV
-
-_OLD_VIRTUAL_PATH="$PATH"
-PATH="$VIRTUAL_ENV/bin:$PATH"
-export PATH
-
-# unset PYTHONHOME if set
-if ! [ -z "${PYTHONHOME+_}" ] ; then
- _OLD_VIRTUAL_PYTHONHOME="$PYTHONHOME"
- unset PYTHONHOME
-fi
-
-if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT-}" ] ; then
- _OLD_VIRTUAL_PS1="$PS1"
- if [ "x" != x ] ; then
- PS1="$PS1"
- else
- PS1="(`basename \"$VIRTUAL_ENV\"`) $PS1"
- fi
- export PS1
-fi
-
-# Make sure to unalias pydoc if it's already there
-alias pydoc 2>/dev/null >/dev/null && unalias pydoc
-
-pydoc () {
- python -m pydoc "$@"
-}
-
-# This should detect bash and zsh, which have a hash command that must
-# be called to get it to forget past commands. Without forgetting
-# past commands the $PATH changes we made may not be respected
-if [ -n "${BASH-}" ] || [ -n "${ZSH_VERSION-}" ] ; then
- hash -r 2>/dev/null
-fi
diff --git a/venv/bin/activate.csh b/venv/bin/activate.csh
deleted file mode 100644
index e4557d5..0000000
--- a/venv/bin/activate.csh
+++ /dev/null
@@ -1,36 +0,0 @@
-# This file must be used with "source bin/activate.csh" *from csh*.
-# You cannot run it directly.
-# Created by Davide Di Blasi .
-
-alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate && unalias pydoc'
-
-# Unset irrelevant variables.
-deactivate nondestructive
-
-setenv VIRTUAL_ENV "/Users/lillyel-said/Desktop/stanford/scripts/projects/vmreact/venv"
-
-set _OLD_VIRTUAL_PATH="$PATH"
-setenv PATH "$VIRTUAL_ENV/bin:$PATH"
-
-
-
-if ("" != "") then
- set env_name = ""
-else
- set env_name = `basename "$VIRTUAL_ENV"`
-endif
-
-# Could be in a non-interactive environment,
-# in which case, $prompt is undefined and we wouldn't
-# care about the prompt anyway.
-if ( $?prompt ) then
- set _OLD_VIRTUAL_PROMPT="$prompt"
- set prompt = "[$env_name] $prompt"
-endif
-
-unset env_name
-
-alias pydoc python -m pydoc
-
-rehash
-
diff --git a/venv/bin/activate.fish b/venv/bin/activate.fish
deleted file mode 100644
index 800bce8..0000000
--- a/venv/bin/activate.fish
+++ /dev/null
@@ -1,76 +0,0 @@
-# This file must be used using `. bin/activate.fish` *within a running fish ( http://fishshell.com ) session*.
-# Do not run it directly.
-
-function deactivate -d 'Exit virtualenv mode and return to the normal environment.'
- # reset old environment variables
- if test -n "$_OLD_VIRTUAL_PATH"
- set -gx PATH $_OLD_VIRTUAL_PATH
- set -e _OLD_VIRTUAL_PATH
- end
-
- if test -n "$_OLD_VIRTUAL_PYTHONHOME"
- set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME
- set -e _OLD_VIRTUAL_PYTHONHOME
- end
-
- if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
- # Set an empty local `$fish_function_path` to allow the removal of `fish_prompt` using `functions -e`.
- set -l fish_function_path
-
- # Erase virtualenv's `fish_prompt` and restore the original.
- functions -e fish_prompt
- functions -c _old_fish_prompt fish_prompt
- functions -e _old_fish_prompt
- set -e _OLD_FISH_PROMPT_OVERRIDE
- end
-
- set -e VIRTUAL_ENV
-
- if test "$argv[1]" != 'nondestructive'
- # Self-destruct!
- functions -e pydoc
- functions -e deactivate
- end
-end
-
-# Unset irrelevant variables.
-deactivate nondestructive
-
-set -gx VIRTUAL_ENV "/Users/lillyel-said/Desktop/stanford/scripts/projects/vmreact/venv"
-
-set -gx _OLD_VIRTUAL_PATH $PATH
-set -gx PATH "$VIRTUAL_ENV/bin" $PATH
-
-# Unset `$PYTHONHOME` if set.
-if set -q PYTHONHOME
- set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
- set -e PYTHONHOME
-end
-
-function pydoc
- python -m pydoc $argv
-end
-
-if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
- # Copy the current `fish_prompt` function as `_old_fish_prompt`.
- functions -c fish_prompt _old_fish_prompt
-
- function fish_prompt
- # Save the current $status, for fish_prompts that display it.
- set -l old_status $status
-
- # Prompt override provided?
- # If not, just prepend the environment name.
- if test -n ""
- printf '%s%s' "" (set_color normal)
- else
- printf '%s(%s) ' (set_color normal) (basename "$VIRTUAL_ENV")
- end
-
- # Restore the original $status
- echo "exit $old_status" | source
- _old_fish_prompt
- end
-
- set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
-end
diff --git a/venv/bin/activate_this.py b/venv/bin/activate_this.py
deleted file mode 100644
index 6a9c42f..0000000
--- a/venv/bin/activate_this.py
+++ /dev/null
@@ -1,35 +0,0 @@
-"""By using execfile(this_file, dict(__file__=this_file)) you will
-activate this virtualenv environment.
-
-This can be used when you must use an existing Python interpreter, not
-the virtualenv bin/python
-"""
-
-try:
- __file__
-except NameError:
- raise AssertionError(
- "You must run this like execfile('path/to/activate_this.py', dict(__file__='path/to/activate_this.py'))")
-import os
-import sys
-
-old_os_path = os.environ.get('PATH', '')
-os.environ['PATH'] = os.path.dirname(os.path.abspath(__file__)) + os.pathsep + old_os_path
-base = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-if sys.platform == 'win32':
- site_packages = os.path.join(base, 'Lib', 'site-packages')
-else:
- site_packages = os.path.join(base, 'lib', 'python%s' % sys.version[:3], 'site-packages')
-prev_sys_path = list(sys.path)
-import site
-
-site.addsitedir(site_packages)
-sys.real_prefix = sys.prefix
-sys.prefix = base
-# Move the added items to the front of the path:
-new_sys_path = []
-for item in list(sys.path):
- if item not in prev_sys_path:
- new_sys_path.append(item)
- sys.path.remove(item)
-sys.path[:0] = new_sys_path
diff --git a/venv/bin/conv-template b/venv/bin/conv-template
deleted file mode 100755
index ad40da1..0000000
--- a/venv/bin/conv-template
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/Users/lillyel-said/Desktop/stanford/scripts/projects/vmreact/venv/bin/python
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from numpy.distutils.conv_template import main
-
-if __name__ == '__main__':
- sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
- sys.exit(main())
diff --git a/venv/bin/easy_install b/venv/bin/easy_install
deleted file mode 100755
index fcbf92d..0000000
--- a/venv/bin/easy_install
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/Users/lillyel-said/Desktop/stanford/scripts/projects/vmreact/venv/bin/python
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from setuptools.command.easy_install import main
-
-if __name__ == '__main__':
- sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
- sys.exit(main())
diff --git a/venv/bin/easy_install-2.7 b/venv/bin/easy_install-2.7
deleted file mode 100755
index fcbf92d..0000000
--- a/venv/bin/easy_install-2.7
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/Users/lillyel-said/Desktop/stanford/scripts/projects/vmreact/venv/bin/python
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from setuptools.command.easy_install import main
-
-if __name__ == '__main__':
- sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
- sys.exit(main())
diff --git a/venv/bin/f2py b/venv/bin/f2py
deleted file mode 100755
index 5beac34..0000000
--- a/venv/bin/f2py
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/Users/lillyel-said/Desktop/stanford/scripts/projects/vmreact/venv/bin/python
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from numpy.f2py.__main__ import main
-
-if __name__ == '__main__':
- sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
- sys.exit(main())
diff --git a/venv/bin/from-template b/venv/bin/from-template
deleted file mode 100755
index f402203..0000000
--- a/venv/bin/from-template
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/Users/lillyel-said/Desktop/stanford/scripts/projects/vmreact/venv/bin/python
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from numpy.distutils.from_template import main
-
-if __name__ == '__main__':
- sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
- sys.exit(main())
diff --git a/venv/bin/iptest b/venv/bin/iptest
deleted file mode 100755
index 5ade17b..0000000
--- a/venv/bin/iptest
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/Users/lillyel-said/Desktop/stanford/scripts/projects/vmreact/venv/bin/python
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from IPython.testing.iptestcontroller import main
-
-if __name__ == '__main__':
- sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
- sys.exit(main())
diff --git a/venv/bin/iptest2 b/venv/bin/iptest2
deleted file mode 100755
index 5ade17b..0000000
--- a/venv/bin/iptest2
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/Users/lillyel-said/Desktop/stanford/scripts/projects/vmreact/venv/bin/python
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from IPython.testing.iptestcontroller import main
-
-if __name__ == '__main__':
- sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
- sys.exit(main())
diff --git a/venv/bin/ipython b/venv/bin/ipython
deleted file mode 100755
index d0157f2..0000000
--- a/venv/bin/ipython
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/Users/lillyel-said/Desktop/stanford/scripts/projects/vmreact/venv/bin/python
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from IPython import start_ipython
-
-if __name__ == '__main__':
- sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
- sys.exit(start_ipython())
diff --git a/venv/bin/ipython2 b/venv/bin/ipython2
deleted file mode 100755
index d0157f2..0000000
--- a/venv/bin/ipython2
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/Users/lillyel-said/Desktop/stanford/scripts/projects/vmreact/venv/bin/python
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from IPython import start_ipython
-
-if __name__ == '__main__':
- sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
- sys.exit(start_ipython())
diff --git a/venv/bin/pip b/venv/bin/pip
deleted file mode 100755
index acd90fc..0000000
--- a/venv/bin/pip
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/Users/lillyel-said/Desktop/stanford/scripts/projects/vmreact/venv/bin/python
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from pip._internal import main
-
-if __name__ == '__main__':
- sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
- sys.exit(main())
diff --git a/venv/bin/pip2 b/venv/bin/pip2
deleted file mode 100755
index acd90fc..0000000
--- a/venv/bin/pip2
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/Users/lillyel-said/Desktop/stanford/scripts/projects/vmreact/venv/bin/python
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from pip._internal import main
-
-if __name__ == '__main__':
- sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
- sys.exit(main())
diff --git a/venv/bin/pip2.7 b/venv/bin/pip2.7
deleted file mode 100755
index acd90fc..0000000
--- a/venv/bin/pip2.7
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/Users/lillyel-said/Desktop/stanford/scripts/projects/vmreact/venv/bin/python
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from pip._internal import main
-
-if __name__ == '__main__':
- sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
- sys.exit(main())
diff --git a/venv/bin/pygmentize b/venv/bin/pygmentize
deleted file mode 100755
index 445cc4b..0000000
--- a/venv/bin/pygmentize
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/Users/lillyel-said/Desktop/stanford/scripts/projects/vmreact/venv/bin/python
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from pygments.cmdline import main
-
-if __name__ == '__main__':
- sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
- sys.exit(main())
diff --git a/venv/bin/python b/venv/bin/python
deleted file mode 100755
index 947e565..0000000
Binary files a/venv/bin/python and /dev/null differ
diff --git a/venv/bin/python-config b/venv/bin/python-config
deleted file mode 100755
index 99faff3..0000000
--- a/venv/bin/python-config
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/Users/lillyel-said/Desktop/stanford/scripts/projects/vmreact/venv/bin/python
-
-import getopt
-import sys
-import sysconfig
-
-valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
- 'ldflags', 'help']
-
-if sys.version_info >= (3, 2):
- valid_opts.insert(-1, 'extension-suffix')
- valid_opts.append('abiflags')
-if sys.version_info >= (3, 3):
- valid_opts.append('configdir')
-
-
-def exit_with_usage(code=1):
- sys.stderr.write("Usage: {0} [{1}]\n".format(
- sys.argv[0], '|'.join('--' + opt for opt in valid_opts)))
- sys.exit(code)
-
-
-try:
- opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
-except getopt.error:
- exit_with_usage()
-
-if not opts:
- exit_with_usage()
-
-pyver = sysconfig.get_config_var('VERSION')
-getvar = sysconfig.get_config_var
-
-opt_flags = [flag for (flag, val) in opts]
-
-if '--help' in opt_flags:
- exit_with_usage(code=0)
-
-for opt in opt_flags:
- if opt == '--prefix':
- print(sysconfig.get_config_var('prefix'))
-
- elif opt == '--exec-prefix':
- print(sysconfig.get_config_var('exec_prefix'))
-
- elif opt in ('--includes', '--cflags'):
- flags = ['-I' + sysconfig.get_path('include'),
- '-I' + sysconfig.get_path('platinclude')]
- if opt == '--cflags':
- flags.extend(getvar('CFLAGS').split())
- print(' '.join(flags))
-
- elif opt in ('--libs', '--ldflags'):
- abiflags = getattr(sys, 'abiflags', '')
- libs = ['-lpython' + pyver + abiflags]
- libs += getvar('LIBS').split()
- libs += getvar('SYSLIBS').split()
- # add the prefix/lib/pythonX.Y/config dir, but only if there is no
- # shared library in prefix/lib/.
- if opt == '--ldflags':
- if not getvar('Py_ENABLE_SHARED'):
- libs.insert(0, '-L' + getvar('LIBPL'))
- if not getvar('PYTHONFRAMEWORK'):
- libs.extend(getvar('LINKFORSHARED').split())
- print(' '.join(libs))
-
- elif opt == '--extension-suffix':
- ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
- if ext_suffix is None:
- ext_suffix = sysconfig.get_config_var('SO')
- print(ext_suffix)
-
- elif opt == '--abiflags':
- if not getattr(sys, 'abiflags', None):
- exit_with_usage()
- print(sys.abiflags)
-
- elif opt == '--configdir':
- print(sysconfig.get_config_var('LIBPL'))
diff --git a/venv/bin/python2 b/venv/bin/python2
deleted file mode 120000
index d8654aa..0000000
--- a/venv/bin/python2
+++ /dev/null
@@ -1 +0,0 @@
-python
\ No newline at end of file
diff --git a/venv/bin/python2.7 b/venv/bin/python2.7
deleted file mode 120000
index d8654aa..0000000
--- a/venv/bin/python2.7
+++ /dev/null
@@ -1 +0,0 @@
-python
\ No newline at end of file
diff --git a/venv/bin/wheel b/venv/bin/wheel
deleted file mode 100755
index 1ef16a8..0000000
--- a/venv/bin/wheel
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/Users/lillyel-said/Desktop/stanford/scripts/projects/vmreact/venv/bin/python
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from wheel.tool import main
-
-if __name__ == '__main__':
- sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
- sys.exit(main())
diff --git a/venv/include/python2.7 b/venv/include/python2.7
deleted file mode 120000
index 3fe034f..0000000
--- a/venv/include/python2.7
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/include/python2.7
\ No newline at end of file
diff --git a/venv/lib/python2.7/UserDict.py b/venv/lib/python2.7/UserDict.py
deleted file mode 120000
index b735f02..0000000
--- a/venv/lib/python2.7/UserDict.py
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/UserDict.py
\ No newline at end of file
diff --git a/venv/lib/python2.7/UserDict.pyc b/venv/lib/python2.7/UserDict.pyc
deleted file mode 100644
index 7f3579a..0000000
Binary files a/venv/lib/python2.7/UserDict.pyc and /dev/null differ
diff --git a/venv/lib/python2.7/_abcoll.py b/venv/lib/python2.7/_abcoll.py
deleted file mode 120000
index 4a595bc..0000000
--- a/venv/lib/python2.7/_abcoll.py
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/_abcoll.py
\ No newline at end of file
diff --git a/venv/lib/python2.7/_abcoll.pyc b/venv/lib/python2.7/_abcoll.pyc
deleted file mode 100644
index 9d40290..0000000
Binary files a/venv/lib/python2.7/_abcoll.pyc and /dev/null differ
diff --git a/venv/lib/python2.7/_weakrefset.py b/venv/lib/python2.7/_weakrefset.py
deleted file mode 120000
index b8b09b7..0000000
--- a/venv/lib/python2.7/_weakrefset.py
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/_weakrefset.py
\ No newline at end of file
diff --git a/venv/lib/python2.7/_weakrefset.pyc b/venv/lib/python2.7/_weakrefset.pyc
deleted file mode 100644
index e468cf2..0000000
Binary files a/venv/lib/python2.7/_weakrefset.pyc and /dev/null differ
diff --git a/venv/lib/python2.7/abc.py b/venv/lib/python2.7/abc.py
deleted file mode 120000
index 87956e5..0000000
--- a/venv/lib/python2.7/abc.py
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/abc.py
\ No newline at end of file
diff --git a/venv/lib/python2.7/abc.pyc b/venv/lib/python2.7/abc.pyc
deleted file mode 100644
index 170e355..0000000
Binary files a/venv/lib/python2.7/abc.pyc and /dev/null differ
diff --git a/venv/lib/python2.7/codecs.py b/venv/lib/python2.7/codecs.py
deleted file mode 120000
index b18c8d6..0000000
--- a/venv/lib/python2.7/codecs.py
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/codecs.py
\ No newline at end of file
diff --git a/venv/lib/python2.7/codecs.pyc b/venv/lib/python2.7/codecs.pyc
deleted file mode 100644
index 66b45f6..0000000
Binary files a/venv/lib/python2.7/codecs.pyc and /dev/null differ
diff --git a/venv/lib/python2.7/config b/venv/lib/python2.7/config
deleted file mode 120000
index 88ddfa1..0000000
--- a/venv/lib/python2.7/config
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/config
\ No newline at end of file
diff --git a/venv/lib/python2.7/copy_reg.py b/venv/lib/python2.7/copy_reg.py
deleted file mode 120000
index 8d0265c..0000000
--- a/venv/lib/python2.7/copy_reg.py
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/copy_reg.py
\ No newline at end of file
diff --git a/venv/lib/python2.7/copy_reg.pyc b/venv/lib/python2.7/copy_reg.pyc
deleted file mode 100644
index 13c31c9..0000000
Binary files a/venv/lib/python2.7/copy_reg.pyc and /dev/null differ
diff --git a/venv/lib/python2.7/distutils/__init__.py b/venv/lib/python2.7/distutils/__init__.py
deleted file mode 100644
index 06802ff..0000000
--- a/venv/lib/python2.7/distutils/__init__.py
+++ /dev/null
@@ -1,122 +0,0 @@
-import imp
-import opcode # opcode is not a virtualenv module, so we can use it to find the stdlib
-import os
-import sys
-import warnings
-
-# Important! To work on pypy, this must be a module that resides in the
-# lib-python/modified-x.y.z directory
-
-dirname = os.path.dirname
-
-distutils_path = os.path.join(os.path.dirname(opcode.__file__), 'distutils')
-if os.path.normpath(distutils_path) == os.path.dirname(os.path.normpath(__file__)):
- warnings.warn(
- "The virtualenv distutils package at %s appears to be in the same location as the system distutils?")
-else:
- __path__.insert(0, distutils_path)
- real_distutils = imp.load_module("_virtualenv_distutils", None, distutils_path, ('', '', imp.PKG_DIRECTORY))
- # Copy the relevant attributes
- try:
- __revision__ = real_distutils.__revision__
- except AttributeError:
- pass
- __version__ = real_distutils.__version__
-
-from distutils import dist, sysconfig
-
-try:
- basestring
-except NameError:
- basestring = str
-
-## patch build_ext (distutils doesn't know how to get the libs directory
-## path on windows - it hardcodes the paths around the patched sys.prefix)
-
-if sys.platform == 'win32':
- from distutils.command.build_ext import build_ext as old_build_ext
-
-
- class build_ext(old_build_ext):
- def finalize_options(self):
- if self.library_dirs is None:
- self.library_dirs = []
- elif isinstance(self.library_dirs, basestring):
- self.library_dirs = self.library_dirs.split(os.pathsep)
-
- self.library_dirs.insert(0, os.path.join(sys.real_prefix, "Libs"))
- old_build_ext.finalize_options(self)
-
-
- from distutils.command import build_ext as build_ext_module
-
- build_ext_module.build_ext = build_ext
-
-## distutils.dist patches:
-
-old_find_config_files = dist.Distribution.find_config_files
-
-
-def find_config_files(self):
- found = old_find_config_files(self)
- system_distutils = os.path.join(distutils_path, 'distutils.cfg')
- # if os.path.exists(system_distutils):
- # found.insert(0, system_distutils)
- # What to call the per-user config file
- if os.name == 'posix':
- user_filename = ".pydistutils.cfg"
- else:
- user_filename = "pydistutils.cfg"
- user_filename = os.path.join(sys.prefix, user_filename)
- if os.path.isfile(user_filename):
- for item in list(found):
- if item.endswith('pydistutils.cfg'):
- found.remove(item)
- found.append(user_filename)
- return found
-
-
-dist.Distribution.find_config_files = find_config_files
-
-## distutils.sysconfig patches:
-
-old_get_python_inc = sysconfig.get_python_inc
-
-
-def sysconfig_get_python_inc(plat_specific=0, prefix=None):
- if prefix is None:
- prefix = sys.real_prefix
- return old_get_python_inc(plat_specific, prefix)
-
-
-sysconfig_get_python_inc.__doc__ = old_get_python_inc.__doc__
-sysconfig.get_python_inc = sysconfig_get_python_inc
-
-old_get_python_lib = sysconfig.get_python_lib
-
-
-def sysconfig_get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
- if standard_lib and prefix is None:
- prefix = sys.real_prefix
- return old_get_python_lib(plat_specific, standard_lib, prefix)
-
-
-sysconfig_get_python_lib.__doc__ = old_get_python_lib.__doc__
-sysconfig.get_python_lib = sysconfig_get_python_lib
-
-old_get_config_vars = sysconfig.get_config_vars
-
-
-def sysconfig_get_config_vars(*args):
- real_vars = old_get_config_vars(*args)
- if sys.platform == 'win32':
- lib_dir = os.path.join(sys.real_prefix, "libs")
- if isinstance(real_vars, dict) and 'LIBDIR' not in real_vars:
- real_vars['LIBDIR'] = lib_dir # asked for all
- elif isinstance(real_vars, list) and 'LIBDIR' in args:
- real_vars = real_vars + [lib_dir] # asked for list
- return real_vars
-
-
-sysconfig_get_config_vars.__doc__ = old_get_config_vars.__doc__
-sysconfig.get_config_vars = sysconfig_get_config_vars
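The file deleted above is virtualenv's stock `distutils` shim: each `sysconfig` accessor is wrapped so that, inside the venv, lookups default to the base interpreter's prefix (`sys.real_prefix`) instead of the venv's own. A sketch of that wrapping pattern in isolation; the `getattr` fallback covers interpreters where `sys.real_prefix` is not set, and `distutils` itself is deprecated on Python 3.10+:

```python
import sys
from distutils import sysconfig  # deprecated in 3.10+, removed in 3.12

_orig_get_python_inc = sysconfig.get_python_inc

def patched_get_python_inc(plat_specific=0, prefix=None):
    # Delegate to the original, defaulting the prefix to the base
    # interpreter so header lookups escape the virtualenv.
    if prefix is None:
        prefix = getattr(sys, 'real_prefix', sys.prefix)
    return _orig_get_python_inc(plat_specific, prefix)

patched_get_python_inc.__doc__ = _orig_get_python_inc.__doc__
sysconfig.get_python_inc = patched_get_python_inc
```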
diff --git a/venv/lib/python2.7/distutils/__init__.pyc b/venv/lib/python2.7/distutils/__init__.pyc
deleted file mode 100644
index f4ec447..0000000
Binary files a/venv/lib/python2.7/distutils/__init__.pyc and /dev/null differ
diff --git a/venv/lib/python2.7/distutils/distutils.cfg b/venv/lib/python2.7/distutils/distutils.cfg
deleted file mode 100644
index 1af230e..0000000
--- a/venv/lib/python2.7/distutils/distutils.cfg
+++ /dev/null
@@ -1,6 +0,0 @@
-# This is a config file local to this virtualenv installation
-# You may include options that will be used by all distutils commands,
-# and by easy_install. For instance:
-#
-# [easy_install]
-# find_links = http://mylocalsite
diff --git a/venv/lib/python2.7/encodings b/venv/lib/python2.7/encodings
deleted file mode 120000
index 8732f85..0000000
--- a/venv/lib/python2.7/encodings
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/encodings
\ No newline at end of file
diff --git a/venv/lib/python2.7/fnmatch.py b/venv/lib/python2.7/fnmatch.py
deleted file mode 120000
index 49b6bc0..0000000
--- a/venv/lib/python2.7/fnmatch.py
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/fnmatch.py
\ No newline at end of file
diff --git a/venv/lib/python2.7/fnmatch.pyc b/venv/lib/python2.7/fnmatch.pyc
deleted file mode 100644
index c9313f2..0000000
Binary files a/venv/lib/python2.7/fnmatch.pyc and /dev/null differ
diff --git a/venv/lib/python2.7/genericpath.py b/venv/lib/python2.7/genericpath.py
deleted file mode 120000
index 7843bce..0000000
--- a/venv/lib/python2.7/genericpath.py
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/genericpath.py
\ No newline at end of file
diff --git a/venv/lib/python2.7/genericpath.pyc b/venv/lib/python2.7/genericpath.pyc
deleted file mode 100644
index 923a836..0000000
Binary files a/venv/lib/python2.7/genericpath.pyc and /dev/null differ
diff --git a/venv/lib/python2.7/lib-dynload b/venv/lib/python2.7/lib-dynload
deleted file mode 120000
index 24c555e..0000000
--- a/venv/lib/python2.7/lib-dynload
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/lib-dynload
\ No newline at end of file
diff --git a/venv/lib/python2.7/linecache.py b/venv/lib/python2.7/linecache.py
deleted file mode 120000
index 1f79a61..0000000
--- a/venv/lib/python2.7/linecache.py
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/linecache.py
\ No newline at end of file
diff --git a/venv/lib/python2.7/linecache.pyc b/venv/lib/python2.7/linecache.pyc
deleted file mode 100644
index 33b4add..0000000
Binary files a/venv/lib/python2.7/linecache.pyc and /dev/null differ
diff --git a/venv/lib/python2.7/locale.py b/venv/lib/python2.7/locale.py
deleted file mode 120000
index cc8a5a7..0000000
--- a/venv/lib/python2.7/locale.py
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/locale.py
\ No newline at end of file
diff --git a/venv/lib/python2.7/locale.pyc b/venv/lib/python2.7/locale.pyc
deleted file mode 100644
index 2a34d04..0000000
Binary files a/venv/lib/python2.7/locale.pyc and /dev/null differ
diff --git a/venv/lib/python2.7/ntpath.py b/venv/lib/python2.7/ntpath.py
deleted file mode 120000
index af0bbe7..0000000
--- a/venv/lib/python2.7/ntpath.py
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/ntpath.py
\ No newline at end of file
diff --git a/venv/lib/python2.7/orig-prefix.txt b/venv/lib/python2.7/orig-prefix.txt
deleted file mode 100644
index 2a45120..0000000
--- a/venv/lib/python2.7/orig-prefix.txt
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7
\ No newline at end of file
diff --git a/venv/lib/python2.7/os.py b/venv/lib/python2.7/os.py
deleted file mode 120000
index 04db928..0000000
--- a/venv/lib/python2.7/os.py
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/os.py
\ No newline at end of file
diff --git a/venv/lib/python2.7/os.pyc b/venv/lib/python2.7/os.pyc
deleted file mode 100644
index 42d1cec..0000000
Binary files a/venv/lib/python2.7/os.pyc and /dev/null differ
diff --git a/venv/lib/python2.7/posixpath.py b/venv/lib/python2.7/posixpath.py
deleted file mode 120000
index cc89aa2..0000000
--- a/venv/lib/python2.7/posixpath.py
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/posixpath.py
\ No newline at end of file
diff --git a/venv/lib/python2.7/posixpath.pyc b/venv/lib/python2.7/posixpath.pyc
deleted file mode 100644
index 9bca85f..0000000
Binary files a/venv/lib/python2.7/posixpath.pyc and /dev/null differ
diff --git a/venv/lib/python2.7/re.py b/venv/lib/python2.7/re.py
deleted file mode 120000
index b1a8e65..0000000
--- a/venv/lib/python2.7/re.py
+++ /dev/null
@@ -1 +0,0 @@
-/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/re.py
\ No newline at end of file
diff --git a/venv/lib/python2.7/re.pyc b/venv/lib/python2.7/re.pyc
deleted file mode 100644
index 6b90f36..0000000
Binary files a/venv/lib/python2.7/re.pyc and /dev/null differ
diff --git a/venv/lib/python2.7/site-packages/IPython/__init__.py b/venv/lib/python2.7/site-packages/IPython/__init__.py
deleted file mode 100644
index 2b7cbf8..0000000
--- a/venv/lib/python2.7/site-packages/IPython/__init__.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# encoding: utf-8
-"""
-IPython: tools for interactive and parallel computing in Python.
-
-http://ipython.org
-"""
-# -----------------------------------------------------------------------------
-# Copyright (c) 2008-2011, IPython Development Team.
-# Copyright (c) 2001-2007, Fernando Perez
-# Copyright (c) 2001, Janko Hauser
-# Copyright (c) 2001, Nathaniel Gray
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# -----------------------------------------------------------------------------
-
-# -----------------------------------------------------------------------------
-# Imports
-# -----------------------------------------------------------------------------
-from __future__ import absolute_import
-
-import os
-import sys
-import warnings
-
-# -----------------------------------------------------------------------------
-# Setup everything
-# -----------------------------------------------------------------------------
-
-# Don't forget to also update setup.py when this changes!
-v = sys.version_info
-if v[:2] < (2, 7) or (v[0] >= 3 and v[:2] < (3, 3)):
- raise ImportError('IPython requires Python version 2.7 or 3.3 or above.')
-del v
-
-# Make it easy to import extensions - they are always directly on pythonpath.
-# Therefore, non-IPython modules can be added to the extensions directory.
-# This should probably be in ipapp.py.
-sys.path.append(os.path.join(os.path.dirname(__file__), "extensions"))
-
-# -----------------------------------------------------------------------------
-# Setup the top level names
-# -----------------------------------------------------------------------------
-
-from .core.getipython import get_ipython
-from .core import release
-from .core.application import Application
-from .terminal.embed import embed
-
-from .core.interactiveshell import InteractiveShell
-from .testing import test
-from .utils.sysinfo import sys_info
-from .utils.frame import extract_module_locals
-
-# Release data
-__author__ = '%s <%s>' % (release.author, release.author_email)
-__license__ = release.license
-__version__ = release.version
-version_info = release.version_info
-
-
-def embed_kernel(module=None, local_ns=None, **kwargs):
- """Embed and start an IPython kernel in a given scope.
-
- If you don't want the kernel to initialize the namespace
- from the scope of the surrounding function,
- and/or you want to load full IPython configuration,
- you probably want `IPython.start_kernel()` instead.
-
- Parameters
- ----------
- module : ModuleType, optional
- The module to load into IPython globals (default: caller)
- local_ns : dict, optional
- The namespace to load into IPython user namespace (default: caller)
-
- kwargs : various, optional
- Further keyword args are relayed to the IPKernelApp constructor,
- allowing configuration of the Kernel. Will only have an effect
- on the first embed_kernel call for a given process.
- """
-
- (caller_module, caller_locals) = extract_module_locals(1)
- if module is None:
- module = caller_module
- if local_ns is None:
- local_ns = caller_locals
-
- # Only import .zmq when we really need it
- from ipykernel.embed import embed_kernel as real_embed_kernel
- real_embed_kernel(module=module, local_ns=local_ns, **kwargs)
-
-
-def start_ipython(argv=None, **kwargs):
- """Launch a normal IPython instance (as opposed to embedded)
-
- `IPython.embed()` puts a shell in a particular calling scope,
- such as a function or method for debugging purposes,
- which is often not desirable.
-
- `start_ipython()` does full, regular IPython initialization,
- including loading startup files, configuration, etc.
- much of which is skipped by `embed()`.
-
- This is a public API method, and will survive implementation changes.
-
- Parameters
- ----------
-
- argv : list or None, optional
- If unspecified or None, IPython will parse command-line options from sys.argv.
- To prevent any command-line parsing, pass an empty list: `argv=[]`.
- user_ns : dict, optional
- specify this dictionary to initialize the IPython user namespace with particular values.
- kwargs : various, optional
- Any other kwargs will be passed to the Application constructor,
- such as `config`.
- """
- from IPython.terminal.ipapp import launch_new_instance
- return launch_new_instance(argv=argv, **kwargs)
-
-
-def start_kernel(argv=None, **kwargs):
- """Launch a normal IPython kernel instance (as opposed to embedded)
-
- `IPython.embed_kernel()` puts a shell in a particular calling scope,
- such as a function or method for debugging purposes,
- which is often not desirable.
-
- `start_kernel()` does full, regular IPython initialization,
- including loading startup files, configuration, etc.
- much of which is skipped by `embed()`.
-
- Parameters
- ----------
-
- argv : list or None, optional
- If unspecified or None, IPython will parse command-line options from sys.argv.
- To prevent any command-line parsing, pass an empty list: `argv=[]`.
- user_ns : dict, optional
- specify this dictionary to initialize the IPython user namespace with particular values.
- kwargs : various, optional
- Any other kwargs will be passed to the Application constructor,
- such as `config`.
- """
- from IPython.kernel.zmq.kernelapp import launch_new_instance
- return launch_new_instance(argv=argv, **kwargs)
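For context, this deleted `__init__.py` is where IPython's public entry points live; per the docstrings above, typical use looks like the following sketch (assumes an installed IPython):

```python
import IPython

def inspect_here():
    x = 42
    # Drop a shell into this scope for debugging; `x` is visible inside it.
    IPython.embed()

# Full, regular IPython startup with command-line parsing disabled:
IPython.start_ipython(argv=[])
```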
diff --git a/venv/lib/python2.7/site-packages/IPython/__main__.py b/venv/lib/python2.7/site-packages/IPython/__main__.py
deleted file mode 100644
index 8363d1f..0000000
--- a/venv/lib/python2.7/site-packages/IPython/__main__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# encoding: utf-8
-"""Terminal-based IPython entry point.
-"""
-# -----------------------------------------------------------------------------
-# Copyright (c) 2012, IPython Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# -----------------------------------------------------------------------------
-
-from IPython import start_ipython
-
-start_ipython()
diff --git a/venv/lib/python2.7/site-packages/IPython/config.py b/venv/lib/python2.7/site-packages/IPython/config.py
deleted file mode 100644
index 734147c..0000000
--- a/venv/lib/python2.7/site-packages/IPython/config.py
+++ /dev/null
@@ -1,18 +0,0 @@
-"""
-Shim to maintain backwards compatibility with old IPython.config imports.
-"""
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-import sys
-from warnings import warn
-
-from IPython.utils.shimmodule import ShimModule, ShimWarning
-
-warn("The `IPython.config` package has been deprecated since IPython 4.0. "
- "You should import from traitlets.config instead.", ShimWarning)
-
-# Unconditionally insert the shim into sys.modules so that further import calls
-# trigger the custom attribute access above
-
-sys.modules['IPython.config'] = ShimModule(src='IPython.config', mirror='traitlets.config')
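This shim shows the deprecation pattern IPython 4 applied across the codebase: warn once, then alias the old import path to the new implementation through `sys.modules`. The same trick with stand-in names (`oldjson` is hypothetical; `json` plays the relocated module):

```python
import sys
import warnings

import json  # stands in for the relocated implementation

warnings.warn("the 'oldjson' module is deprecated; import 'json' instead",
              DeprecationWarning)

# Any later `import oldjson` now resolves to the real module.
sys.modules['oldjson'] = json

import oldjson
assert oldjson.loads('{"a": 1}') == {'a': 1}
```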
diff --git a/venv/lib/python2.7/site-packages/IPython/consoleapp.py b/venv/lib/python2.7/site-packages/IPython/consoleapp.py
deleted file mode 100644
index 7fc992f..0000000
--- a/venv/lib/python2.7/site-packages/IPython/consoleapp.py
+++ /dev/null
@@ -1,12 +0,0 @@
-"""
-Shim to maintain backwards compatibility with old IPython.consoleapp imports.
-"""
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-from warnings import warn
-
-warn("The `IPython.consoleapp` package has been deprecated. "
- "You should import from jupyter_client.consoleapp instead.")
-
-from jupyter_client.consoleapp import *
diff --git a/venv/lib/python2.7/site-packages/IPython/core/alias.py b/venv/lib/python2.7/site-packages/IPython/core/alias.py
deleted file mode 100644
index 9309bf3..0000000
--- a/venv/lib/python2.7/site-packages/IPython/core/alias.py
+++ /dev/null
@@ -1,258 +0,0 @@
-# encoding: utf-8
-"""
-System command aliases.
-
-Authors:
-
-* Fernando Perez
-* Brian Granger
-"""
-
-# -----------------------------------------------------------------------------
-# Copyright (C) 2008-2011 The IPython Development Team
-#
-# Distributed under the terms of the BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# -----------------------------------------------------------------------------
-
-# -----------------------------------------------------------------------------
-# Imports
-# -----------------------------------------------------------------------------
-
-import os
-import re
-import sys
-from logging import error
-
-from IPython.core.error import UsageError
-from IPython.utils.py3compat import string_types
-from traitlets import List, Instance
-from traitlets.config.configurable import Configurable
-
-# -----------------------------------------------------------------------------
-# Utilities
-# -----------------------------------------------------------------------------
-
-# This is used as the pattern for calls to split_user_input.
-shell_line_split = re.compile(r'^(\s*)()(\S+)(.*$)')
-
-
-def default_aliases():
- """Return list of shell aliases to auto-define.
- """
- # Note: the aliases defined here should be safe to use on a kernel
- # regardless of what frontend it is attached to. Frontends that use a
- # kernel in-process can define additional aliases that will only work in
- # their case. For example, things like 'less' or 'clear' that manipulate
- # the terminal should NOT be declared here, as they will only work if the
- # kernel is running inside a true terminal, and not over the network.
-
- if os.name == 'posix':
- default_aliases = [('mkdir', 'mkdir'), ('rmdir', 'rmdir'),
- ('mv', 'mv'), ('rm', 'rm'), ('cp', 'cp'),
- ('cat', 'cat'),
- ]
- # Useful set of ls aliases. The GNU and BSD options are a little
- # different, so we make aliases that provide as similar as possible
- # behavior in ipython, by passing the right flags for each platform
- if sys.platform.startswith('linux'):
- ls_aliases = [('ls', 'ls -F --color'),
- # long ls
- ('ll', 'ls -F -o --color'),
- # ls normal files only
- ('lf', 'ls -F -o --color %l | grep ^-'),
- # ls symbolic links
- ('lk', 'ls -F -o --color %l | grep ^l'),
- # directories or links to directories,
- ('ldir', 'ls -F -o --color %l | grep /$'),
- # things which are executable
- ('lx', 'ls -F -o --color %l | grep ^-..x'),
- ]
- elif sys.platform.startswith('openbsd') or sys.platform.startswith('netbsd'):
- # OpenBSD, NetBSD. The ls implementation on these platforms does not support
- # the -G switch and lacks the ability to use colorized output.
- ls_aliases = [('ls', 'ls -F'),
- # long ls
- ('ll', 'ls -F -l'),
- # ls normal files only
- ('lf', 'ls -F -l %l | grep ^-'),
- # ls symbolic links
- ('lk', 'ls -F -l %l | grep ^l'),
- # directories or links to directories,
- ('ldir', 'ls -F -l %l | grep /$'),
- # things which are executable
- ('lx', 'ls -F -l %l | grep ^-..x'),
- ]
- else:
- # BSD, OSX, etc.
- ls_aliases = [('ls', 'ls -F -G'),
- # long ls
- ('ll', 'ls -F -l -G'),
- # ls normal files only
- ('lf', 'ls -F -l -G %l | grep ^-'),
- # ls symbolic links
- ('lk', 'ls -F -l -G %l | grep ^l'),
- # directories or links to directories,
- ('ldir', 'ls -F -G -l %l | grep /$'),
- # things which are executable
- ('lx', 'ls -F -l -G %l | grep ^-..x'),
- ]
- default_aliases = default_aliases + ls_aliases
- elif os.name in ['nt', 'dos']:
- default_aliases = [('ls', 'dir /on'),
- ('ddir', 'dir /ad /on'), ('ldir', 'dir /ad /on'),
- ('mkdir', 'mkdir'), ('rmdir', 'rmdir'),
- ('echo', 'echo'), ('ren', 'ren'), ('copy', 'copy'),
- ]
- else:
- default_aliases = []
-
- return default_aliases
-
-
-class AliasError(Exception):
- pass
-
-
-class InvalidAliasError(AliasError):
- pass
-
-
-class Alias(object):
- """Callable object storing the details of one alias.
-
- Instances are registered as magic functions to allow use of aliases.
- """
-
- # Prepare blacklist
- blacklist = {'cd', 'popd', 'pushd', 'dhist', 'alias', 'unalias'}
-
- def __init__(self, shell, name, cmd):
- self.shell = shell
- self.name = name
- self.cmd = cmd
- self.__doc__ = "Alias for `!{}`".format(cmd)
- self.nargs = self.validate()
-
- def validate(self):
- """Validate the alias, and return the number of arguments."""
- if self.name in self.blacklist:
- raise InvalidAliasError("The name %s can't be aliased "
- "because it is a keyword or builtin." % self.name)
- try:
- caller = self.shell.magics_manager.magics['line'][self.name]
- except KeyError:
- pass
- else:
- if not isinstance(caller, Alias):
- raise InvalidAliasError("The name %s can't be aliased "
- "because it is another magic command." % self.name)
-
- if not (isinstance(self.cmd, string_types)):
- raise InvalidAliasError("An alias command must be a string, "
- "got: %r" % self.cmd)
-
- nargs = self.cmd.count('%s') - self.cmd.count('%%s')
-
- if (nargs > 0) and (self.cmd.find('%l') >= 0):
- raise InvalidAliasError('The %s and %l specifiers are mutually '
- 'exclusive in alias definitions.')
-
- return nargs
-
- def __repr__(self):
- return "".format(self.name, self.cmd)
-
- def __call__(self, rest=''):
- cmd = self.cmd
- nargs = self.nargs
- # Expand the %l special to be the user's input line
- if cmd.find('%l') >= 0:
- cmd = cmd.replace('%l', rest)
- rest = ''
-
- if nargs == 0:
- if cmd.find('%%s') >= 1:
- cmd = cmd.replace('%%s', '%s')
- # Simple, argument-less aliases
- cmd = '%s %s' % (cmd, rest)
- else:
- # Handle aliases with positional arguments
- args = rest.split(None, nargs)
- if len(args) < nargs:
- raise UsageError('Alias <%s> requires %s arguments, %s given.' %
- (self.name, nargs, len(args)))
- cmd = '%s %s' % (cmd % tuple(args[:nargs]), ' '.join(args[nargs:]))
-
- self.shell.system(cmd)
-
-
-# -----------------------------------------------------------------------------
-# Main AliasManager class
-# -----------------------------------------------------------------------------
-
-class AliasManager(Configurable):
- default_aliases = List(default_aliases()).tag(config=True)
- user_aliases = List(default_value=[]).tag(config=True)
- shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
-
- def __init__(self, shell=None, **kwargs):
- super(AliasManager, self).__init__(shell=shell, **kwargs)
- # For convenient access
- self.linemagics = self.shell.magics_manager.magics['line']
- self.init_aliases()
-
- def init_aliases(self):
- # Load default & user aliases
- for name, cmd in self.default_aliases + self.user_aliases:
- self.soft_define_alias(name, cmd)
-
- @property
- def aliases(self):
- return [(n, func.cmd) for (n, func) in self.linemagics.items()
- if isinstance(func, Alias)]
-
- def soft_define_alias(self, name, cmd):
- """Define an alias, but don't raise on an AliasError."""
- try:
- self.define_alias(name, cmd)
- except AliasError as e:
- error("Invalid alias: %s" % e)
-
- def define_alias(self, name, cmd):
- """Define a new alias after validating it.
-
- This will raise an :exc:`AliasError` if there are validation
- problems.
- """
- caller = Alias(shell=self.shell, name=name, cmd=cmd)
- self.shell.magics_manager.register_function(caller, magic_kind='line',
- magic_name=name)
-
- def get_alias(self, name):
- """Return an alias, or None if no alias by that name exists."""
- aname = self.linemagics.get(name, None)
- return aname if isinstance(aname, Alias) else None
-
- def is_alias(self, name):
- """Return whether or not a given name has been defined as an alias"""
- return self.get_alias(name) is not None
-
- def undefine_alias(self, name):
- if self.is_alias(name):
- del self.linemagics[name]
- else:
- raise ValueError('%s is not an alias' % name)
-
- def clear_aliases(self):
- for name, cmd in self.aliases:
- self.undefine_alias(name)
-
- def retrieve_alias(self, name):
- """Retrieve the command to which an alias expands."""
- caller = self.get_alias(name)
- if caller:
- return caller.cmd
- else:
- raise ValueError('%s is not an alias' % name)
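One behavior of the deleted `alias.py` worth spelling out: an alias's arity is the number of literal `%s` markers (escaped `%%s` excluded), and `%l` consumes the entire remainder of the input line, which is why the two specifiers are mutually exclusive. A self-contained sketch of that expansion logic:

```python
def expand_alias(cmd, rest=''):
    """Expand an IPython-style alias definition against user input."""
    # %l takes the whole remainder of the line, verbatim.
    if '%l' in cmd:
        return cmd.replace('%l', rest)
    # Arity: literal %s markers, not counting escaped %%s.
    nargs = cmd.count('%s') - cmd.count('%%s')
    if nargs == 0:
        return ('%s %s' % (cmd.replace('%%s', '%s'), rest)).strip()
    args = rest.split(None, nargs)
    if len(args) < nargs:
        raise ValueError('alias requires %d arguments, %d given' % (nargs, len(args)))
    return '%s %s' % (cmd % tuple(args[:nargs]), ' '.join(args[nargs:]))

print(expand_alias('ls -F --color %l', '-a /tmp'))     # ls -F --color -a /tmp
print(expand_alias('git log %s..%s', 'v1 v2 --stat'))  # git log v1..v2 --stat
```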
diff --git a/venv/lib/python2.7/site-packages/IPython/core/application.py b/venv/lib/python2.7/site-packages/IPython/core/application.py
deleted file mode 100644
index 10adf18..0000000
--- a/venv/lib/python2.7/site-packages/IPython/core/application.py
+++ /dev/null
@@ -1,475 +0,0 @@
-# encoding: utf-8
-"""
-An application for IPython.
-
-All top-level applications should use the classes in this module for
-handling configuration and creating configurables.
-
-The job of an :class:`Application` is to create the master configuration
-object and then create the configurable objects, passing the config to them.
-"""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-import atexit
-import glob
-import logging
-import os
-import shutil
-import sys
-from copy import deepcopy
-
-from IPython.core import release, crashhandler
-from IPython.core.profiledir import ProfileDir, ProfileDirError
-from IPython.paths import get_ipython_dir, get_ipython_package_dir
-from IPython.utils import py3compat
-from IPython.utils.path import ensure_dir_exists
-from traitlets import (
- List, Unicode, Type, Bool, Dict, Set, Instance, Undefined,
- default, observe,
-)
-from traitlets.config.application import Application, catch_config_error
-from traitlets.config.loader import ConfigFileNotFound, PyFileConfigLoader
-
-if os.name == 'nt':
- programdata = os.environ.get('PROGRAMDATA', None)
- if programdata:
- SYSTEM_CONFIG_DIRS = [os.path.join(programdata, 'ipython')]
- else: # PROGRAMDATA is not defined by default on XP.
- SYSTEM_CONFIG_DIRS = []
-else:
- SYSTEM_CONFIG_DIRS = [
- "/usr/local/etc/ipython",
- "/etc/ipython",
- ]
-
-ENV_CONFIG_DIRS = []
-_env_config_dir = os.path.join(sys.prefix, 'etc', 'ipython')
-if _env_config_dir not in SYSTEM_CONFIG_DIRS:
- # only add ENV_CONFIG if sys.prefix is not already included
- ENV_CONFIG_DIRS.append(_env_config_dir)
-
-_envvar = os.environ.get('IPYTHON_SUPPRESS_CONFIG_ERRORS')
-if _envvar in {None, ''}:
- IPYTHON_SUPPRESS_CONFIG_ERRORS = None
-else:
- if _envvar.lower() in {'1', 'true'}:
- IPYTHON_SUPPRESS_CONFIG_ERRORS = True
- elif _envvar.lower() in {'0', 'false'}:
- IPYTHON_SUPPRESS_CONFIG_ERRORS = False
- else:
- sys.exit(
- "Unsupported value for environment variable: 'IPYTHON_SUPPRESS_CONFIG_ERRORS' is set to '%s' which is none of {'0', '1', 'false', 'true', ''}." % _envvar)
-
-# aliases and flags
-
-base_aliases = {
- 'profile-dir': 'ProfileDir.location',
- 'profile': 'BaseIPythonApplication.profile',
- 'ipython-dir': 'BaseIPythonApplication.ipython_dir',
- 'log-level': 'Application.log_level',
- 'config': 'BaseIPythonApplication.extra_config_file',
-}
-
-base_flags = dict(
- debug=({'Application': {'log_level': logging.DEBUG}},
- "set log level to logging.DEBUG (maximize logging output)"),
- quiet=({'Application': {'log_level': logging.CRITICAL}},
- "set log level to logging.CRITICAL (minimize logging output)"),
- init=({'BaseIPythonApplication': {
- 'copy_config_files': True,
- 'auto_create': True}
- }, """Initialize profile with default config files. This is equivalent
- to running `ipython profile create <profile>` prior to startup.
- """)
-)
-
-
-class ProfileAwareConfigLoader(PyFileConfigLoader):
- """A Python file config loader that is aware of IPython profiles."""
-
- def load_subconfig(self, fname, path=None, profile=None):
- if profile is not None:
- try:
- profile_dir = ProfileDir.find_profile_dir_by_name(
- get_ipython_dir(),
- profile,
- )
- except ProfileDirError:
- return
- path = profile_dir.location
- return super(ProfileAwareConfigLoader, self).load_subconfig(fname, path=path)
-
-
-class BaseIPythonApplication(Application):
- name = Unicode(u'ipython')
- description = Unicode(u'IPython: an enhanced interactive Python shell.')
- version = Unicode(release.version)
-
- aliases = Dict(base_aliases)
- flags = Dict(base_flags)
- classes = List([ProfileDir])
-
- # enable `load_subconfig('cfg.py', profile='name')`
- python_config_loader_class = ProfileAwareConfigLoader
-
- # Track whether the config_file has changed,
- # because some logic happens only if we aren't using the default.
- config_file_specified = Set()
-
- config_file_name = Unicode()
-
- @default('config_file_name')
- def _config_file_name_default(self):
- return self.name.replace('-', '_') + u'_config.py'
-
- @observe('config_file_name')
- def _config_file_name_changed(self, change):
- if change['new'] != change['old']:
- self.config_file_specified.add(change['new'])
-
- # The directory that contains IPython's builtin profiles.
- builtin_profile_dir = Unicode(
- os.path.join(get_ipython_package_dir(), u'config', u'profile', u'default')
- )
-
- config_file_paths = List(Unicode())
-
- @default('config_file_paths')
- def _config_file_paths_default(self):
- return [py3compat.getcwd()]
-
- extra_config_file = Unicode(
- help="""Path to an extra config file to load.
-
- If specified, load this config file in addition to any other IPython config.
- """).tag(config=True)
-
- @observe('extra_config_file')
- def _extra_config_file_changed(self, change):
- old = change['old']
- new = change['new']
- try:
- self.config_files.remove(old)
- except ValueError:
- pass
- self.config_file_specified.add(new)
- self.config_files.append(new)
-
- profile = Unicode(u'default',
- help="""The IPython profile to use."""
- ).tag(config=True)
-
- @observe('profile')
- def _profile_changed(self, change):
- self.builtin_profile_dir = os.path.join(
- get_ipython_package_dir(), u'config', u'profile', change['new']
- )
-
- ipython_dir = Unicode(
- help="""
- The name of the IPython directory. This directory is used for logging
- configuration (through profiles), history storage, etc. The default
- is usually $HOME/.ipython. This option can also be specified through
- the environment variable IPYTHONDIR.
- """
- ).tag(config=True)
-
- @default('ipython_dir')
- def _ipython_dir_default(self):
- d = get_ipython_dir()
- self._ipython_dir_changed({
- 'name': 'ipython_dir',
- 'old': d,
- 'new': d,
- })
- return d
-
- _in_init_profile_dir = False
- profile_dir = Instance(ProfileDir, allow_none=True)
-
- @default('profile_dir')
- def _profile_dir_default(self):
- # avoid recursion
- if self._in_init_profile_dir:
- return
- # profile_dir requested early, force initialization
- self.init_profile_dir()
- return self.profile_dir
-
- overwrite = Bool(False,
- help="""Whether to overwrite existing config files when copying"""
- ).tag(config=True)
- auto_create = Bool(False,
- help="""Whether to create profile dir if it doesn't exist"""
- ).tag(config=True)
-
- config_files = List(Unicode())
-
- @default('config_files')
- def _config_files_default(self):
- return [self.config_file_name]
-
- copy_config_files = Bool(False,
- help="""Whether to install the default config files into the profile dir.
- If a new profile is being created, and IPython contains config files for that
- profile, then they will be staged into the new directory. Otherwise,
- default config files will be automatically generated.
- """).tag(config=True)
-
- verbose_crash = Bool(False,
- help="""Create a massive crash report when IPython encounters what may be an
- internal error. The default is to append a short message to the
- usual traceback""").tag(config=True)
-
- # The class to use as the crash handler.
- crash_handler_class = Type(crashhandler.CrashHandler)
-
- @catch_config_error
- def __init__(self, **kwargs):
- super(BaseIPythonApplication, self).__init__(**kwargs)
- # ensure current working directory exists
- try:
- py3compat.getcwd()
- except:
- # exit if cwd doesn't exist
- self.log.error("Current working directory doesn't exist.")
- self.exit(1)
-
- # -------------------------------------------------------------------------
- # Various stages of Application creation
- # -------------------------------------------------------------------------
-
- deprecated_subcommands = {}
-
- def initialize_subcommand(self, subc, argv=None):
- if subc in self.deprecated_subcommands:
- self.log.warning("Subcommand `ipython {sub}` is deprecated and will be removed "
- "in future versions.".format(sub=subc))
- self.log.warning("You likely want to use `jupyter {sub}` in the "
- "future".format(sub=subc))
- return super(BaseIPythonApplication, self).initialize_subcommand(subc, argv)
-
- def init_crash_handler(self):
- """Create a crash handler, typically setting sys.excepthook to it."""
- self.crash_handler = self.crash_handler_class(self)
- sys.excepthook = self.excepthook
-
- def unset_crashhandler():
- sys.excepthook = sys.__excepthook__
-
- atexit.register(unset_crashhandler)
-
- def excepthook(self, etype, evalue, tb):
- """this is sys.excepthook after init_crashhandler
-
- set self.verbose_crash=True to use our full crashhandler, instead of
- a regular traceback with a short message (crash_handler_lite)
- """
-
- if self.verbose_crash:
- return self.crash_handler(etype, evalue, tb)
- else:
- return crashhandler.crash_handler_lite(etype, evalue, tb)
-
- @observe('ipython_dir')
- def _ipython_dir_changed(self, change):
- old = change['old']
- new = change['new']
- if old is not Undefined:
- str_old = py3compat.cast_bytes_py2(os.path.abspath(old),
- sys.getfilesystemencoding()
- )
- if str_old in sys.path:
- sys.path.remove(str_old)
- str_path = py3compat.cast_bytes_py2(os.path.abspath(new),
- sys.getfilesystemencoding()
- )
- sys.path.append(str_path)
- ensure_dir_exists(new)
- readme = os.path.join(new, 'README')
- readme_src = os.path.join(get_ipython_package_dir(), u'config', u'profile', 'README')
- if not os.path.exists(readme) and os.path.exists(readme_src):
- shutil.copy(readme_src, readme)
- for d in ('extensions', 'nbextensions'):
- path = os.path.join(new, d)
- try:
- ensure_dir_exists(path)
- except OSError as e:
- # this will not be EEXIST
- self.log.error("couldn't create path %s: %s", path, e)
- self.log.debug("IPYTHONDIR set to: %s" % new)
-
- def load_config_file(self, suppress_errors=IPYTHON_SUPPRESS_CONFIG_ERRORS):
- """Load the config file.
-
- By default, errors in loading config are handled, and a warning
- printed on screen. For testing, the suppress_errors option is set
- to False, so errors will make tests fail.
-
- `suppress_errors` defaults to `None`, in which case the
- behavior defaults to that of `traitlets.Application`.
-
- The default value can be set :
- - to `False` by setting 'IPYTHON_SUPPRESS_CONFIG_ERRORS' environment variable to '0', or 'false' (case insensitive).
- - to `True` by setting 'IPYTHON_SUPPRESS_CONFIG_ERRORS' environment variable to '1' or 'true' (case insensitive).
- - to `None` by setting 'IPYTHON_SUPPRESS_CONFIG_ERRORS' environment variable to '' (empty string) or leaving it unset.
-
- Any other value is invalid and will make IPython exit with a non-zero return code.
- """
-
- self.log.debug("Searching path %s for config files", self.config_file_paths)
- base_config = 'ipython_config.py'
- self.log.debug("Attempting to load config file: %s" %
- base_config)
- try:
- if suppress_errors is not None:
- old_value = Application.raise_config_file_errors
- Application.raise_config_file_errors = not suppress_errors;
- Application.load_config_file(
- self,
- base_config,
- path=self.config_file_paths
- )
- except ConfigFileNotFound:
- # ignore errors loading parent
- self.log.debug("Config file %s not found", base_config)
- pass
- if suppress_errors is not None:
- Application.raise_config_file_errors = old_value
-
- for config_file_name in self.config_files:
- if not config_file_name or config_file_name == base_config:
- continue
- self.log.debug("Attempting to load config file: %s" %
- self.config_file_name)
- try:
- Application.load_config_file(
- self,
- config_file_name,
- path=self.config_file_paths
- )
- except ConfigFileNotFound:
- # Only warn if the default config file was NOT being used.
- if config_file_name in self.config_file_specified:
- msg = self.log.warning
- else:
- msg = self.log.debug
- msg("Config file not found, skipping: %s", config_file_name)
- except Exception:
- # For testing purposes.
- if not suppress_errors:
- raise
- self.log.warning("Error loading config file: %s" %
- self.config_file_name, exc_info=True)
-
- def init_profile_dir(self):
- """initialize the profile dir"""
- self._in_init_profile_dir = True
- if self.profile_dir is not None:
- # already ran
- return
- if 'ProfileDir.location' not in self.config:
- # location not specified, find by profile name
- try:
- p = ProfileDir.find_profile_dir_by_name(self.ipython_dir, self.profile, self.config)
- except ProfileDirError:
- # not found, maybe create it (always create default profile)
- if self.auto_create or self.profile == 'default':
- try:
- p = ProfileDir.create_profile_dir_by_name(self.ipython_dir, self.profile, self.config)
- except ProfileDirError:
- self.log.fatal("Could not create profile: %r" % self.profile)
- self.exit(1)
- else:
- self.log.info("Created profile dir: %r" % p.location)
- else:
- self.log.fatal("Profile %r not found." % self.profile)
- self.exit(1)
- else:
- self.log.debug("Using existing profile dir: %r" % p.location)
- else:
- location = self.config.ProfileDir.location
- # location is fully specified
- try:
- p = ProfileDir.find_profile_dir(location, self.config)
- except ProfileDirError:
- # not found, maybe create it
- if self.auto_create:
- try:
- p = ProfileDir.create_profile_dir(location, self.config)
- except ProfileDirError:
- self.log.fatal("Could not create profile directory: %r" % location)
- self.exit(1)
- else:
- self.log.debug("Creating new profile dir: %r" % location)
- else:
- self.log.fatal("Profile directory %r not found." % location)
- self.exit(1)
- else:
- self.log.info("Using existing profile dir: %r" % location)
- # if profile_dir is specified explicitly, set profile name
- dir_name = os.path.basename(p.location)
- if dir_name.startswith('profile_'):
- self.profile = dir_name[8:]
-
- self.profile_dir = p
- self.config_file_paths.append(p.location)
- self._in_init_profile_dir = False
-
- def init_config_files(self):
- """[optionally] copy default config files into profile dir."""
- self.config_file_paths.extend(ENV_CONFIG_DIRS)
- self.config_file_paths.extend(SYSTEM_CONFIG_DIRS)
- # copy config files
- path = self.builtin_profile_dir
- if self.copy_config_files:
- src = self.profile
-
- cfg = self.config_file_name
- if path and os.path.exists(os.path.join(path, cfg)):
- self.log.warning("Staging %r from %s into %r [overwrite=%s]" % (
- cfg, src, self.profile_dir.location, self.overwrite)
- )
- self.profile_dir.copy_config_file(cfg, path=path, overwrite=self.overwrite)
- else:
- self.stage_default_config_file()
- else:
- # Still stage *bundled* config files, but not generated ones
- # This is necessary for `ipython profile=sympy` to load the profile
- # on the first go
- files = glob.glob(os.path.join(path, '*.py'))
- for fullpath in files:
- cfg = os.path.basename(fullpath)
- if self.profile_dir.copy_config_file(cfg, path=path, overwrite=False):
- # file was copied
- self.log.warning("Staging bundled %s from %s into %r" % (
- cfg, self.profile, self.profile_dir.location)
- )
-
- def stage_default_config_file(self):
- """auto generate default config file, and stage it into the profile."""
- s = self.generate_config_file()
- fname = os.path.join(self.profile_dir.location, self.config_file_name)
- if self.overwrite or not os.path.exists(fname):
- self.log.warning("Generating default config file: %r" % (fname))
- with open(fname, 'w') as f:
- f.write(s)
-
- @catch_config_error
- def initialize(self, argv=None):
- # don't hook up crash handler before parsing command-line
- self.parse_command_line(argv)
- self.init_crash_handler()
- if self.subapp is not None:
- # stop here if subapp is taking over
- return
- # save a copy of CLI config to re-load after config files
- # so that it has highest priority
- cl_config = deepcopy(self.config)
- self.init_profile_dir()
- self.init_config_files()
- self.load_config_file()
- # enforce cl-opts override configfile opts:
- self.update_config(cl_config)
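A detail of the removed `application.py` that is easy to miss: `IPYTHON_SUPPRESS_CONFIG_ERRORS` is tri-state, not boolean. A condensed sketch of the parsing it performs at import time:

```python
import os
import sys

_envvar = os.environ.get('IPYTHON_SUPPRESS_CONFIG_ERRORS')
if _envvar in (None, ''):
    suppress = None   # unset/empty: defer to traitlets.Application's default
elif _envvar.lower() in ('1', 'true'):
    suppress = True   # swallow config-file errors, log a warning instead
elif _envvar.lower() in ('0', 'false'):
    suppress = False  # re-raise config-file errors (useful in test runs)
else:
    sys.exit("unsupported value %r for IPYTHON_SUPPRESS_CONFIG_ERRORS" % _envvar)
```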
diff --git a/venv/lib/python2.7/site-packages/IPython/core/autocall.py b/venv/lib/python2.7/site-packages/IPython/core/autocall.py
deleted file mode 100644
index 10f788f..0000000
--- a/venv/lib/python2.7/site-packages/IPython/core/autocall.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# encoding: utf-8
-"""
-Autocall capabilities for IPython.core.
-
-Authors:
-
-* Brian Granger
-* Fernando Perez
-* Thomas Kluyver
-
-Notes
------
-"""
-
-
-# -----------------------------------------------------------------------------
-# Copyright (C) 2008-2011 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-# -----------------------------------------------------------------------------
-
-# -----------------------------------------------------------------------------
-# Imports
-# -----------------------------------------------------------------------------
-
-
-# -----------------------------------------------------------------------------
-# Code
-# -----------------------------------------------------------------------------
-
-class IPyAutocall(object):
- """ Instances of this class are always autocalled
-
- This happens regardless of 'autocall' variable state. Use this to
- develop macro-like mechanisms.
- """
- _ip = None
- rewrite = True
-
- def __init__(self, ip=None):
- self._ip = ip
-
- def set_ip(self, ip):
- """ Will be used to set _ip point to current ipython instance b/f call
-
- Override this method if you don't want this to happen.
-
- """
- self._ip = ip
-
-
-class ExitAutocall(IPyAutocall):
- """An autocallable object which will be added to the user namespace so that
- exit, exit(), quit or quit() are all valid ways to close the shell."""
- rewrite = False
-
- def __call__(self):
- self._ip.ask_exit()
-
-
-class ZMQExitAutocall(ExitAutocall):
- """Exit IPython. Autocallable, so it needn't be explicitly called.
-
- Parameters
- ----------
- keep_kernel : bool
- If True, leave the kernel alive. Otherwise, tell the kernel to exit too
- (default).
- """
-
- def __call__(self, keep_kernel=False):
- self._ip.keepkernel_on_exit = keep_kernel
- self._ip.ask_exit()
diff --git a/venv/lib/python2.7/site-packages/IPython/core/builtin_trap.py b/venv/lib/python2.7/site-packages/IPython/core/builtin_trap.py
deleted file mode 100644
index 3fa9a5f..0000000
--- a/venv/lib/python2.7/site-packages/IPython/core/builtin_trap.py
+++ /dev/null
@@ -1,120 +0,0 @@
-"""
-A context manager for managing things injected into :mod:`__builtin__`.
-
-Authors:
-
-* Brian Granger
-* Fernando Perez
-"""
-# -----------------------------------------------------------------------------
-# Copyright (C) 2010-2011 The IPython Development Team.
-#
-# Distributed under the terms of the BSD License.
-#
-# Complete license in the file COPYING.txt, distributed with this software.
-# -----------------------------------------------------------------------------
-
-# -----------------------------------------------------------------------------
-# Imports
-# -----------------------------------------------------------------------------
-
-from IPython.utils.py3compat import builtin_mod, iteritems
-from traitlets import Instance
-from traitlets.config.configurable import Configurable
-
-
-# -----------------------------------------------------------------------------
-# Classes and functions
-# -----------------------------------------------------------------------------
-
-class __BuiltinUndefined(object): pass
-
-
-BuiltinUndefined = __BuiltinUndefined()
-
-
-class __HideBuiltin(object): pass
-
-
-HideBuiltin = __HideBuiltin()
-
-
-class BuiltinTrap(Configurable):
- shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
- allow_none=True)
-
- def __init__(self, shell=None):
- super(BuiltinTrap, self).__init__(shell=shell, config=None)
- self._orig_builtins = {}
- # We define this to track if a single BuiltinTrap is nested.
- # Only turn off the trap when the outermost call to __exit__ is made.
- self._nested_level = 0
- self.shell = shell
- # builtins we always add - if set to HideBuiltin, they will just
- # be removed instead of being replaced by something else
- self.auto_builtins = {'exit': HideBuiltin,
- 'quit': HideBuiltin,
- 'get_ipython': self.shell.get_ipython,
- }
- # Recursive reload function
- try:
- from IPython.lib import deepreload
- if self.shell.deep_reload:
- from warnings import warn
- warn(
- "Automatically replacing builtin `reload` by `deepreload.reload` is deprecated since IPython 4.0, please import `reload` explicitly from `IPython.lib.deepreload",
- DeprecationWarning)
- self.auto_builtins['reload'] = deepreload._dreload
- else:
- self.auto_builtins['dreload'] = deepreload._dreload
- except ImportError:
- pass
-
- def __enter__(self):
- if self._nested_level == 0:
- self.activate()
- self._nested_level += 1
- # I return self, so callers can use add_builtin in a with clause.
- return self
-
- def __exit__(self, type, value, traceback):
- if self._nested_level == 1:
- self.deactivate()
- self._nested_level -= 1
- # Returning False will cause exceptions to propagate
- return False
-
- def add_builtin(self, key, value):
- """Add a builtin and save the original."""
- bdict = builtin_mod.__dict__
- orig = bdict.get(key, BuiltinUndefined)
- if value is HideBuiltin:
- if orig is not BuiltinUndefined: # same as 'key in bdict'
- self._orig_builtins[key] = orig
- del bdict[key]
- else:
- self._orig_builtins[key] = orig
- bdict[key] = value
-
- def remove_builtin(self, key, orig):
- """Remove an added builtin and re-set the original."""
- if orig is BuiltinUndefined:
- del builtin_mod.__dict__[key]
- else:
- builtin_mod.__dict__[key] = orig
-
- def activate(self):
- """Store ipython references in the __builtin__ namespace."""
-
- add_builtin = self.add_builtin
- for name, func in iteritems(self.auto_builtins):
- add_builtin(name, func)
-
- def deactivate(self):
- """Remove any builtins which might have been added by add_builtins, or
- restore overwritten ones to their previous values."""
- remove_builtin = self.remove_builtin
- for key, val in iteritems(self._orig_builtins):
- remove_builtin(key, val)
- self._orig_builtins.clear()
- self._builtins_added = False
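`BuiltinTrap`, deleted above, is a re-entrant context manager: a nesting counter makes sure the builtins are injected on the first `__enter__` and restored only when the outermost `__exit__` runs. The counting idiom on its own:

```python
class ReentrantTrap(object):
    """Activate on the first enter, deactivate on the last exit."""

    def __init__(self):
        self._nested_level = 0

    def __enter__(self):
        if self._nested_level == 0:
            print('activate')    # e.g. inject names into the builtin module
        self._nested_level += 1
        return self

    def __exit__(self, exc_type, exc_value, tb):
        if self._nested_level == 1:
            print('deactivate')  # restore the saved originals
        self._nested_level -= 1
        return False             # let exceptions propagate

trap = ReentrantTrap()
with trap:
    with trap:
        pass   # nested: only one activate/deactivate pair is printed
```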
diff --git a/venv/lib/python2.7/site-packages/IPython/core/compilerop.py b/venv/lib/python2.7/site-packages/IPython/core/compilerop.py
deleted file mode 100644
index 6c5a419..0000000
--- a/venv/lib/python2.7/site-packages/IPython/core/compilerop.py
+++ /dev/null
@@ -1,147 +0,0 @@
-"""Compiler tools with improved interactive support.
-
-Provides compilation machinery similar to codeop, but with caching support so
-we can provide interactive tracebacks.
-
-Authors
--------
-* Robert Kern
-* Fernando Perez
-* Thomas Kluyver
-"""
-
-# Note: though it might be more natural to name this module 'compiler', that
-# name is in the stdlib and name collisions with the stdlib tend to produce
-# weird problems (often with third-party tools).
-
-# -----------------------------------------------------------------------------
-# Copyright (C) 2010-2011 The IPython Development Team.
-#
-# Distributed under the terms of the BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# -----------------------------------------------------------------------------
-
-# -----------------------------------------------------------------------------
-# Imports
-# -----------------------------------------------------------------------------
-from __future__ import print_function
-
-# Stdlib imports
-import __future__
-import codeop
-import functools
-import hashlib
-import linecache
-import operator
-import time
-from ast import PyCF_ONLY_AST
-
-# -----------------------------------------------------------------------------
-# Constants
-# -----------------------------------------------------------------------------
-
- # Roughly equal to PyCF_MASK | PyCF_MASK_OBSOLETE as defined in pythonrun.h,
-# this is used as a bitmask to extract future-related code flags.
-PyCF_MASK = functools.reduce(operator.or_,
- (getattr(__future__, fname).compiler_flag
- for fname in __future__.all_feature_names))
-
-
-# -----------------------------------------------------------------------------
-# Local utilities
-# -----------------------------------------------------------------------------
-
-def code_name(code, number=0):
- """ Compute a (probably) unique name for code for caching.
-
- This now expects code to be unicode.
- """
- hash_digest = hashlib.sha1(code.encode("utf-8")).hexdigest()
- # Include the number and 12 characters of the hash in the name. It's
- # pretty much impossible that in a single session we'll have collisions
- # even with truncated hashes, and the full one makes tracebacks too long
- return '<ipython-input-{0}-{1}>'.format(number, hash_digest[:12])
-
-
-# -----------------------------------------------------------------------------
-# Classes and functions
-# -----------------------------------------------------------------------------
-
-class CachingCompiler(codeop.Compile):
- """A compiler that caches code compiled from interactive statements.
- """
-
- def __init__(self):
- codeop.Compile.__init__(self)
-
- # This is ugly, but it must be done this way to allow multiple
- # simultaneous ipython instances to coexist. Since Python itself
- # directly accesses the data structures in the linecache module, and
- # the cache therein is global, we must work with that data structure.
- # We must hold a reference to the original checkcache routine and call
- # that in our own check_cache() below, but the special IPython cache
- # must also be shared by all IPython instances. If we were to hold
- # separate caches (one in each CachingCompiler instance), any call made
- # by Python itself to linecache.checkcache() would obliterate the
- # cached data from the other IPython instances.
- if not hasattr(linecache, '_ipython_cache'):
- linecache._ipython_cache = {}
- if not hasattr(linecache, '_checkcache_ori'):
- linecache._checkcache_ori = linecache.checkcache
- # Now, we must monkeypatch the linecache directly so that parts of the
- # stdlib that call it outside our control go through our codepath
- # (otherwise we'd lose our tracebacks).
- linecache.checkcache = check_linecache_ipython
-
- def ast_parse(self, source, filename='<unknown>', symbol='exec'):
- """Parse code to an AST with the current compiler flags active.
-
- Arguments are exactly the same as ast.parse (in the standard library),
- and are passed to the built-in compile function."""
- return compile(source, filename, symbol, self.flags | PyCF_ONLY_AST, 1)
-
- def reset_compiler_flags(self):
- """Reset compiler flags to default state."""
- # This value is copied from codeop.Compile.__init__, so if that ever
- # changes, it will need to be updated.
- self.flags = codeop.PyCF_DONT_IMPLY_DEDENT
-
- @property
- def compiler_flags(self):
- """Flags currently active in the compilation process.
- """
- return self.flags
-
- def cache(self, code, number=0):
- """Make a name for a block of code, and cache the code.
-
- Parameters
- ----------
- code : str
- The Python source code to cache.
- number : int
- A number which forms part of the code's name. Used for the execution
- counter.
-
- Returns
- -------
- The name of the cached code (as a string). Pass this as the filename
- argument to compilation, so that tracebacks are correctly hooked up.
- """
- name = code_name(code, number)
- entry = (len(code), time.time(),
- [line + '\n' for line in code.splitlines()], name)
- linecache.cache[name] = entry
- linecache._ipython_cache[name] = entry
- return name
-
-
-def check_linecache_ipython(*args):
- """Call linecache.checkcache() safely protecting our cached values.
- """
- # First call the original checkcache as intended
- linecache._checkcache_ori(*args)
- # Then, update back the cache with our data, so that tracebacks related
- # to our compiled codes can be produced.
- linecache.cache.update(linecache._ipython_cache)
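The linecache dance removed here is what lets ordinary tracebacks display source for code that never existed on disk: each interactive block is stored under a synthetic filename, and `checkcache` has to be monkeypatched because the stock version stats every cached 'file' and silently drops entries whose path does not exist. The core idea, reduced to a sketch:

```python
import linecache
import time

source = "def boom():\n    raise ValueError('from a cached cell')\n"
name = '<ipython-input-1-deadbeef>'  # synthetic filename, as code_name() builds

# Same entry shape the removed cache() method stores:
# (size, mtime, list_of_lines, filename)
linecache.cache[name] = (len(source), time.time(),
                         [line + '\n' for line in source.splitlines()], name)

# Source lookups now work for the in-memory "file":
print(linecache.getline(name, 2).rstrip())  # -> raise ValueError(...)
```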
diff --git a/venv/lib/python2.7/site-packages/IPython/core/completer.py b/venv/lib/python2.7/site-packages/IPython/core/completer.py
deleted file mode 100644
index ad7aaa2..0000000
--- a/venv/lib/python2.7/site-packages/IPython/core/completer.py
+++ /dev/null
@@ -1,1196 +0,0 @@
-# encoding: utf-8
-"""Word completion for IPython.
-
-This module started as a fork of the rlcompleter module in the Python standard
-library. The original enhancements made to rlcompleter have been sent
-upstream and were accepted as of Python 2.3.
-
-"""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-#
-# Some of this code originated from rlcompleter in the Python standard library
-# Copyright (C) 2001 Python Software Foundation, www.python.org
-
-from __future__ import print_function
-
-import glob
-import inspect
-import itertools
-import keyword
-import os
-import re
-import string
-import sys
-import unicodedata
-import warnings
-
-import __main__
-from IPython.core.error import TryNext
-from IPython.core.inputsplitter import ESC_MAGIC
-from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
-from IPython.utils import generics
-from IPython.utils.decorators import undoc
-from IPython.utils.dir2 import dir2, get_real_method
-from IPython.utils.process import arg_split
-from IPython.utils.py3compat import builtin_mod, string_types, PY3, cast_unicode_py2
-from traitlets import Bool, Enum, observe
-from traitlets.config.configurable import Configurable
-
-# Public API
-__all__ = ['Completer', 'IPCompleter']
-
-if sys.platform == 'win32':
- PROTECTABLES = ' '
-else:
- PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
-
-# Protect against returning an enormous number of completions which the frontend
-# may have trouble processing.
-MATCHES_LIMIT = 500
-
-
-def has_open_quotes(s):
- """Return whether a string has open quotes.
-
-    This simply checks whether the number of quote characters of either type in
-    the string is odd.
-
- Returns
- -------
- If there is an open quote, the quote character is returned. Else, return
- False.
- """
- # We check " first, then ', so complex cases with nested quotes will get
- # the " to take precedence.
- if s.count('"') % 2:
- return '"'
- elif s.count("'") % 2:
- return "'"
- else:
- return False
-
-
-def protect_filename(s):
- """Escape a string to protect certain characters."""
- if set(s) & set(PROTECTABLES):
- if sys.platform == "win32":
- return '"' + s + '"'
- else:
- return "".join(("\\" + c if c in PROTECTABLES else c) for c in s)
- else:
- return s
-
-
-def expand_user(path):
- """Expand '~'-style usernames in strings.
-
- This is similar to :func:`os.path.expanduser`, but it computes and returns
- extra information that will be useful if the input was being used in
- computing completions, and you wish to return the completions with the
- original '~' instead of its expanded value.
-
- Parameters
- ----------
- path : str
- String to be expanded. If no ~ is present, the output is the same as the
- input.
-
- Returns
- -------
- newpath : str
- Result of ~ expansion in the input path.
- tilde_expand : bool
- Whether any expansion was performed or not.
- tilde_val : str
- The value that ~ was replaced with.
- """
- # Default values
- tilde_expand = False
- tilde_val = ''
- newpath = path
-
- if path.startswith('~'):
- tilde_expand = True
- rest = len(path) - 1
- newpath = os.path.expanduser(path)
- if rest:
- tilde_val = newpath[:-rest]
- else:
- tilde_val = newpath
-
- return newpath, tilde_expand, tilde_val
-
-
-def compress_user(path, tilde_expand, tilde_val):
- """Does the opposite of expand_user, with its outputs.
- """
- if tilde_expand:
- return path.replace(tilde_val, '~')
- else:
- return path
-
-
-def completions_sorting_key(word):
- """key for sorting completions
-
- This does several things:
-
- - Lowercase all completions, so they are sorted alphabetically with
- upper and lower case words mingled
- - Demote any completions starting with underscores to the end
- - Insert any %magic and %%cellmagic completions in the alphabetical order
- by their name
- """
- # Case insensitive sort
- word = word.lower()
-
- prio1, prio2 = 0, 0
-
- if word.startswith('__'):
- prio1 = 2
- elif word.startswith('_'):
- prio1 = 1
-
- if word.endswith('='):
- prio1 = -1
-
- if word.startswith('%%'):
- # If there's another % in there, this is something else, so leave it alone
- if not "%" in word[2:]:
- word = word[2:]
- prio2 = 2
- elif word.startswith('%'):
- if not "%" in word[1:]:
- word = word[1:]
- prio2 = 1
-
- return prio1, word, prio2
-
-
-@undoc
-class Bunch(object): pass
-
-
-if sys.platform == 'win32':
- DELIMS = ' \t\n`!@#$^&*()=+[{]}|;\'",<>?'
-else:
- DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
-
-GREEDY_DELIMS = ' =\r\n'
-
-
-class CompletionSplitter(object):
- """An object to split an input line in a manner similar to readline.
-
- By having our own implementation, we can expose readline-like completion in
- a uniform manner to all frontends. This object only needs to be given the
- line of text to be split and the cursor position on said line, and it
- returns the 'word' to be completed on at the cursor after splitting the
- entire line.
-
- What characters are used as splitting delimiters can be controlled by
- setting the `delims` attribute (this is a property that internally
- automatically builds the necessary regular expression)"""
-
- # Private interface
-
- # A string of delimiter characters. The default value makes sense for
- # IPython's most typical usage patterns.
- _delims = DELIMS
-
- # The expression (a normal string) to be compiled into a regular expression
- # for actual splitting. We store it as an attribute mostly for ease of
- # debugging, since this type of code can be so tricky to debug.
- _delim_expr = None
-
- # The regular expression that does the actual splitting
- _delim_re = None
-
- def __init__(self, delims=None):
- delims = CompletionSplitter._delims if delims is None else delims
- self.delims = delims
-
- @property
- def delims(self):
- """Return the string of delimiter characters."""
- return self._delims
-
- @delims.setter
- def delims(self, delims):
- """Set the delimiters for line splitting."""
- expr = '[' + ''.join('\\' + c for c in delims) + ']'
- self._delim_re = re.compile(expr)
- self._delims = delims
- self._delim_expr = expr
-
- def split_line(self, line, cursor_pos=None):
- """Split a line of text with a cursor at the given position.
- """
- l = line if cursor_pos is None else line[:cursor_pos]
- return self._delim_re.split(l)[-1]
-
-
-class Completer(Configurable):
- greedy = Bool(False,
- help="""Activate greedy completion
-                  PENDING DEPRECATION. This is now mostly taken care of with Jedi.
-
- This will enable completion on elements of lists, results of function calls, etc.,
- but can be unsafe because the code is actually evaluated on TAB.
- """
- ).tag(config=True)
-
- backslash_combining_completions = Bool(True,
- help="Enable unicode completions, e.g. \\alpha . "
- "Includes completion of latex commands, unicode names, and expanding "
- "unicode characters back to latex commands.").tag(config=True)
-
- def __init__(self, namespace=None, global_namespace=None, **kwargs):
- """Create a new completer for the command line.
-
- Completer(namespace=ns, global_namespace=ns2) -> completer instance.
-
- If unspecified, the default namespace where completions are performed
- is __main__ (technically, __main__.__dict__). Namespaces should be
- given as dictionaries.
-
- An optional second namespace can be given. This allows the completer
- to handle cases where both the local and global scopes need to be
- distinguished.
-
- Completer instances should be used as the completion mechanism of
- readline via the set_completer() call:
-
- readline.set_completer(Completer(my_namespace).complete)
- """
-
- # Don't bind to namespace quite yet, but flag whether the user wants a
- # specific namespace or to use __main__.__dict__. This will allow us
- # to bind to __main__.__dict__ at completion time, not now.
- if namespace is None:
- self.use_main_ns = 1
- else:
- self.use_main_ns = 0
- self.namespace = namespace
-
- # The global namespace, if given, can be bound directly
- if global_namespace is None:
- self.global_namespace = {}
- else:
- self.global_namespace = global_namespace
-
- super(Completer, self).__init__(**kwargs)
-
- def complete(self, text, state):
- """Return the next possible completion for 'text'.
-
- This is called successively with state == 0, 1, 2, ... until it
- returns None. The completion should begin with 'text'.
-
- """
- if self.use_main_ns:
- self.namespace = __main__.__dict__
-
- if state == 0:
- if "." in text:
- self.matches = self.attr_matches(text)
- else:
- self.matches = self.global_matches(text)
- try:
- return self.matches[state]
- except IndexError:
- return None
-
- def global_matches(self, text):
- """Compute matches when text is a simple name.
-
- Return a list of all keywords, built-in functions and names currently
- defined in self.namespace or self.global_namespace that match.
-
- """
- matches = []
- match_append = matches.append
- n = len(text)
- for lst in [keyword.kwlist,
- builtin_mod.__dict__.keys(),
- self.namespace.keys(),
- self.global_namespace.keys()]:
- for word in lst:
- if word[:n] == text and word != "__builtins__":
- match_append(word)
- return [cast_unicode_py2(m) for m in matches]
-
- def attr_matches(self, text):
- """Compute matches when text contains a dot.
-
- Assuming the text is of the form NAME.NAME....[NAME], and is
- evaluatable in self.namespace or self.global_namespace, it will be
- evaluated and its attributes (as revealed by dir()) are used as
-        possible completions. (For class instances, class members are
- also considered.)
-
- WARNING: this can still invoke arbitrary C code, if an object
- with a __getattr__ hook is evaluated.
-
- """
-
-        # Another option, seems to work great. Catches things like ''.<tab>
- m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
-
- if m:
- expr, attr = m.group(1, 3)
- elif self.greedy:
- m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
- if not m2:
- return []
- expr, attr = m2.group(1, 2)
- else:
- return []
-
- try:
- obj = eval(expr, self.namespace)
- except:
- try:
- obj = eval(expr, self.global_namespace)
- except:
- return []
-
- if self.limit_to__all__ and hasattr(obj, '__all__'):
- words = get__all__entries(obj)
- else:
- words = dir2(obj)
-
- try:
- words = generics.complete_object(obj, words)
- except TryNext:
- pass
- except Exception:
- # Silence errors from completion function
- # raise # dbg
- pass
- # Build match list to return
- n = len(attr)
- return [u"%s.%s" % (expr, w) for w in words if w[:n] == attr]
-
-
-def get__all__entries(obj):
- """returns the strings in the __all__ attribute"""
- try:
- words = getattr(obj, '__all__')
- except:
- return []
-
- return [cast_unicode_py2(w) for w in words if isinstance(w, string_types)]
-
-
-def match_dict_keys(keys, prefix, delims):
- """Used by dict_key_matches, matching the prefix to a list of keys"""
- if not prefix:
- return None, 0, [repr(k) for k in keys
- if isinstance(k, (string_types, bytes))]
- quote_match = re.search('["\']', prefix)
- quote = quote_match.group()
- try:
- prefix_str = eval(prefix + quote, {})
- except Exception:
- return None, 0, []
-
- pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
- token_match = re.search(pattern, prefix, re.UNICODE)
- token_start = token_match.start()
- token_prefix = token_match.group()
-
- # TODO: support bytes in Py3k
- matched = []
- for key in keys:
- try:
- if not key.startswith(prefix_str):
- continue
- except (AttributeError, TypeError, UnicodeError):
- # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
- continue
-
- # reformat remainder of key to begin with prefix
- rem = key[len(prefix_str):]
- # force repr wrapped in '
- rem_repr = repr(rem + '"')
- if rem_repr.startswith('u') and prefix[0] not in 'uU':
- # Found key is unicode, but prefix is Py2 string.
- # Therefore attempt to interpret key as string.
- try:
- rem_repr = repr(rem.encode('ascii') + '"')
- except UnicodeEncodeError:
- continue
-
- rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
- if quote == '"':
- # The entered prefix is quoted with ",
- # but the match is quoted with '.
- # A contained " hence needs escaping for comparison:
- rem_repr = rem_repr.replace('"', '\\"')
-
- # then reinsert prefix from start of token
- matched.append('%s%s' % (token_prefix, rem_repr))
- return quote, token_start, matched
-
-
-def _safe_isinstance(obj, module, class_name):
- """Checks if obj is an instance of module.class_name if loaded
- """
- return (module in sys.modules and
- isinstance(obj, getattr(__import__(module), class_name)))
-
-
-def back_unicode_name_matches(text):
- u"""Match unicode characters back to unicode name
-
- This does ☃ -> \\snowman
-
-    Note that snowman is not a valid python3 combining character but will be
-    expanded; it will not, however, recombine back to the snowman character via
-    the completion machinery.
-
- This will not either back-complete standard sequences like \\n, \\b ...
-
- Used on Python 3 only.
- """
- if len(text) < 2:
- return u'', ()
- maybe_slash = text[-2]
- if maybe_slash != '\\':
- return u'', ()
-
- char = text[-1]
- # no expand on quote for completion in strings.
- # nor backcomplete standard ascii keys
- if char in string.ascii_letters or char in ['"', "'"]:
- return u'', ()
- try:
- unic = unicodedata.name(char)
- return '\\' + char, ['\\' + unic]
- except KeyError:
- pass
- return u'', ()
-
-
-def back_latex_name_matches(text):
- u"""Match latex characters back to unicode name
-
-    This does √ -> \\sqrt
-
- Used on Python 3 only.
- """
- if len(text) < 2:
- return u'', ()
- maybe_slash = text[-2]
- if maybe_slash != '\\':
- return u'', ()
-
- char = text[-1]
- # no expand on quote for completion in strings.
- # nor backcomplete standard ascii keys
- if char in string.ascii_letters or char in ['"', "'"]:
- return u'', ()
- try:
- latex = reverse_latex_symbol[char]
-        # '\\' replaces the \ as well
- return '\\' + char, [latex]
- except KeyError:
- pass
- return u'', ()
-
-
-class IPCompleter(Completer):
- """Extension of the completer class with IPython-specific features"""
-
- @observe('greedy')
- def _greedy_changed(self, change):
- """update the splitter and readline delims when greedy is changed"""
- if change['new']:
- self.splitter.delims = GREEDY_DELIMS
- else:
- self.splitter.delims = DELIMS
-
- if self.readline:
- self.readline.set_completer_delims(self.splitter.delims)
-
- merge_completions = Bool(True,
- help="""Whether to merge completion results into a single list
-
- If False, only the completion results from the first non-empty
- completer will be returned.
- """
- ).tag(config=True)
- omit__names = Enum((0, 1, 2), default_value=2,
- help="""Instruct the completer to omit private method names
-
- Specifically, when completing on ``object.``.
-
- When 2 [default]: all names that start with '_' will be excluded.
-
- When 1: all 'magic' names (``__foo__``) will be excluded.
-
- When 0: nothing will be excluded.
- """
- ).tag(config=True)
- limit_to__all__ = Bool(False,
- help="""
- DEPRECATED as of version 5.0.
-
- Instruct the completer to use __all__ for the completion
-
- Specifically, when completing on ``object.``.
-
- When True: only those names in obj.__all__ will be included.
-
- When False [default]: the __all__ attribute is ignored
- """,
- ).tag(config=True)
-
- @observe('limit_to__all__')
- def _limit_to_all_changed(self, change):
- warnings.warn('`IPython.core.IPCompleter.limit_to__all__` configuration '
- 'value has been deprecated since IPython 5.0, will be made to have '
-                      'no effect and then removed in a future version of IPython.',
- UserWarning)
-
- def __init__(self, shell=None, namespace=None, global_namespace=None,
- use_readline=True, config=None, **kwargs):
- """IPCompleter() -> completer
-
- Return a completer object suitable for use by the readline library
- via readline.set_completer().
-
- Inputs:
-
- - shell: a pointer to the ipython shell itself. This is needed
- because this completer knows about magic functions, and those can
- only be accessed via the ipython instance.
-
- - namespace: an optional dict where completions are performed.
-
- - global_namespace: secondary optional dict for completions, to
- handle cases (such as IPython embedded inside functions) where
- both Python scopes are visible.
-
- use_readline : bool, optional
- If true, use the readline library. This completer can still function
- without readline, though in that case callers must provide some extra
- information on each call about the current line."""
-
- self.magic_escape = ESC_MAGIC
- self.splitter = CompletionSplitter()
-
- # Readline configuration, only used by the rlcompleter method.
- if use_readline:
-            # We store the right version of readline so that later code can use it
- import IPython.utils.rlineimpl as readline
- self.readline = readline
- else:
- self.readline = None
-
- # _greedy_changed() depends on splitter and readline being defined:
- Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
- config=config, **kwargs)
-
- # List where completion matches will be stored
- self.matches = []
- self.shell = shell
- # Regexp to split filenames with spaces in them
- self.space_name_re = re.compile(r'([^\\] )')
- # Hold a local ref. to glob.glob for speed
- self.glob = glob.glob
-
- # Determine if we are running on 'dumb' terminals, like (X)Emacs
- # buffers, to avoid completion problems.
- term = os.environ.get('TERM', 'xterm')
- self.dumb_terminal = term in ['dumb', 'emacs']
-
- # Special handling of backslashes needed in win32 platforms
- if sys.platform == "win32":
- self.clean_glob = self._clean_glob_win32
- else:
- self.clean_glob = self._clean_glob
-
- # regexp to parse docstring for function signature
- self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
- self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
- # use this if positional argument name is also needed
- # = re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
-
- # All active matcher routines for completion
- self.matchers = [
- self.python_matches,
- self.file_matches,
- self.magic_matches,
- self.python_func_kw_matches,
- self.dict_key_matches,
- ]
-
- # This is set externally by InteractiveShell
- self.custom_completers = None
-
- def all_completions(self, text):
- """
- Wrapper around the complete method for the benefit of emacs.
- """
- return self.complete(text)[1]
-
- def _clean_glob(self, text):
- return self.glob("%s*" % text)
-
- def _clean_glob_win32(self, text):
- return [f.replace("\\", "/")
- for f in self.glob("%s*" % text)]
-
- def file_matches(self, text):
- """Match filenames, expanding ~USER type strings.
-
- Most of the seemingly convoluted logic in this completer is an
- attempt to handle filenames with spaces in them. And yet it's not
- quite perfect, because Python's readline doesn't expose all of the
- GNU readline details needed for this to be done correctly.
-
- For a filename with a space in it, the printed completions will be
- only the parts after what's already been typed (instead of the
- full completions, as is normally done). I don't think with the
- current (as of Python 2.3) Python readline it's possible to do
- better."""
-
- # chars that require escaping with backslash - i.e. chars
- # that readline treats incorrectly as delimiters, but we
- # don't want to treat as delimiters in filename matching
- # when escaped with backslash
- if text.startswith('!'):
- text = text[1:]
- text_prefix = u'!'
- else:
- text_prefix = u''
-
- text_until_cursor = self.text_until_cursor
- # track strings with open quotes
- open_quotes = has_open_quotes(text_until_cursor)
-
- if '(' in text_until_cursor or '[' in text_until_cursor:
- lsplit = text
- else:
- try:
- # arg_split ~ shlex.split, but with unicode bugs fixed by us
- lsplit = arg_split(text_until_cursor)[-1]
- except ValueError:
- # typically an unmatched ", or backslash without escaped char.
- if open_quotes:
- lsplit = text_until_cursor.split(open_quotes)[-1]
- else:
- return []
- except IndexError:
- # tab pressed on empty line
- lsplit = ""
-
- if not open_quotes and lsplit != protect_filename(lsplit):
- # if protectables are found, do matching on the whole escaped name
- has_protectables = True
- text0, text = text, lsplit
- else:
- has_protectables = False
- text = os.path.expanduser(text)
-
- if text == "":
- return [text_prefix + cast_unicode_py2(protect_filename(f)) for f in self.glob("*")]
-
- # Compute the matches from the filesystem
- if sys.platform == 'win32':
- m0 = self.clean_glob(text)
- else:
- m0 = self.clean_glob(text.replace('\\', ''))
-
- if has_protectables:
- # If we had protectables, we need to revert our changes to the
- # beginning of filename so that we don't double-write the part
- # of the filename we have so far
- len_lsplit = len(lsplit)
- matches = [text_prefix + text0 +
- protect_filename(f[len_lsplit:]) for f in m0]
- else:
- if open_quotes:
- # if we have a string with an open quote, we don't need to
- # protect the names at all (and we _shouldn't_, as it
- # would cause bugs when the filesystem call is made).
- matches = m0
- else:
- matches = [text_prefix +
- protect_filename(f) for f in m0]
-
- # Mark directories in input list by appending '/' to their names.
- return [cast_unicode_py2(x + '/') if os.path.isdir(x) else x for x in matches]
-
- def magic_matches(self, text):
- """Match magics"""
- # Get all shell magics now rather than statically, so magics loaded at
- # runtime show up too.
- lsm = self.shell.magics_manager.lsmagic()
- line_magics = lsm['line']
- cell_magics = lsm['cell']
- pre = self.magic_escape
- pre2 = pre + pre
-
- # Completion logic:
- # - user gives %%: only do cell magics
- # - user gives %: do both line and cell magics
- # - no prefix: do both
- # In other words, line magics are skipped if the user gives %% explicitly
- bare_text = text.lstrip(pre)
- comp = [pre2 + m for m in cell_magics if m.startswith(bare_text)]
- if not text.startswith(pre2):
- comp += [pre + m for m in line_magics if m.startswith(bare_text)]
- return [cast_unicode_py2(c) for c in comp]
-
- def python_matches(self, text):
- """Match attributes or global python names"""
- if "." in text:
- try:
- matches = self.attr_matches(text)
- if text.endswith('.') and self.omit__names:
- if self.omit__names == 1:
- # true if txt is _not_ a __ name, false otherwise:
- no__name = (lambda txt:
- re.match(r'.*\.__.*?__', txt) is None)
- else:
- # true if txt is _not_ a _ name, false otherwise:
- no__name = (lambda txt:
- re.match(r'\._.*?', txt[txt.rindex('.'):]) is None)
- matches = filter(no__name, matches)
- except NameError:
-                # catches <undefined attributes>.<tab>
- matches = []
- else:
- matches = self.global_matches(text)
- return matches
-
- def _default_arguments_from_docstring(self, doc):
- """Parse the first line of docstring for call signature.
-
- Docstring should be of the form 'min(iterable[, key=func])\n'.
- It can also parse cython docstring of the form
- 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
- """
- if doc is None:
- return []
-
-        # we care only about the first line
- line = doc.lstrip().splitlines()[0]
-
- # p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
- # 'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
- sig = self.docstring_sig_re.search(line)
- if sig is None:
- return []
- # iterable[, key=func]' -> ['iterable[' ,' key=func]']
- sig = sig.groups()[0].split(',')
- ret = []
- for s in sig:
- # re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
- ret += self.docstring_kwd_re.findall(s)
- return ret
-
- def _default_arguments(self, obj):
- """Return the list of default arguments of obj if it is callable,
- or empty list otherwise."""
- call_obj = obj
- ret = []
- if inspect.isbuiltin(obj):
- pass
- elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
- if inspect.isclass(obj):
-                # for cython embedsignature=True the constructor docstring
-                # belongs to the object itself, not __init__
- ret += self._default_arguments_from_docstring(
- getattr(obj, '__doc__', ''))
- # for classes, check for __init__,__new__
- call_obj = (getattr(obj, '__init__', None) or
- getattr(obj, '__new__', None))
- # for all others, check if they are __call__able
- elif hasattr(obj, '__call__'):
- call_obj = obj.__call__
- ret += self._default_arguments_from_docstring(
- getattr(call_obj, '__doc__', ''))
-
- if PY3:
- _keeps = (inspect.Parameter.KEYWORD_ONLY,
- inspect.Parameter.POSITIONAL_OR_KEYWORD)
- signature = inspect.signature
- else:
- import IPython.utils.signatures
- _keeps = (IPython.utils.signatures.Parameter.KEYWORD_ONLY,
- IPython.utils.signatures.Parameter.POSITIONAL_OR_KEYWORD)
- signature = IPython.utils.signatures.signature
-
- try:
- sig = signature(call_obj)
- ret.extend(k for k, v in sig.parameters.items() if
- v.kind in _keeps)
- except ValueError:
- pass
-
- return list(set(ret))
-
- def python_func_kw_matches(self, text):
- """Match named parameters (kwargs) of the last open function"""
-
- if "." in text: # a parameter cannot be dotted
- return []
- try:
- regexp = self.__funcParamsRegex
- except AttributeError:
- regexp = self.__funcParamsRegex = re.compile(r'''
-                '.*?(?<!\\)' |    # single quoted strings or
-                ".*?(?<!\\)" |    # double quoted strings or
-                \w+          |    # identifier
-                \S            # other characters
-                ''', re.VERBOSE)
-
-        # 1. find the nearest identifier that comes before an unclosed
-        # parenthesis before the cursor
-        # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
- tokens = regexp.findall(self.text_until_cursor)
- tokens.reverse()
- iterTokens = iter(tokens);
- openPar = 0
-
- for token in iterTokens:
- if token == ')':
- openPar -= 1
- elif token == '(':
- openPar += 1
- if openPar > 0:
- # found the last unclosed parenthesis
- break
- else:
- return []
- # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
- ids = []
- isId = re.compile(r'\w+$').match
-
- while True:
- try:
- ids.append(next(iterTokens))
- if not isId(ids[-1]):
- ids.pop();
- break
- if not next(iterTokens) == '.':
- break
- except StopIteration:
- break
- # lookup the candidate callable matches either using global_matches
- # or attr_matches for dotted names
- if len(ids) == 1:
- callableMatches = self.global_matches(ids[0])
- else:
- callableMatches = self.attr_matches('.'.join(ids[::-1]))
- argMatches = []
- for callableMatch in callableMatches:
- try:
- namedArgs = self._default_arguments(eval(callableMatch,
- self.namespace))
- except:
- continue
-
- for namedArg in namedArgs:
- if namedArg.startswith(text):
- argMatches.append(u"%s=" % namedArg)
- return argMatches
-
- def dict_key_matches(self, text):
- "Match string keys in a dictionary, after e.g. 'foo[' "
-
- def get_keys(obj):
- # Objects can define their own completions by defining an
-            # _ipython_key_completions_() method.
- method = get_real_method(obj, '_ipython_key_completions_')
- if method is not None:
- return method()
-
- # Special case some common in-memory dict-like types
- if isinstance(obj, dict) or \
- _safe_isinstance(obj, 'pandas', 'DataFrame'):
- try:
- return list(obj.keys())
- except Exception:
- return []
- elif _safe_isinstance(obj, 'numpy', 'ndarray') or \
- _safe_isinstance(obj, 'numpy', 'void'):
- return obj.dtype.names or []
- return []
-
- try:
- regexps = self.__dict_key_regexps
- except AttributeError:
-            dict_key_re_fmt = r'''(?x)
-            (  # match dict-referring expression wrt greedy setting
-                %s
-            )
-            \[   # open bracket
-            \s*  # and optional whitespace
-            ([uUbB]?  # string prefix (r not handled)
-                (?:   # unclosed string
-                    '(?:[^']|(?<!\\)\\')*
-                |
-                    "(?:[^"]|(?<!\\)\\")*
-                )
-            )?
-            $
-            '''
-            regexps = self.__dict_key_regexps = {
-                False: re.compile(dict_key_re_fmt % '''
-                                  # identifiers separated by .
-                                  (?!\d)\w+
-                                  (?:\.(?!\d)\w+)*
-                                  '''),
-                True: re.compile(dict_key_re_fmt % '''
-                                  .+
-                                  ''')
-            }
-
-        match = regexps[self.greedy].search(self.text_until_cursor)
-        if match is None:
-            return []
-
-        expr, prefix = match.groups()
-        try:
-            obj = eval(expr, self.namespace)
-        except Exception:
-            try:
-                obj = eval(expr, self.global_namespace)
-            except Exception:
-                return []
-
-        keys = get_keys(obj)
-        if not keys:
-            return keys
-        closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims)
-        if not matches:
-            return matches
-
-        # get the cursor position of
-        # - the text being completed
-        # - the start of the key text
-        # - the start of the completion
-        text_start = len(self.text_until_cursor) - len(text)
-        if prefix:
-            key_start = match.start(2)
-            completion_start = key_start + token_offset
-        else:
-            key_start = completion_start = match.end()
-
-        # grab the leading prefix, to make sure all completions
-        # start with `text`
-        if text_start > key_start:
-            leading = ''
-        else:
-            leading = text[text_start:completion_start]
-
- # the index of the `[` character
- bracket_idx = match.end(1)
-
- # append closing quote and bracket as appropriate
- # this is *not* appropriate if the opening quote or bracket is outside
- # the text given to this method
- suf = ''
- continuation = self.line_buffer[len(self.text_until_cursor):]
- if key_start > text_start and closing_quote:
- # quotes were opened inside text, maybe close them
- if continuation.startswith(closing_quote):
- continuation = continuation[len(closing_quote):]
- else:
- suf += closing_quote
- if bracket_idx > text_start:
- # brackets were opened inside text, maybe close them
- if not continuation.startswith(']'):
- suf += ']'
-
- return [leading + k + suf for k in matches]
-
- def unicode_name_matches(self, text):
- u"""Match Latex-like syntax for unicode characters base
- on the name of the character.
-
- This does \\GREEK SMALL LETTER ETA -> η
-
- Works only on valid python 3 identifier, or on combining characters that
- will combine to form a valid identifier.
-
- Used on Python 3 only.
- """
- slashpos = text.rfind('\\')
- if slashpos > -1:
- s = text[slashpos + 1:]
- try:
- unic = unicodedata.lookup(s)
- # allow combining chars
- if ('a' + unic).isidentifier():
- return '\\' + s, [unic]
- except KeyError:
- pass
- return u'', []
-
- def latex_matches(self, text):
- u"""Match Latex syntax for unicode characters.
-
- This does both \\alp -> \\alpha and \\alpha -> α
-
- Used on Python 3 only.
- """
- slashpos = text.rfind('\\')
- if slashpos > -1:
- s = text[slashpos:]
- if s in latex_symbols:
- # Try to complete a full latex symbol to unicode
- # \\alpha -> α
- return s, [latex_symbols[s]]
- else:
- # If a user has partially typed a latex symbol, give them
- # a full list of options \al -> [\aleph, \alpha]
- matches = [k for k in latex_symbols if k.startswith(s)]
- return s, matches
- return u'', []
-
- def dispatch_custom_completer(self, text):
- if not self.custom_completers:
- return
-
- line = self.line_buffer
- if not line.strip():
- return None
-
- # Create a little structure to pass all the relevant information about
- # the current completion to any custom completer.
- event = Bunch()
- event.line = line
- event.symbol = text
- cmd = line.split(None, 1)[0]
- event.command = cmd
- event.text_until_cursor = self.text_until_cursor
-
- # for foo etc, try also to find completer for %foo
- if not cmd.startswith(self.magic_escape):
- try_magic = self.custom_completers.s_matches(
- self.magic_escape + cmd)
- else:
- try_magic = []
-
- for c in itertools.chain(self.custom_completers.s_matches(cmd),
- try_magic,
- self.custom_completers.flat_matches(self.text_until_cursor)):
- try:
- res = c(event)
- if res:
- # first, try case sensitive match
- withcase = [cast_unicode_py2(r) for r in res if r.startswith(text)]
- if withcase:
- return withcase
- # if none, then case insensitive ones are ok too
- text_low = text.lower()
- return [cast_unicode_py2(r) for r in res if r.lower().startswith(text_low)]
- except TryNext:
- pass
- except KeyboardInterrupt:
- """
-                If a custom completer takes too long,
-                let the keyboard interrupt abort it and return nothing.
- """
- break
-
- return None
-
- def complete(self, text=None, line_buffer=None, cursor_pos=None):
- """Find completions for the given text and line context.
-
- Note that both the text and the line_buffer are optional, but at least
- one of them must be given.
-
- Parameters
- ----------
- text : string, optional
- Text to perform the completion on. If not given, the line buffer
- is split using the instance's CompletionSplitter object.
-
- line_buffer : string, optional
- If not given, the completer attempts to obtain the current line
-            buffer via readline. This keyword allows clients that request
-            text completions in non-readline contexts to inform the completer
-            of the entire text.
-
- cursor_pos : int, optional
- Index of the cursor in the full line buffer. Should be provided by
-            remote frontends, where the kernel has no access to frontend state.
-
- Returns
- -------
- text : str
- Text that was actually used in the completion.
-
- matches : list
- A list of completion matches.
- """
- # if the cursor position isn't given, the only sane assumption we can
- # make is that it's at the end of the line (the common case)
- if cursor_pos is None:
- cursor_pos = len(line_buffer) if text is None else len(text)
-
- if self.use_main_ns:
- self.namespace = __main__.__dict__
-
- if PY3 and self.backslash_combining_completions:
-
- base_text = text if not line_buffer else line_buffer[:cursor_pos]
- latex_text, latex_matches = self.latex_matches(base_text)
- if latex_matches:
- return latex_text, latex_matches
- name_text = ''
- name_matches = []
- for meth in (self.unicode_name_matches, back_latex_name_matches, back_unicode_name_matches):
- name_text, name_matches = meth(base_text)
- if name_text:
- return name_text, name_matches[:MATCHES_LIMIT]
-
- # if text is either None or an empty string, rely on the line buffer
- if not text:
- text = self.splitter.split_line(line_buffer, cursor_pos)
-
- # If no line buffer is given, assume the input text is all there was
- if line_buffer is None:
- line_buffer = text
-
- self.line_buffer = line_buffer
- self.text_until_cursor = self.line_buffer[:cursor_pos]
-
- # Start with a clean slate of completions
- self.matches[:] = []
- custom_res = self.dispatch_custom_completer(text)
- if custom_res is not None:
- # did custom completers produce something?
- self.matches = custom_res
- else:
- # Extend the list of completions with the results of each
- # matcher, so we return results to the user from all
- # namespaces.
- if self.merge_completions:
- self.matches = []
- for matcher in self.matchers:
- try:
- self.matches.extend(matcher(text))
- except:
- # Show the ugly traceback if the matcher causes an
- # exception, but do NOT crash the kernel!
- sys.excepthook(*sys.exc_info())
- else:
- for matcher in self.matchers:
- self.matches = matcher(text)
- if self.matches:
- break
- # FIXME: we should extend our api to return a dict with completions for
- # different types of objects. The rlcomplete() method could then
- # simply collapse the dict into a list for readline, but we'd have
-        # richer completion semantics in other environments.
- self.matches = sorted(set(self.matches), key=completions_sorting_key)[:MATCHES_LIMIT]
-
- return text, self.matches
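For context on the file deleted above: `Completer.complete(text, state)` implements the stdlib readline completer protocol, where the frontend calls the completer with `state = 0, 1, 2, ...` and stops once it returns `None`. A toy sketch of that protocol (`ToyCompleter` is an illustrative name, not part of IPython):

```python
class ToyCompleter(object):
    """Bare-bones readline-style completer over a namespace dict."""

    def __init__(self, namespace):
        self.namespace = namespace
        self.matches = []

    def complete(self, text, state):
        if state == 0:
            # Matches are computed once per prefix, then replayed per state.
            self.matches = sorted(n for n in self.namespace
                                  if n.startswith(text))
        try:
            return self.matches[state]
        except IndexError:
            return None  # tells readline there are no more matches

comp = ToyCompleter({'alpha': 1, 'alphabet': 2, 'beta': 3})
print([comp.complete('alph', s) for s in range(3)])
# -> ['alpha', 'alphabet', None]
```

`IPCompleter.complete(text, line_buffer, cursor_pos)` returns the whole match list at once instead, which is what lets non-readline frontends (e.g. remote kernels) use the same machinery.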
diff --git a/venv/lib/python2.7/site-packages/IPython/core/completerlib.py b/venv/lib/python2.7/site-packages/IPython/core/completerlib.py
deleted file mode 100644
index 089dba7..0000000
--- a/venv/lib/python2.7/site-packages/IPython/core/completerlib.py
+++ /dev/null
@@ -1,365 +0,0 @@
-# encoding: utf-8
-"""Implementations for various useful completers.
-
-These are all loaded by default by IPython.
-"""
-# -----------------------------------------------------------------------------
-# Copyright (C) 2010-2011 The IPython Development Team.
-#
-# Distributed under the terms of the BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-# -----------------------------------------------------------------------------
-
-# -----------------------------------------------------------------------------
-# Imports
-# -----------------------------------------------------------------------------
-from __future__ import print_function
-
-# Stdlib imports
-import glob
-import inspect
-import os
-import re
-import sys
-
-try:
- # Python >= 3.3
- from importlib.machinery import all_suffixes
-
- _suffixes = all_suffixes()
-except ImportError:
- from imp import get_suffixes
-
- _suffixes = [s[0] for s in get_suffixes()]
-
-# Third-party imports
-from time import time
-from zipimport import zipimporter
-
-# Our own imports
-from IPython.core.completer import expand_user, compress_user
-from IPython.core.error import TryNext
-from IPython.utils._process_common import arg_split
-from IPython.utils.py3compat import string_types
-
-# FIXME: this should be pulled in with the right call via the component system
-from IPython import get_ipython
-
-# -----------------------------------------------------------------------------
-# Globals and constants
-# -----------------------------------------------------------------------------
-
-# Time in seconds after which the rootmodules will be stored permanently in the
-# ipython ip.db database (kept in the user's .ipython dir).
-TIMEOUT_STORAGE = 2
-
-# Time in seconds after which we give up
-TIMEOUT_GIVEUP = 20
-
-# Regular expression for the python import statement
-import_re = re.compile(r'(?P<name>[a-zA-Z_][a-zA-Z0-9_]*?)'
-                       r'(?P<package>[/\\]__init__)?'
-                       r'(?P<suffix>%s)$' %
-                       r'|'.join(re.escape(s) for s in _suffixes))
-
-# RE for the ipython %run command (python + ipython scripts)
-magic_run_re = re.compile(r'.*(\.ipy|\.ipynb|\.py[w]?)$')
-
-
-# -----------------------------------------------------------------------------
-# Local utilities
-# -----------------------------------------------------------------------------
-
-def module_list(path):
- """
- Return the list containing the names of the modules available in the given
- folder.
- """
- # sys.path has the cwd as an empty string, but isdir/listdir need it as '.'
- if path == '':
- path = '.'
-
- # A few local constants to be used in loops below
- pjoin = os.path.join
-
- if os.path.isdir(path):
- # Build a list of all files in the directory and all files
- # in its subdirectories. For performance reasons, do not
- # recurse more than one level into subdirectories.
- files = []
- for root, dirs, nondirs in os.walk(path, followlinks=True):
- subdir = root[len(path) + 1:]
- if subdir:
- files.extend(pjoin(subdir, f) for f in nondirs)
- dirs[:] = [] # Do not recurse into additional subdirectories.
- else:
- files.extend(nondirs)
-
- else:
- try:
- files = list(zipimporter(path)._files.keys())
- except:
- files = []
-
- # Build a list of modules which match the import_re regex.
- modules = []
- for f in files:
- m = import_re.match(f)
- if m:
- modules.append(m.group('name'))
- return list(set(modules))
-
-
-def get_root_modules():
- """
- Returns a list containing the names of all the modules available in the
- folders of the pythonpath.
-
- ip.db['rootmodules_cache'] maps sys.path entries to list of modules.
- """
- ip = get_ipython()
- if ip is None:
- # No global shell instance to store cached list of modules.
- # Don't try to scan for modules every time.
- return list(sys.builtin_module_names)
-
- rootmodules_cache = ip.db.get('rootmodules_cache', {})
- rootmodules = list(sys.builtin_module_names)
- start_time = time()
- store = False
- for path in sys.path:
- try:
- modules = rootmodules_cache[path]
- except KeyError:
- modules = module_list(path)
- try:
- modules.remove('__init__')
- except ValueError:
- pass
- if path not in ('', '.'): # cwd modules should not be cached
- rootmodules_cache[path] = modules
- if time() - start_time > TIMEOUT_STORAGE and not store:
- store = True
- print("\nCaching the list of root modules, please wait!")
- print("(This will only be done once - type '%rehashx' to "
- "reset cache!)\n")
- sys.stdout.flush()
- if time() - start_time > TIMEOUT_GIVEUP:
- print("This is taking too long, we give up.\n")
- return []
- rootmodules.extend(modules)
- if store:
- ip.db['rootmodules_cache'] = rootmodules_cache
- rootmodules = list(set(rootmodules))
- return rootmodules
-
-
-def is_importable(module, attr, only_modules):
- if only_modules:
- return inspect.ismodule(getattr(module, attr))
- else:
- return not (attr[:2] == '__' and attr[-2:] == '__')
-
-
-def try_import(mod, only_modules=False):
- mod = mod.rstrip('.')
- try:
- m = __import__(mod)
- except:
- return []
- mods = mod.split('.')
- for module in mods[1:]:
- m = getattr(m, module)
-
- m_is_init = hasattr(m, '__file__') and '__init__' in m.__file__
-
- completions = []
- if (not hasattr(m, '__file__')) or (not only_modules) or m_is_init:
- completions.extend([attr for attr in dir(m) if
- is_importable(m, attr, only_modules)])
-
- completions.extend(getattr(m, '__all__', []))
- if m_is_init:
- completions.extend(module_list(os.path.dirname(m.__file__)))
- completions = {c for c in completions if isinstance(c, string_types)}
- completions.discard('__init__')
- return list(completions)
-
-
-# -----------------------------------------------------------------------------
-# Completion-related functions.
-# -----------------------------------------------------------------------------
-
-def quick_completer(cmd, completions):
- """ Easily create a trivial completer for a command.
-
-    Takes either a list of completions, or all completions as a single string
-    (that will be split on whitespace).
-
- Example::
-
- [d:\ipython]|1> import ipy_completers
- [d:\ipython]|2> ipy_completers.quick_completer('foo', ['bar','baz'])
- [d:\ipython]|3> foo b
- bar baz
- [d:\ipython]|3> foo ba
- """
-
- if isinstance(completions, string_types):
- completions = completions.split()
-
- def do_complete(self, event):
- return completions
-
- get_ipython().set_hook('complete_command', do_complete, str_key=cmd)
-
-
-def module_completion(line):
- """
- Returns a list containing the completion possibilities for an import line.
-
-    The line looks like this:
- 'import xml.d'
- 'from xml.dom import'
- """
-
- words = line.split(' ')
- nwords = len(words)
-
- # from whatever -> 'import '
- if nwords == 3 and words[0] == 'from':
- return ['import ']
-
- # 'from xy' or 'import xy'
- if nwords < 3 and (words[0] in {'%aimport', 'import', 'from'}):
- if nwords == 1:
- return get_root_modules()
- mod = words[1].split('.')
- if len(mod) < 2:
- return get_root_modules()
- completion_list = try_import('.'.join(mod[:-1]), True)
- return ['.'.join(mod[:-1] + [el]) for el in completion_list]
-
- # 'from xyz import abc'
- if nwords >= 3 and words[0] == 'from':
- mod = words[1]
- return try_import(mod)
-
-
-# -----------------------------------------------------------------------------
-# Completers
-# -----------------------------------------------------------------------------
-# These all have the func(self, event) signature to be used as custom
-# completers
-
-def module_completer(self, event):
- """Give completions after user has typed 'import ...' or 'from ...'"""
-
- # This works in all versions of python. While 2.5 has
- # pkgutil.walk_packages(), that particular routine is fairly dangerous,
- # since it imports *EVERYTHING* on sys.path. That is: a) very slow b) full
- # of possibly problematic side effects.
-    # This searches the folders in sys.path for available modules.
-
- return module_completion(event.line)
-
-
-# FIXME: there's a lot of logic common to the run, cd and builtin file
-# completers, that is currently reimplemented in each.
-
-def magic_run_completer(self, event):
- """Complete files that end in .py or .ipy or .ipynb for the %run command.
- """
- comps = arg_split(event.line, strict=False)
- # relpath should be the current token that we need to complete.
- if (len(comps) > 1) and (not event.line.endswith(' ')):
- relpath = comps[-1].strip("'\"")
- else:
- relpath = ''
-
- # print("\nev=", event) # dbg
- # print("rp=", relpath) # dbg
- # print('comps=', comps) # dbg
-
- lglob = glob.glob
- isdir = os.path.isdir
- relpath, tilde_expand, tilde_val = expand_user(relpath)
-
- # Find if the user has already typed the first filename, after which we
- # should complete on all files, since after the first one other files may
- # be arguments to the input script.
-
- if any(magic_run_re.match(c) for c in comps):
- matches = [f.replace('\\', '/') + ('/' if isdir(f) else '')
- for f in lglob(relpath + '*')]
- else:
- dirs = [f.replace('\\', '/') + "/" for f in lglob(relpath + '*') if isdir(f)]
- pys = [f.replace('\\', '/')
- for f in lglob(relpath + '*.py') + lglob(relpath + '*.ipy') +
- lglob(relpath + '*.ipynb') + lglob(relpath + '*.pyw')]
-
- matches = dirs + pys
-
- # print('run comp:', dirs+pys) # dbg
- return [compress_user(p, tilde_expand, tilde_val) for p in matches]
-
-
-def cd_completer(self, event):
- """Completer function for cd, which only returns directories."""
- ip = get_ipython()
- relpath = event.symbol
-
- # print(event) # dbg
- if event.line.endswith('-b') or ' -b ' in event.line:
- # return only bookmark completions
- bkms = self.db.get('bookmarks', None)
- if bkms:
- return bkms.keys()
- else:
- return []
-
- if event.symbol == '-':
- width_dh = str(len(str(len(ip.user_ns['_dh']) + 1)))
- # jump in directory history by number
- fmt = '-%0' + width_dh + 'd [%s]'
- ents = [fmt % (i, s) for i, s in enumerate(ip.user_ns['_dh'])]
- if len(ents) > 1:
- return ents
- return []
-
- if event.symbol.startswith('--'):
- return ["--" + os.path.basename(d) for d in ip.user_ns['_dh']]
-
- # Expand ~ in path and normalize directory separators.
- relpath, tilde_expand, tilde_val = expand_user(relpath)
- relpath = relpath.replace('\\', '/')
-
- found = []
- for d in [f.replace('\\', '/') + '/' for f in glob.glob(relpath + '*')
- if os.path.isdir(f)]:
- if ' ' in d:
-            # we don't want to deal with spaces here; the complex
-            # code for that lives elsewhere
- raise TryNext
-
- found.append(d)
-
- if not found:
- if os.path.isdir(relpath):
- return [compress_user(relpath, tilde_expand, tilde_val)]
-
- # if no completions so far, try bookmarks
- bks = self.db.get('bookmarks', {})
- bkmatches = [s for s in bks if s.startswith(event.symbol)]
- if bkmatches:
- return bkmatches
-
- raise TryNext
-
- return [compress_user(p, tilde_expand, tilde_val) for p in found]
-
-
-def reset_completer(self, event):
- "A completer for %reset magic"
- return '-f -s in out array dhist'.split()
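Of the helpers deleted above, `quick_completer` is the one aimed at end users. Per its own docstring, a usage sketch looks like the following; it requires a running IPython session (`get_ipython()` must not be `None`), and the `deploy` command with its candidates is a made-up example:

```python
# quick_completer() registers a 'complete_command' hook keyed on the
# command string, so this must run inside an active IPython session.
from IPython.core.completerlib import quick_completer

# After this, `deploy <TAB>` at the IPython prompt offers both candidates;
# a whitespace-separated string ('staging production') would work too.
quick_completer('deploy', ['staging', 'production'])
```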
diff --git a/venv/lib/python2.7/site-packages/IPython/core/crashhandler.py b/venv/lib/python2.7/site-packages/IPython/core/crashhandler.py
deleted file mode 100644
index 3a9e26a..0000000
--- a/venv/lib/python2.7/site-packages/IPython/core/crashhandler.py
+++ /dev/null
@@ -1,224 +0,0 @@
-# encoding: utf-8
-"""sys.excepthook for IPython itself, leaves a detailed report on disk.
-
-Authors:
-
-* Fernando Perez
-* Brian E. Granger
-"""
-
-# -----------------------------------------------------------------------------
-# Copyright (C) 2001-2007 Fernando Perez.
-# Copyright (C) 2008-2011 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-# -----------------------------------------------------------------------------
-
-# -----------------------------------------------------------------------------
-# Imports
-# -----------------------------------------------------------------------------
-from __future__ import print_function
-
-import os
-import sys
-import traceback
-from pprint import pformat
-
-from IPython.core import ultratb
-from IPython.core.release import author_email
-from IPython.utils.py3compat import input, getcwd
-from IPython.utils.sysinfo import sys_info
-
-# -----------------------------------------------------------------------------
-# Code
-# -----------------------------------------------------------------------------
-
-# Template for the user message.
-_default_message_template = """\
-Oops, {app_name} crashed. We do our best to make it stable, but...
-
-A crash report was automatically generated with the following information:
- - A verbatim copy of the crash traceback.
- - A copy of your input history during this session.
- - Data on your current {app_name} configuration.
-
-It was left in the file named:
-\t'{crash_report_fname}'
-If you can email this file to the developers, the information in it will help
-them in understanding and correcting the problem.
-
-You can mail it to: {contact_name} at {contact_email}
-with the subject '{app_name} Crash Report'.
-
-If you want to do it now, the following command will work (under Unix):
-mail -s '{app_name} Crash Report' {contact_email} < {crash_report_fname}
-
-In your email, please also include information about:
-- The operating system under which the crash happened: Linux, macOS, Windows,
- other, and which exact version (for example: Ubuntu 16.04.3, macOS 10.13.2,
- Windows 10 Pro), and whether it is 32-bit or 64-bit;
-- How {app_name} was installed: using pip or conda, from GitHub, as part of
- a Docker container, or other, providing more detail if possible;
-- How to reproduce the crash: what exact sequence of instructions can one
- input to get the same crash? Ideally, find a minimal yet complete sequence
- of instructions that yields the crash.
-
-To ensure accurate tracking of this issue, please file a report about it at:
-{bug_tracker}
-"""
-
-_lite_message_template = """
-If you suspect this is an IPython bug, please report it at:
- https://github.com/ipython/ipython/issues
-or send an email to the mailing list at {email}
-
-You can print a more detailed traceback right now with "%tb", or use "%debug"
-to interactively debug it.
-
-Extra-detailed tracebacks for bug-reporting purposes can be enabled via:
- {config}Application.verbose_crash=True
-"""
-
-
-class CrashHandler(object):
- """Customizable crash handlers for IPython applications.
-
- Instances of this class provide a :meth:`__call__` method which can be
- used as a ``sys.excepthook``. The :meth:`__call__` signature is::
-
- def __call__(self, etype, evalue, etb)
- """
-
- message_template = _default_message_template
- section_sep = '\n\n' + '*' * 75 + '\n\n'
-
- def __init__(self, app, contact_name=None, contact_email=None,
- bug_tracker=None, show_crash_traceback=True, call_pdb=False):
- """Create a new crash handler
-
- Parameters
- ----------
- app : Application
- A running :class:`Application` instance, which will be queried at
- crash time for internal information.
-
- contact_name : str
- A string with the name of the person to contact.
-
- contact_email : str
- A string with the email address of the contact.
-
- bug_tracker : str
- A string with the URL for your project's bug tracker.
-
- show_crash_traceback : bool
- If false, don't print the crash traceback on stderr, only generate
- the on-disk report
-
- Non-argument instance attributes:
-
- These instances contain some non-argument attributes which allow for
- further customization of the crash handler's behavior. Please see the
- source for further details.
- """
- self.crash_report_fname = "Crash_report_%s.txt" % app.name
- self.app = app
- self.call_pdb = call_pdb
- # self.call_pdb = True # dbg
- self.show_crash_traceback = show_crash_traceback
- self.info = dict(app_name=app.name,
- contact_name=contact_name,
- contact_email=contact_email,
- bug_tracker=bug_tracker,
- crash_report_fname=self.crash_report_fname)
-
- def __call__(self, etype, evalue, etb):
- """Handle an exception, call for compatible with sys.excepthook"""
-
-        # do not allow the crash handler to be called twice without reinstalling it;
- # this prevents unlikely errors in the crash handling from entering an
- # infinite loop.
- sys.excepthook = sys.__excepthook__
-
- # Report tracebacks shouldn't use color in general (safer for users)
- color_scheme = 'NoColor'
-
- # Use this ONLY for developer debugging (keep commented out for release)
- # color_scheme = 'Linux' # dbg
- try:
- rptdir = self.app.ipython_dir
- except:
- rptdir = getcwd()
- if rptdir is None or not os.path.isdir(rptdir):
- rptdir = getcwd()
- report_name = os.path.join(rptdir, self.crash_report_fname)
- # write the report filename into the instance dict so it can get
- # properly expanded out in the user message template
- self.crash_report_fname = report_name
- self.info['crash_report_fname'] = report_name
- TBhandler = ultratb.VerboseTB(
- color_scheme=color_scheme,
- long_header=1,
- call_pdb=self.call_pdb,
- )
- if self.call_pdb:
- TBhandler(etype, evalue, etb)
- return
- else:
- traceback = TBhandler.text(etype, evalue, etb, context=31)
-
- # print traceback to screen
- if self.show_crash_traceback:
- print(traceback, file=sys.stderr)
-
- # and generate a complete report on disk
- try:
- report = open(report_name, 'w')
- except:
- print('Could not create crash report on disk.', file=sys.stderr)
- return
-
- # Inform user on stderr of what happened
- print('\n' + '*' * 70 + '\n', file=sys.stderr)
- print(self.message_template.format(**self.info), file=sys.stderr)
-
- # Construct report on disk
- report.write(self.make_report(traceback))
- report.close()
- input("Hit to quit (your terminal may close):")
-
- def make_report(self, traceback):
- """Return a string containing a crash report."""
-
- sec_sep = self.section_sep
-
- report = ['*' * 75 + '\n\n' + 'IPython post-mortem report\n\n']
- rpt_add = report.append
- rpt_add(sys_info())
-
- try:
- config = pformat(self.app.config)
- rpt_add(sec_sep)
-            rpt_add('Application name: %s\n\n' % self.app.name)  # self.app_name does not exist
- rpt_add('Current user configuration structure:\n\n')
- rpt_add(config)
- except:
- pass
- rpt_add(sec_sep + 'Crash traceback:\n\n' + traceback)
-
- return ''.join(report)
-
-
-def crash_handler_lite(etype, evalue, tb):
- """a light excepthook, adding a small message to the usual traceback"""
- traceback.print_exception(etype, evalue, tb)
-
- from IPython.core.interactiveshell import InteractiveShell
- if InteractiveShell.initialized():
- # we are in a Shell environment, give %magic example
- config = "%config "
- else:
- # we are not in a shell, show generic config
- config = "c."
- print(_lite_message_template.format(email=author_email, config=config), file=sys.stderr)
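The crash handler deleted above follows the standard `sys.excepthook` pattern: install a callable with the `(etype, evalue, etb)` signature, and restore the default hook first thing inside it so a failure in the handler cannot recurse. A stripped-down sketch under those assumptions (the report message is a placeholder, not IPython's):

```python
from __future__ import print_function

import sys
import traceback

def minimal_crash_hook(etype, evalue, etb):
    # Disarm first, exactly as CrashHandler.__call__ does, so an error
    # raised below falls through to the default hook instead of recursing.
    sys.excepthook = sys.__excepthook__
    traceback.print_exception(etype, evalue, etb)
    print('\nA full crash report would be written to disk here.',
          file=sys.stderr)

sys.excepthook = minimal_crash_hook
```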
diff --git a/venv/lib/python2.7/site-packages/IPython/core/debugger.py b/venv/lib/python2.7/site-packages/IPython/core/debugger.py
deleted file mode 100644
index 8e87142..0000000
--- a/venv/lib/python2.7/site-packages/IPython/core/debugger.py
+++ /dev/null
@@ -1,621 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-Pdb debugger class.
-
-Modified from the standard pdb.Pdb class to avoid including readline, so that
-the command line completion of other programs which include this isn't
-damaged.
-
-In the future, this class will be expanded with improvements over the standard
-pdb.
-
-The code in this file is mainly lifted out of cmd.py in Python 2.2, with minor
-changes. Licensing should therefore be under the standard Python terms. For
-details on the PSF (Python Software Foundation) standard license, see:
-
-https://docs.python.org/2/license.html
-"""
-
-# *****************************************************************************
-#
-# This file is licensed under the PSF license.
-#
-# Copyright (C) 2001 Python Software Foundation, www.python.org
-# Copyright (C) 2005-2006 Fernando Perez.
-#
-#
-# *****************************************************************************
-from __future__ import print_function
-
-import bdb
-import functools
-import inspect
-import sys
-import warnings
-
-from IPython import get_ipython
-from IPython.core.excolors import exception_colors
-from IPython.testing.skipdoctest import skip_doctest
-from IPython.utils import PyColorize, ulinecache
-from IPython.utils import coloransi, py3compat
-
-prompt = 'ipdb> '
-
-# We have to check this directly from sys.argv, config struct not yet available
-from pdb import Pdb as OldPdb
-
-
-# Allow the set_trace code to operate outside of an ipython instance, even if
-# it does so with some limitations. The rest of this support is implemented in
-# the Tracer constructor.
-
-def make_arrow(pad):
- """generate the leading arrow in front of traceback or debugger"""
- if pad >= 2:
- return '-' * (pad - 2) + '> '
- elif pad == 1:
- return '>'
- return ''
-
-
-def BdbQuit_excepthook(et, ev, tb, excepthook=None):
- """Exception hook which handles `BdbQuit` exceptions.
-
- All other exceptions are processed using the `excepthook`
- parameter.
- """
- warnings.warn("`BdbQuit_excepthook` is deprecated since version 5.1",
- DeprecationWarning, stacklevel=2)
- if et == bdb.BdbQuit:
- print('Exiting Debugger.')
- elif excepthook is not None:
- excepthook(et, ev, tb)
- else:
- # Backwards compatibility. Raise deprecation warning?
- BdbQuit_excepthook.excepthook_ori(et, ev, tb)
-
-
-def BdbQuit_IPython_excepthook(self, et, ev, tb, tb_offset=None):
- warnings.warn(
- "`BdbQuit_IPython_excepthook` is deprecated since version 5.1",
- DeprecationWarning, stacklevel=2)
- print('Exiting Debugger.')
-
-
-class Tracer(object):
- """
- DEPRECATED
-
- Class for local debugging, similar to pdb.set_trace.
-
- Instances of this class, when called, behave like pdb.set_trace, but
-    provide IPython's enhanced capabilities.
-
- This is implemented as a class which must be initialized in your own code
- and not as a standalone function because we need to detect at runtime
- whether IPython is already active or not. That detection is done in the
- constructor, ensuring that this code plays nicely with a running IPython,
- while functioning acceptably (though with limitations) if outside of it.
- """
-
- @skip_doctest
- def __init__(self, colors=None):
- """
- DEPRECATED
-
- Create a local debugger instance.
-
- Parameters
- ----------
-
- colors : str, optional
- The name of the color scheme to use, it must be one of IPython's
- valid color schemes. If not given, the function will default to
- the current IPython scheme when running inside IPython, and to
- 'NoColor' otherwise.
-
- Examples
- --------
- ::
-
- from IPython.core.debugger import Tracer; debug_here = Tracer()
-
- Later in your code::
-
- debug_here() # -> will open up the debugger at that point.
-
- Once the debugger activates, you can use all of its regular commands to
- step through code, set breakpoints, etc. See the pdb documentation
- from the Python standard library for usage details.
- """
- warnings.warn("`Tracer` is deprecated since version 5.1, directly use "
- "`IPython.core.debugger.Pdb.set_trace()`",
- DeprecationWarning, stacklevel=2)
-
- ip = get_ipython()
- if ip is None:
- # Outside of ipython, we set our own exception hook manually
- sys.excepthook = functools.partial(BdbQuit_excepthook,
- excepthook=sys.excepthook)
- def_colors = 'NoColor'
- else:
- # In ipython, we use its custom exception handler mechanism
- def_colors = ip.colors
- ip.set_custom_exc((bdb.BdbQuit,), BdbQuit_IPython_excepthook)
-
- if colors is None:
- colors = def_colors
-
- # The stdlib debugger internally uses a modified repr from the `repr`
- # module, that limits the length of printed strings to a hardcoded
- # limit of 30 characters. That much trimming is too aggressive, let's
- # at least raise that limit to 80 chars, which should be enough for
- # most interactive uses.
- try:
- try:
- from reprlib import aRepr # Py 3
- except ImportError:
- from repr import aRepr # Py 2
- aRepr.maxstring = 80
- except:
- # This is only a user-facing convenience, so any error we encounter
- # here can be warned about but can be otherwise ignored. These
- # printouts will tell us about problems if this API changes
- import traceback
- traceback.print_exc()
-
- self.debugger = Pdb(colors)
-
- def __call__(self):
- """Starts an interactive debugger at the point where called.
-
- This is similar to the pdb.set_trace() function from the std lib, but
- using IPython's enhanced debugger."""
-
- self.debugger.set_trace(sys._getframe().f_back)
-
-
-def decorate_fn_with_doc(new_fn, old_fn, additional_text=""):
- """Make new_fn have old_fn's doc string. This is particularly useful
- for the ``do_...`` commands that hook into the help system.
- Adapted from a comp.lang.python posting
- by Duncan Booth."""
-
- def wrapper(*args, **kw):
- return new_fn(*args, **kw)
-
- if old_fn.__doc__:
- wrapper.__doc__ = old_fn.__doc__ + additional_text
- return wrapper
-
-
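# Editor's note: a minimal sketch of how decorate_fn_with_doc (above) is used:
# copy a parent command's docstring onto a wrapper so the debugger's help
# system still shows it. Assumes decorate_fn_with_doc is in scope; the
# function names here are hypothetical.

def base_cmd(arg):
    """Original help text, shown by the help system."""
    return arg

def new_cmd(arg):
    return base_cmd(arg)

new_cmd = decorate_fn_with_doc(new_cmd, base_cmd, "\n(extended notes)")
assert new_cmd.__doc__ == base_cmd.__doc__ + "\n(extended notes)"
# This is exactly the pattern used for do_u/do_up and do_d/do_down below.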
-def _file_lines(fname):
- """Return the contents of a named file as a list of lines.
-
- This function never raises an IOError exception: if the file can't be
- read, it simply returns an empty list."""
-
- try:
- outfile = open(fname)
- except IOError:
- return []
- else:
- out = outfile.readlines()
- outfile.close()
- return out
-
-
-class Pdb(OldPdb):
- """Modified Pdb class, does not load readline.
-
- For a standalone version that uses prompt_toolkit, see
- `IPython.terminal.debugger.TerminalPdb` and
- `IPython.terminal.debugger.set_trace()`
- """
-
- def __init__(self, color_scheme=None, completekey=None,
- stdin=None, stdout=None, context=5):
-
- # Parent constructor:
- try:
- self.context = int(context)
- if self.context <= 0:
- raise ValueError("Context must be a positive integer")
- except (TypeError, ValueError):
- raise ValueError("Context must be a positive integer")
-
- OldPdb.__init__(self, completekey, stdin, stdout)
-
- # IPython changes...
- self.shell = get_ipython()
-
- if self.shell is None:
- save_main = sys.modules['__main__']
- # No IPython instance running, we must create one
- from IPython.terminal.interactiveshell import \
- TerminalInteractiveShell
- self.shell = TerminalInteractiveShell.instance()
- # needed by any code which calls __import__("__main__") after
- # the debugger was entered. See also #9941.
- sys.modules['__main__'] = save_main
-
- if color_scheme is not None:
- warnings.warn(
- "The `color_scheme` argument is deprecated since version 5.1",
- DeprecationWarning)
- else:
- color_scheme = self.shell.colors
-
- self.aliases = {}
-
- # Create color table: we copy the default one from the traceback
- # module and add a few attributes needed for debugging
- self.color_scheme_table = exception_colors()
-
- # shorthands
- C = coloransi.TermColors
- cst = self.color_scheme_table
-
- cst['NoColor'].colors.prompt = C.NoColor
- cst['NoColor'].colors.breakpoint_enabled = C.NoColor
- cst['NoColor'].colors.breakpoint_disabled = C.NoColor
-
- cst['Linux'].colors.prompt = C.Green
- cst['Linux'].colors.breakpoint_enabled = C.LightRed
- cst['Linux'].colors.breakpoint_disabled = C.Red
-
- cst['LightBG'].colors.prompt = C.Blue
- cst['LightBG'].colors.breakpoint_enabled = C.LightRed
- cst['LightBG'].colors.breakpoint_disabled = C.Red
-
- cst['Neutral'].colors.prompt = C.Blue
- cst['Neutral'].colors.breakpoint_enabled = C.LightRed
- cst['Neutral'].colors.breakpoint_disabled = C.Red
-
- self.set_colors(color_scheme)
-
- # Add a python parser so we can syntax highlight source while
- # debugging.
- self.parser = PyColorize.Parser()
-
- # Set the prompt - the default prompt is '(Pdb)'
- self.prompt = prompt
-
- def set_colors(self, scheme):
- """Shorthand access to the color table scheme selector method."""
- self.color_scheme_table.set_active_scheme(scheme)
-
- def interaction(self, frame, traceback):
- try:
- OldPdb.interaction(self, frame, traceback)
- except KeyboardInterrupt:
- sys.stdout.write('\n' + self.shell.get_exception_only())
-
- def new_do_up(self, arg):
- OldPdb.do_up(self, arg)
-
- do_u = do_up = decorate_fn_with_doc(new_do_up, OldPdb.do_up)
-
- def new_do_down(self, arg):
- OldPdb.do_down(self, arg)
-
- do_d = do_down = decorate_fn_with_doc(new_do_down, OldPdb.do_down)
-
- def new_do_frame(self, arg):
- OldPdb.do_frame(self, arg)
-
- def new_do_quit(self, arg):
-
- if hasattr(self, 'old_all_completions'):
- self.shell.Completer.all_completions = self.old_all_completions
-
- return OldPdb.do_quit(self, arg)
-
- do_q = do_quit = decorate_fn_with_doc(new_do_quit, OldPdb.do_quit)
-
- def new_do_restart(self, arg):
- """Restart command. In the context of ipython this is exactly the same
- thing as 'quit'."""
- self.msg("Restart doesn't make sense here. Using 'quit' instead.")
- return self.do_quit(arg)
-
- def print_stack_trace(self, context=None):
- if context is None:
- context = self.context
- try:
- context = int(context)
- if context <= 0:
- raise ValueError("Context must be a positive integer")
- except (TypeError, ValueError):
- raise ValueError("Context must be a positive integer")
- try:
- for frame_lineno in self.stack:
- self.print_stack_entry(frame_lineno, context=context)
- except KeyboardInterrupt:
- pass
-
- def print_stack_entry(self, frame_lineno, prompt_prefix='\n-> ',
- context=None):
- if context is None:
- context = self.context
- try:
- context = int(context)
- if context <= 0:
- raise ValueError("Context must be a positive integer")
- except (TypeError, ValueError):
- raise ValueError("Context must be a positive integer")
- print(self.format_stack_entry(frame_lineno, '', context))
-
- # vds: >>
- frame, lineno = frame_lineno
- filename = frame.f_code.co_filename
- self.shell.hooks.synchronize_with_editor(filename, lineno, 0)
- # vds: <<
-
- def format_stack_entry(self, frame_lineno, lprefix=': ', context=None):
- if context is None:
- context = self.context
- try:
- context = int(context)
- if context <= 0:
- print("Context must be a positive integer")
- except (TypeError, ValueError):
- print("Context must be a positive integer")
- try:
- import reprlib # Py 3
- except ImportError:
- import repr as reprlib # Py 2
-
- ret = []
-
- Colors = self.color_scheme_table.active_colors
- ColorsNormal = Colors.Normal
- tpl_link = u'%s%%s%s' % (Colors.filenameEm, ColorsNormal)
- tpl_call = u'%s%%s%s%%s%s' % (Colors.vName, Colors.valEm, ColorsNormal)
- tpl_line = u'%%s%s%%s %s%%s' % (Colors.lineno, ColorsNormal)
- tpl_line_em = u'%%s%s%%s %s%%s%s' % (Colors.linenoEm, Colors.line,
- ColorsNormal)
-
- frame, lineno = frame_lineno
-
- return_value = ''
- if '__return__' in frame.f_locals:
- rv = frame.f_locals['__return__']
- # return_value += '->'
- return_value += reprlib.repr(rv) + '\n'
- ret.append(return_value)
-
- # s = filename + '(' + `lineno` + ')'
- filename = self.canonic(frame.f_code.co_filename)
- link = tpl_link % py3compat.cast_unicode(filename)
-
- if frame.f_code.co_name:
- func = frame.f_code.co_name
- else:
- func = ""
-
- call = ''
- if func != '?':
- if '__args__' in frame.f_locals:
- args = reprlib.repr(frame.f_locals['__args__'])
- else:
- args = '()'
- call = tpl_call % (func, args)
-
- # The level info should be generated in the same format pdb uses, to
- # avoid breaking the pdbtrack functionality of python-mode in *emacs.
- if frame is self.curframe:
- ret.append('> ')
- else:
- ret.append(' ')
- ret.append(u'%s(%s)%s\n' % (link, lineno, call))
-
- start = lineno - 1 - context // 2
- lines = ulinecache.getlines(filename)
- start = min(start, len(lines) - context)
- start = max(start, 0)
- lines = lines[start: start + context]
-
- for i, line in enumerate(lines):
- show_arrow = (start + 1 + i == lineno)
- # Emphasize the current frame's line and the arrow line
- linetpl = tpl_line_em if (frame is self.curframe or show_arrow) else tpl_line
- ret.append(self.__format_line(linetpl, filename,
- start + 1 + i, line,
- arrow=show_arrow))
- return ''.join(ret)
-
- def __format_line(self, tpl_line, filename, lineno, line, arrow=False):
- bp_mark = ""
- bp_mark_color = ""
-
- scheme = self.color_scheme_table.active_scheme_name
- new_line, err = self.parser.format2(line, 'str', scheme)
- if not err: line = new_line
-
- bp = None
- if lineno in self.get_file_breaks(filename):
- bps = self.get_breaks(filename, lineno)
- bp = bps[-1]
-
- if bp:
- Colors = self.color_scheme_table.active_colors
- bp_mark = str(bp.number)
- bp_mark_color = Colors.breakpoint_enabled
- if not bp.enabled:
- bp_mark_color = Colors.breakpoint_disabled
-
- numbers_width = 7
- if arrow:
- # This is the line with the error
- pad = numbers_width - len(str(lineno)) - len(bp_mark)
- num = '%s%s' % (make_arrow(pad), str(lineno))
- else:
- num = '%*s' % (numbers_width - len(bp_mark), str(lineno))
-
- return tpl_line % (bp_mark_color + bp_mark, num, line)
-
- def print_list_lines(self, filename, first, last):
- """The printing (as opposed to the parsing part of a 'list'
- command."""
- try:
- Colors = self.color_scheme_table.active_colors
- ColorsNormal = Colors.Normal
- tpl_line = '%%s%s%%s %s%%s' % (Colors.lineno, ColorsNormal)
- tpl_line_em = '%%s%s%%s %s%%s%s' % (Colors.linenoEm, Colors.line, ColorsNormal)
- src = []
- if filename == "" and hasattr(self, "_exec_filename"):
- filename = self._exec_filename
-
- for lineno in range(first, last + 1):
- line = ulinecache.getline(filename, lineno)
- if not line:
- break
-
- if lineno == self.curframe.f_lineno:
- line = self.__format_line(tpl_line_em, filename, lineno, line, arrow=True)
- else:
- line = self.__format_line(tpl_line, filename, lineno, line, arrow=False)
-
- src.append(line)
- self.lineno = lineno
-
- print(''.join(src))
-
- except KeyboardInterrupt:
- pass
-
- def do_list(self, arg):
- """Print lines of code from the current stack frame
- """
- self.lastcmd = 'list'
- last = None
- if arg:
- try:
- x = eval(arg, {}, {})
- if isinstance(x, tuple):
- first, last = x
- first = int(first)
- last = int(last)
- if last < first:
- # Assume it's a count
- last = first + last
- else:
- first = max(1, int(x) - 5)
- except:
- print('*** Error in argument:', repr(arg))
- return
- elif self.lineno is None:
- first = max(1, self.curframe.f_lineno - 5)
- else:
- first = self.lineno + 1
- if last is None:
- last = first + 10
- self.print_list_lines(self.curframe.f_code.co_filename, first, last)
-
- # vds: >>
- lineno = first
- filename = self.curframe.f_code.co_filename
- self.shell.hooks.synchronize_with_editor(filename, lineno, 0)
- # vds: <<
-
- do_l = do_list
-
- def getsourcelines(self, obj):
- lines, lineno = inspect.findsource(obj)
- if inspect.isframe(obj) and obj.f_globals is obj.f_locals:
- # must be a module frame: do not try to cut a block out of it
- return lines, 1
- elif inspect.ismodule(obj):
- return lines, 1
- return inspect.getblock(lines[lineno:]), lineno + 1
-
- def do_longlist(self, arg):
- """Print lines of code from the current stack frame.
-
- Shows more lines than 'list' does.
- """
- self.lastcmd = 'longlist'
- try:
- lines, lineno = self.getsourcelines(self.curframe)
- except OSError as err:
- self.error(err)
- return
- last = lineno + len(lines)
- self.print_list_lines(self.curframe.f_code.co_filename, lineno, last)
-
- do_ll = do_longlist
-
- def do_pdef(self, arg):
- """Print the call signature for any callable object.
-
- The debugger interface to %pdef"""
- namespaces = [('Locals', self.curframe.f_locals),
- ('Globals', self.curframe.f_globals)]
- self.shell.find_line_magic('pdef')(arg, namespaces=namespaces)
-
- def do_pdoc(self, arg):
- """Print the docstring for an object.
-
- The debugger interface to %pdoc."""
- namespaces = [('Locals', self.curframe.f_locals),
- ('Globals', self.curframe.f_globals)]
- self.shell.find_line_magic('pdoc')(arg, namespaces=namespaces)
-
- def do_pfile(self, arg):
- """Print (or run through pager) the file where an object is defined.
-
- The debugger interface to %pfile.
- """
- namespaces = [('Locals', self.curframe.f_locals),
- ('Globals', self.curframe.f_globals)]
- self.shell.find_line_magic('pfile')(arg, namespaces=namespaces)
-
- def do_pinfo(self, arg):
- """Provide detailed information about an object.
-
- The debugger interface to %pinfo, i.e., obj?."""
- namespaces = [('Locals', self.curframe.f_locals),
- ('Globals', self.curframe.f_globals)]
- self.shell.find_line_magic('pinfo')(arg, namespaces=namespaces)
-
- def do_pinfo2(self, arg):
- """Provide extra detailed information about an object.
-
- The debugger interface to %pinfo2, i.e., obj??."""
- namespaces = [('Locals', self.curframe.f_locals),
- ('Globals', self.curframe.f_globals)]
- self.shell.find_line_magic('pinfo2')(arg, namespaces=namespaces)
-
- def do_psource(self, arg):
- """Print (or run through pager) the source code for an object."""
- namespaces = [('Locals', self.curframe.f_locals),
- ('Globals', self.curframe.f_globals)]
- self.shell.find_line_magic('psource')(arg, namespaces=namespaces)
-
- if sys.version_info > (3,):
- def do_where(self, arg):
- """w(here)
- Print a stack trace, with the most recent frame at the bottom.
- An arrow indicates the "current frame", which determines the
- context of most commands. 'bt' is an alias for this command.
-
- Takes an optional number as argument: the number of context lines to
- print."""
- if arg:
- context = int(arg)
- self.print_stack_trace(context)
- else:
- self.print_stack_trace()
-
- do_w = do_where
-
-
-def set_trace(frame=None):
- """
- Start debugging from `frame`.
-
- If frame is not specified, debugging starts from caller's frame.
- """
- Pdb().set_trace(frame or sys._getframe().f_back)
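# Editor's note: a hedged usage sketch for the module deleted above. The
# non-deprecated entry point is set_trace(); Tracer is kept only for backward
# compatibility. Inside a running session:

from IPython.core.debugger import set_trace

def buggy(x):
    set_trace()  # opens the IPython-enhanced pdb right here
    return x / 0

# Once the prompt appears, the standard pdb commands (u/d, l/ll, w, q) work,
# plus the %pdef/%pdoc/%pinfo wrappers defined on the Pdb class above.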
diff --git a/venv/lib/python2.7/site-packages/IPython/core/display.py b/venv/lib/python2.7/site-packages/IPython/core/display.py
deleted file mode 100644
index 46330b3..0000000
--- a/venv/lib/python2.7/site-packages/IPython/core/display.py
+++ /dev/null
@@ -1,1308 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Top-level display functions for displaying object in different formats."""
-
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-from __future__ import print_function
-
-try:
- from base64 import encodebytes as base64_encode
-except ImportError:
- from base64 import encodestring as base64_encode
-
-from binascii import b2a_hex, hexlify
-import json
-import mimetypes
-import os
-import struct
-import sys
-import warnings
-
-from IPython.utils.py3compat import (string_types, cast_bytes_py2, cast_unicode,
- unicode_type)
-from IPython.testing.skipdoctest import skip_doctest
-
-__all__ = ['display', 'display_pretty', 'display_html', 'display_markdown',
- 'display_svg', 'display_png', 'display_jpeg', 'display_latex', 'display_json',
- 'display_javascript', 'display_pdf', 'DisplayObject', 'TextDisplayObject',
- 'Pretty', 'HTML', 'Markdown', 'Math', 'Latex', 'SVG', 'ProgressBar', 'JSON', 'Javascript',
- 'Image', 'clear_output', 'set_matplotlib_formats', 'set_matplotlib_close',
- 'publish_display_data', 'update_display', 'DisplayHandle']
-
-
-# -----------------------------------------------------------------------------
-# utility functions
-# -----------------------------------------------------------------------------
-
-def _safe_exists(path):
- """Check path, but don't let exceptions raise"""
- try:
- return os.path.exists(path)
- except Exception:
- return False
-
-
-def _merge(d1, d2):
- """Like update, but merges sub-dicts instead of clobbering at the top level.
-
- Updates d1 in-place
- """
-
- if not isinstance(d2, dict) or not isinstance(d1, dict):
- return d2
- for key, value in d2.items():
- d1[key] = _merge(d1.get(key), value)
- return d1
-
-
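# Editor's note: a small worked example of _merge's recursive behaviour,
# since "merges sub-dicts instead of clobbering" is easiest to see on data:

d1 = {'image/png': {'width': 100}, 'text/plain': {}}
d2 = {'image/png': {'height': 200}}
_merge(d1, d2)
# d1 is now {'image/png': {'width': 100, 'height': 200}, 'text/plain': {}};
# a plain dict.update() would have replaced the whole 'image/png' sub-dict.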
-def _display_mimetype(mimetype, objs, raw=False, metadata=None):
- """internal implementation of all display_foo methods
-
- Parameters
- ----------
- mimetype : str
- The mimetype to be published (e.g. 'image/png')
- objs : tuple of objects
- The Python objects to display, or if raw=True raw text data to
- display.
- raw : bool
- Are the data objects raw data or Python objects that need to be
- formatted before display? [default: False]
- metadata : dict (optional)
- Metadata to be associated with the specific mimetype output.
- """
- if metadata:
- metadata = {mimetype: metadata}
- if raw:
- # turn list of pngdata into list of { 'image/png': pngdata }
- objs = [{mimetype: obj} for obj in objs]
- display(*objs, raw=raw, metadata=metadata, include=[mimetype])
-
-
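# Editor's note: a sketch of the dispatch that _display_mimetype performs for
# every display_* helper defined below, using PNG data as the example (the
# file name is hypothetical):
#
#     png_bytes = open('plot.png', 'rb').read()
#     display_png(png_bytes, raw=True)
#     # equivalent to: display({'image/png': png_bytes}, raw=True,
#     #                        include=['image/png'])
#
# With raw=False the objects are formatted first, and only their 'image/png'
# representation (if any) is computed and published.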
-# -----------------------------------------------------------------------------
-# Main functions
-# -----------------------------------------------------------------------------
-# use * to indicate transient is keyword-only
-def publish_display_data(data, metadata=None, source=None, **kwargs):
- """Publish data and metadata to all frontends.
-
- See the ``display_data`` message in the messaging documentation for
- more details about this message type.
-
- The following MIME types are currently implemented:
-
- * text/plain
- * text/html
- * text/markdown
- * text/latex
- * application/json
- * application/javascript
- * image/png
- * image/jpeg
- * image/svg+xml
-
- Parameters
- ----------
- data : dict
- A dictionary having keys that are valid MIME types (like
- 'text/plain' or 'image/svg+xml') and values that are the data for
- that MIME type. The data itself must be a JSON'able data
- structure. Minimally all data should have the 'text/plain' data,
- which can be displayed by all frontends. If more than the plain
- text is given, it is up to the frontend to decide which
- representation to use.
- metadata : dict
- A dictionary for metadata related to the data. This can contain
- arbitrary key, value pairs that frontends can use to interpret
- the data. mime-type keys matching those in data can be used
- to specify metadata about particular representations.
- source : str, deprecated
- Unused.
- transient : dict, keyword-only
- A dictionary of transient data, such as display_id.
- """
- from IPython.core.interactiveshell import InteractiveShell
-
- display_pub = InteractiveShell.instance().display_pub
-
- # only pass transient if supplied,
- # to avoid errors with older ipykernel.
- # TODO: We could check for ipykernel version and provide a detailed upgrade message.
-
- display_pub.publish(
- data=data,
- metadata=metadata,
- **kwargs
- )
-
-
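# Editor's note: a minimal, hedged example of publishing a mimebundle through
# the low-level API above (works only inside a running IPython/Jupyter
# session; the metadata key is illustrative, not required):

publish_display_data(
    data={'text/plain': 'fallback text',
          'text/html': '<b>rich text</b>'},
    metadata={'text/html': {'isolated': False}},
)

# Every bundle should carry 'text/plain' so plain-text frontends can still
# render something, per the docstring above.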
-def _new_id():
- """Generate a new random text id with urandom"""
- return b2a_hex(os.urandom(16)).decode('ascii')
-
-
-def display(*objs, **kwargs):
- """Display a Python object in all frontends.
-
- By default all representations will be computed and sent to the frontends.
- Frontends can decide which representation is used and how.
-
- In terminal IPython this will be similar to using :func:`print`; for use in
- richer frontends, see the Jupyter notebook examples with rich display logic.
-
- Parameters
- ----------
- objs : tuple of objects
- The Python objects to display.
- raw : bool, optional
- Are the objects to be displayed already mimetype-keyed dicts of raw display data,
- or Python objects that need to be formatted before display? [default: False]
- include : list, tuple or set, optional
- A list of format type strings (MIME types) to include in the
- format data dict. If this is set *only* the format types included
- in this list will be computed.
- exclude : list, tuple or set, optional
- A list of format type strings (MIME types) to exclude in the format
- data dict. If this is set all format types will be computed,
- except for those included in this argument.
- metadata : dict, optional
- A dictionary of metadata to associate with the output.
- mime-type keys in this dictionary will be associated with the individual
- representation formats, if they exist.
- transient : dict, optional
- A dictionary of transient data to associate with the output.
- Data in this dict should not be persisted to files (e.g. notebooks).
- display_id : str or bool, optional
- Set an id for the display.
- This id can be used for updating this display area later via update_display.
- If given as `True`, generate a new `display_id`
- kwargs: additional keyword-args, optional
- Additional keyword-arguments are passed through to the display publisher.
-
- Returns
- -------
-
- handle: DisplayHandle
- Returns a handle on updatable displays for use with :func:`update_display`,
- if `display_id` is given. Returns :any:`None` if no `display_id` is given
- (default).
-
- Examples
- --------
-
- >>> class Json(object):
- ... def __init__(self, json):
- ... self.json = json
- ... def _repr_pretty_(self, pp, cycle):
- ... import json
- ... pp.text(json.dumps(self.json, indent=2))
- ... def __repr__(self):
- ... return str(self.json)
- ...
-
- >>> d = Json({1:2, 3: {4:5}})
-
- >>> print(d)
- {1: 2, 3: {4: 5}}
-
- >>> display(d)
- {
- "1": 2,
- "3": {
- "4": 5
- }
- }
-
- >>> def int_formatter(integer, pp, cycle):
- ... pp.text('I'*integer)
-
- >>> plain = get_ipython().display_formatter.formatters['text/plain']
- >>> plain.for_type(int, int_formatter)
-
- >>> display(7-5)
- II
-
- >>> del plain.type_printers[int]
- >>> display(7-5)
- 2
-
- See Also
- --------
-
- :func:`update_display`
-
- Notes
- -----
-
- In Python, objects can declare their textual representation using the
- `__repr__` method. IPython expands on this idea and allows objects to declare
- other, rich representations including:
-
- - HTML
- - JSON
- - PNG
- - JPEG
- - SVG
- - LaTeX
-
- A single object can declare some or all of these representations; all are
- handled by IPython's display system.
-
- The main idea of the first approach is that you have to implement special
- display methods when you define your class, one for each representation you
- want to use. Here is a list of the names of the special methods and the
- values they must return:
-
- - `_repr_html_`: return raw HTML as a string
- - `_repr_json_`: return a JSONable dict
- - `_repr_jpeg_`: return raw JPEG data
- - `_repr_png_`: return raw PNG data
- - `_repr_svg_`: return raw SVG data as a string
- - `_repr_latex_`: return LaTeX commands in a string surrounded by "$".
- - `_repr_mimebundle_`: return a full mimebundle containing the mapping
- from all mimetypes to data
-
- When you are directly writing your own classes, you can adapt them for
- display in IPython by following the above approach. But in practice, you
- often need to work with existing classes that you can't easily modify.
-
- You can refer to the documentation on IPython display formatters in order to
- register custom formatters for already existing types.
-
- .. versionadded:: 5.4 display available without import
- .. versionadded:: 6.1 display available without import
-
- Since IPython 5.4 and 6.1 :func:`display` is automatically made available to
- the user without import. If you are using display in a document that might
- be used in a pure python context or with older version of IPython, use the
- following import at the top of your file::
-
- from IPython.display import display
-
- """
- from IPython.core.interactiveshell import InteractiveShell
-
- if not InteractiveShell.initialized():
- # Directly print objects.
- print(*objs)
- return
-
- raw = kwargs.pop('raw', False)
- include = kwargs.pop('include', None)
- exclude = kwargs.pop('exclude', None)
- metadata = kwargs.pop('metadata', None)
- transient = kwargs.pop('transient', None)
- display_id = kwargs.pop('display_id', None)
- if transient is None:
- transient = {}
- if display_id:
- if display_id is True:
- display_id = _new_id()
- transient['display_id'] = display_id
- if kwargs.get('update') and 'display_id' not in transient:
- raise TypeError('display_id required for update_display')
- if transient:
- kwargs['transient'] = transient
-
- if not raw:
- format = InteractiveShell.instance().display_formatter.format
-
- for obj in objs:
- if raw:
- publish_display_data(data=obj, metadata=metadata, **kwargs)
- else:
- format_dict, md_dict = format(obj, include=include, exclude=exclude)
- if not format_dict:
- # nothing to display (e.g. _ipython_display_ took over)
- continue
- if metadata:
- # kwarg-specified metadata gets precedence
- _merge(md_dict, metadata)
- publish_display_data(data=format_dict, metadata=md_dict, **kwargs)
- if display_id:
- return DisplayHandle(display_id)
-
-
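# Editor's note: a short sketch of the display_id round trip described in the
# docstring above (runs only inside IPython/Jupyter):

handle = display('working...', display_id=True)  # returns a DisplayHandle
# ... long computation ...
handle.update('done')  # rewrites the same output area in place

# display_id=True makes display() mint a fresh random id via _new_id() and
# return a handle; without a display_id, display() returns None.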
-# use * for keyword-only display_id arg
-def update_display(obj, **kwargs):
- """Update an existing display by id
-
- Parameters
- ----------
-
- obj:
- The object with which to update the display
- display_id: keyword-only
- The id of the display to update
-
- See Also
- --------
-
- :func:`display`
- """
- sentinel = object()
- display_id = kwargs.pop('display_id', sentinel)
- if display_id is sentinel:
- raise TypeError("update_display() missing 1 required keyword-only argument: 'display_id'")
- kwargs['update'] = True
- display(obj, display_id=display_id, **kwargs)
-
-
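# Editor's note: update_display is the functional spelling of
# DisplayHandle.update(); the two calls below are equivalent (the id is
# illustrative):

update_display('done', display_id='progress-1')
DisplayHandle('progress-1').update('done')

# display_id is effectively keyword-only: omitting it raises TypeError, as
# enforced by the sentinel check above.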
-class DisplayHandle(object):
- """A handle on an updatable display
-
- Call `.update(obj)` to display a new object.
-
- Call `.display(obj)` to add a new instance of this display,
- and update existing instances.
-
- See Also
- --------
-
- :func:`display`, :func:`update_display`
-
- """
-
- def __init__(self, display_id=None):
- if display_id is None:
- display_id = _new_id()
- self.display_id = display_id
-
- def __repr__(self):
- return "<%s display_id=%s>" % (self.__class__.__name__, self.display_id)
-
- def display(self, obj, **kwargs):
- """Make a new display with my id, updating existing instances.
-
- Parameters
- ----------
-
- obj:
- object to display
- **kwargs:
- additional keyword arguments passed to display
- """
- display(obj, display_id=self.display_id, **kwargs)
-
- def update(self, obj, **kwargs):
- """Update existing displays with my id
-
- Parameters
- ----------
-
- obj:
- object to display
- **kwargs:
- additional keyword arguments passed to update_display
- """
- update_display(obj, display_id=self.display_id, **kwargs)
-
-
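# Editor's note: a hedged sketch of the two DisplayHandle methods: .display()
# emits a new output area bound to the same id (refreshing existing ones),
# while .update() only rewrites areas that already exist.

h = DisplayHandle()        # id auto-generated by _new_id()
h.display('first area')    # creates an output area
h.display('second area')   # creates another; both now show 'second area'
h.update('final')          # both areas now show 'final'; nothing new is added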
-def display_pretty(*objs, **kwargs):
- """Display the pretty (default) representation of an object.
-
- Parameters
- ----------
- objs : tuple of objects
- The Python objects to display, or if raw=True raw text data to
- display.
- raw : bool
- Are the data objects raw data or Python objects that need to be
- formatted before display? [default: False]
- metadata : dict (optional)
- Metadata to be associated with the specific mimetype output.
- """
- _display_mimetype('text/plain', objs, **kwargs)
-
-
-def display_html(*objs, **kwargs):
- """Display the HTML representation of an object.
-
- Note: If raw=False and the object does not have an HTML
- representation, no HTML will be shown.
-
- Parameters
- ----------
- objs : tuple of objects
- The Python objects to display, or if raw=True raw HTML data to
- display.
- raw : bool
- Are the data objects raw data or Python objects that need to be
- formatted before display? [default: False]
- metadata : dict (optional)
- Metadata to be associated with the specific mimetype output.
- """
- _display_mimetype('text/html', objs, **kwargs)
-
-
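# Editor's note: the raw flag matters most for display_html, so a two-line
# sketch (df is a hypothetical object with a _repr_html_ method):
#
#     display_html('<b>bold</b>', raw=True)  # the string IS the HTML payload
#     display_html(df)                       # formatted via df._repr_html_()
#
# Per the note above, with raw=False an object lacking an HTML representation
# displays nothing at all.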
-def display_markdown(*objs, **kwargs):
- """Displays the Markdown representation of an object.
-
- Parameters
- ----------
- objs : tuple of objects
- The Python objects to display, or if raw=True raw markdown data to
- display.
- raw : bool
- Are the data objects raw data or Python objects that need to be
- formatted before display? [default: False]
- metadata : dict (optional)
- Metadata to be associated with the specific mimetype output.
- """
-
- _display_mimetype('text/markdown', objs, **kwargs)
-
-
-def display_svg(*objs, **kwargs):
- """Display the SVG representation of an object.
-
- Parameters
- ----------
- objs : tuple of objects
- The Python objects to display, or if raw=True raw svg data to
- display.
- raw : bool
- Are the data objects raw data or Python objects that need to be
- formatted before display? [default: False]
- metadata : dict (optional)
- Metadata to be associated with the specific mimetype output.
- """
- _display_mimetype('image/svg+xml', objs, **kwargs)
-
-
-def display_png(*objs, **kwargs):
- """Display the PNG representation of an object.
-
- Parameters
- ----------
- objs : tuple of objects
- The Python objects to display, or if raw=True raw png data to
- display.
- raw : bool
- Are the data objects raw data or Python objects that need to be
- formatted before display? [default: False]
- metadata : dict (optional)
- Metadata to be associated with the specific mimetype output.
- """
- _display_mimetype('image/png', objs, **kwargs)
-
-
-def display_jpeg(*objs, **kwargs):
- """Display the JPEG representation of an object.
-
- Parameters
- ----------
- objs : tuple of objects
- The Python objects to display, or if raw=True raw JPEG data to
- display.
- raw : bool
- Are the data objects raw data or Python objects that need to be
- formatted before display? [default: False]
- metadata : dict (optional)
- Metadata to be associated with the specific mimetype output.
- """
- _display_mimetype('image/jpeg', objs, **kwargs)
-
-
-def display_latex(*objs, **kwargs):
- """Display the LaTeX representation of an object.
-
- Parameters
- ----------
- objs : tuple of objects
- The Python objects to display, or if raw=True raw latex data to
- display.
- raw : bool
- Are the data objects raw data or Python objects that need to be
- formatted before display? [default: False]
- metadata : dict (optional)
- Metadata to be associated with the specific mimetype output.
- """
- _display_mimetype('text/latex', objs, **kwargs)
-
-
-def display_json(*objs, **kwargs):
- """Display the JSON representation of an object.
-
- Note that not many frontends support displaying JSON.
-
- Parameters
- ----------
- objs : tuple of objects
- The Python objects to display, or if raw=True raw json data to
- display.
- raw : bool
- Are the data objects raw data or Python objects that need to be
- formatted before display? [default: False]
- metadata : dict (optional)
- Metadata to be associated with the specific mimetype output.
- """
- _display_mimetype('application/json', objs, **kwargs)
-
-
-def display_javascript(*objs, **kwargs):
- """Display the Javascript representation of an object.
-
- Parameters
- ----------
- objs : tuple of objects
- The Python objects to display, or if raw=True raw javascript data to
- display.
- raw : bool
- Are the data objects raw data or Python objects that need to be
- formatted before display? [default: False]
- metadata : dict (optional)
- Metadata to be associated with the specific mimetype output.
- """
- _display_mimetype('application/javascript', objs, **kwargs)
-
-
-def display_pdf(*objs, **kwargs):
- """Display the PDF representation of an object.
-
- Parameters
- ----------
- objs : tuple of objects
- The Python objects to display, or if raw=True raw PDF data to
- display.
- raw : bool
- Are the data objects raw data or Python objects that need to be
- formatted before display? [default: False]
- metadata : dict (optional)
- Metadata to be associated with the specific mimetype output.
- """
- _display_mimetype('application/pdf', objs, **kwargs)
-
-
-# -----------------------------------------------------------------------------
-# Smart classes
-# -----------------------------------------------------------------------------
-
-
-class DisplayObject(object):
- """An object that wraps data to be displayed."""
-
- _read_flags = 'r'
- _show_mem_addr = False
-
- def __init__(self, data=None, url=None, filename=None):
- """Create a display object given raw data.
-
- When this object is returned by an expression or passed to the
- display function, it will result in the data being displayed
- in the frontend. The MIME type of the data should match the
- subclasses used, so the Png subclass should be used for 'image/png'
- data. If the data is a URL, the data will first be downloaded
- and then displayed.
-
- Parameters
- ----------
- data : unicode, str or bytes
- The raw data or a URL or file to load the data from
- url : unicode
- A URL to download the data from.
- filename : unicode
- Path to a local file to load the data from.
- """
- if data is not None and isinstance(data, string_types):
- if data.startswith('http') and url is None:
- url = data
- filename = None
- data = None
- elif _safe_exists(data) and filename is None:
- url = None
- filename = data
- data = None
-
- self.data = data
- self.url = url
- self.filename = None if filename is None else unicode_type(filename)
-
- self.reload()
- self._check_data()
-
- def __repr__(self):
- if not self._show_mem_addr:
- cls = self.__class__
- r = "<%s.%s object>" % (cls.__module__, cls.__name__)
- else:
- r = super(DisplayObject, self).__repr__()
- return r
-
- def _check_data(self):
- """Override in subclasses if there's something to check."""
- pass
-
- def reload(self):
- """Reload the raw data from file or URL."""
- if self.filename is not None:
- with open(self.filename, self._read_flags) as f:
- self.data = f.read()
- elif self.url is not None:
- try:
- try:
- from urllib.request import urlopen # Py3
- except ImportError:
- from urllib2 import urlopen
- response = urlopen(self.url)
- self.data = response.read()
- # extract encoding from header, if there is one:
- encoding = None
- for sub in response.headers['content-type'].split(';'):
- sub = sub.strip()
- if sub.startswith('charset'):
- encoding = sub.split('=')[-1].strip()
- break
- # decode data, if an encoding was specified
- if encoding:
- self.data = self.data.decode(encoding, 'replace')
- except Exception:
- self.data = None
-
-
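# Editor's note: a sketch of the three mutually exclusive inputs that
# DisplayObject.__init__ dispatches on (URL and file name are hypothetical):
#
#     HTML('<b>inline markup</b>')               # data: used as-is
#     HTML(url='http://example.com/page.html')   # url: fetched by reload()
#     HTML(filename='report.html')               # filename: read from disk
#
# A bare string starting with 'http' is treated as a URL, and one naming an
# existing file as a filename -- the heuristic at the top of __init__ above.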
-class TextDisplayObject(DisplayObject):
- """Validate that display data is text"""
-
- def _check_data(self):
- if self.data is not None and not isinstance(self.data, string_types):
- raise TypeError("%s expects text, not %r" % (self.__class__.__name__, self.data))
-
-
-class Pretty(TextDisplayObject):
-
- def _repr_pretty_(self, pp, cycle):
- return pp.text(self.data)
-
-
-class HTML(TextDisplayObject):
-
- def _repr_html_(self):
- return self.data
-
- def __html__(self):
- """
- This method exists to inform other HTML-using modules (e.g. Markupsafe,
- htmltag, etc) that this object is HTML and does not need things like
- special characters (<>&) escaped.
- """
- return self._repr_html_()
-
-
-class Markdown(TextDisplayObject):
-
- def _repr_markdown_(self):
- return self.data
-
-
-class Math(TextDisplayObject):
-
- def _repr_latex_(self):
- s = self.data.strip('$')
- return "$$%s$$" % s
-
-
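# Editor's note: the Math/Latex distinction in one hedged example:
#
#     Math(r'\int_0^1 x^2\,dx = \frac{1}{3}')  # wrapped in $$...$$ (display)
#     Latex(r'This is \emph{inline} \LaTeX.')  # passed through verbatim
#
# Because Math strips any surrounding '$' first, callers may pass '$x$' or
# 'x' interchangeably.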
-class Latex(TextDisplayObject):
-
- def _repr_latex_(self):
- return self.data
-
-
-class SVG(DisplayObject):
- _read_flags = 'rb'
- # wrap data in a property, which extracts the