-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathFOON_generalization.py
2042 lines (1589 loc) · 95.9 KB
/
FOON_generalization.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
'''
FOON: Generalization Methods (FOON_generalization):
(last updated: 21st April, 2022):
-------------------------------------------
-- Written and maintained by:
* David Paulius ([email protected] / [email protected])
* Md Sadman Sakib ([email protected])
NOTE: If using this program and/or annotations provided by our lab, please kindly cite our papers
so that others may find our work:
* Paulius et al. 2016 - https://ieeexplore.ieee.org/abstract/document/7759413/
* Paulius et al. 2018 - https://ieeexplore.ieee.org/abstract/document/8460200/
'''
import FOON_classes as FOON
import collections, time, tqdm, os, getopt, sys
# -- write timestamp of when file was created for record-keeping:
from datetime import datetime

last_updated = '21st April, 2022'

# -- verbose flag to print any extra comments or prompts for debugging:
verbose = False
file_name = None

# -- path to the Concept-Net numberbatch (word2vec) model file:
# BUG FIX: _load_conceptnet_model() declares 'global path_to_ConceptNet' and reads it,
# but the name was never defined at module level, causing a NameError on first use;
# initialize it here so the run-time prompt in that function can kick in instead.
path_to_ConceptNet = None

# NOTE: this dictionary is used to store the similarity value between pairs of objects:
object_similarity_index = {}

# NOTE: storing the sense id for objects (based on WordNet) or to denote Concept-Net sense -- these were verified semi-automatically using parser!
FOON_objectSenses = {}

# NOTE: the following are dictionaries used for mapping categories to object labels (for generalization of FOON):
FOON_objectClusters = {}; FOON_objectCluster_index = {}

# NOTE: these lists are used for the generalization of FOON:
# -- Two "generalized" versions of FOON:
#	1. FOON-EXP - expanded version of FOON that uses WordNet/Concept-Net similarities to create new units;
#		-- this would use the regular FOON lists from above since we perform expansion and read the new file created
#	2. FOON-GEN - compressed version of FOON that uses object categories
# -- please refer to Paulius et al. 2018 for more explanation on these approaches.
FOON_functionalUnits_compressed = []; FOON_nodes_compressed = []; FOON_outputsToUnits_compressed = {}

# -- is_expansion_done :- flag to indicate whether expansion has already been applied to the loaded FOON graph:
is_expansion_done = False

# -- is_compression_done :- flag to indicate whether compression has already been applied to the loaded FOON graph:
is_compression_done = False

# -- copies of dictionaries from FGA (FOON Graph Analyzer):
FOON_functionalUnits, FOON_nodes, FOON_outputsToUnits, FOON_objectsToUnits, FOON_functionalUnitMap = None, None, None, None, None
FOON_objectLabels, FOON_motionLabels, FOON_stateLabels = {}, {}, {}

# -- references to the word embedding models that will be loaded and possibly re-used:
w2v_conceptnet = None
spacy_model = None

expansion_threshold, expansion_model = None, None
def _copyDicts(data):
# NOTE: in Python, global variables are only global within the scope of a module (i.e., file);
# therefore, this function is used to bypass that by copying references from FGA for the necessary
# lists or dictionaries for search.
# -- assigning references from FGA to the FRT module:
global goal_object_type, goal_state_type
if 'goal_object_type' in data and 'goal_state_type' in data:
goal_object_type, goal_state_type = data['goal_object_type'], data['goal_state_type']
global FOON_functionalUnits, FOON_nodes, FOON_outputsToUnits, FOON_objectsToUnits, FOON_functionalUnitMap
FOON_functionalUnits = data['fu_list']
FOON_nodes = data['nodes_list']
FOON_outputsToUnits = data['outputs_to_fu']
FOON_objectsToUnits = data['objs_to_fu']
FOON_functionalUnitMap = data['fu_to_fu']
# -- assigning references to labels read from index files:
global FOON_objectLabels, FOON_motionLabels, FOON_stateLabels
if 'labels' in data:
FOON_objectLabels = data['labels']['objects']
FOON_motionLabels = data['labels']['motions']
FOON_stateLabels = data['labels']['states']
# -- getting object senses (if needed for WordNet or Concept-Net):
global FOON_objectSenses
if 'obj_senses' in data:
FOON_objectSenses = data['obj_senses']
global file_name
if 'FOON_file_name' in data:
file_name = data['FOON_file_name']
#enddef
def _startGeneralization(args):
    """Dispatch to the requested FOON generalization procedure.

    :param args: dict with key 'method' (1 selects expansion, anything else
        selects compression).
    :return: name of the file produced by the chosen procedure.
    """
    # -- method 1 -> FOON-EXP (expansion); otherwise -> FOON-GEN (compression):
    if args['method'] == 1:
        return _expandNetwork_text()
    return _compressNetwork()
#enddef
def _load_conceptnet_model():
    """Load the Concept-Net numberbatch word2vec model into 'w2v_conceptnet'.

    Prompts for the model file path if 'path_to_ConceptNet' is unset. On any
    failure an error is printed and 'w2v_conceptnet' is left as-is (None).
    """
    global path_to_ConceptNet, w2v_conceptnet
    # -- check if a path has already been specified during run-time:
    # BUG FIX: read through globals().get() so a missing module-level definition of
    # 'path_to_ConceptNet' does not raise a NameError here; prompt for it instead.
    if not globals().get('path_to_ConceptNet'):
        path_to_ConceptNet = input(' -- Enter the full path and name to the Concept-Net model file: > ')

    # NOTE: we need to use gensim to load the numberbatch file:
    try:
        from gensim.models import KeyedVectors
    except ImportError:
        print(' -- ERROR: Missing gensim library!')
        print("\t-- Please install gensim using 'pip install gensim' in terminal.")
        print("\t-- Refer to https://radimrehurek.com/gensim/install.html for more details!")
        return

    # -- load word embeddings for Concept-Net:
    try:
        w2v_conceptnet = KeyedVectors.load_word2vec_format(path_to_ConceptNet, binary=True)
    except Exception:
        # BUG FIX: was a bare 'except:' (which also swallows SystemExit/KeyboardInterrupt);
        # 'except Exception' keeps the best-effort behavior without masking interrupts.
        print(' -- ERROR: Problem loading word2vec model file!')
        print('\t-- Please download it here and try again: https://github.com/commonsense/Concept-Net-numberbatch')
    #endtry
#enddef
def _load_spacy_model():
    """Load spaCy's large English model into the global 'spacy_model'.

    Exits the program with a non-zero status if spaCy or the
    'en_core_web_lg' model package is not installed.
    """
    global spacy_model
    try:
        import spacy
    except ImportError:
        print(' -- ERROR: Missing spaCy library!')
        print("\t-- Please install spacy with the instructions here: https://spacy.io/usage")
        # BUG FIX: use sys.exit instead of the interactive-only 'exit()' helper,
        # and signal failure with a non-zero status code:
        sys.exit(1)

    try:
        # -- tagger/parser pipeline components are not needed for word-vector
        #    similarity, so disable them to speed up loading:
        spacy_model = spacy.load("en_core_web_lg", disable=["tagger", "parser"])
    except OSError:
        # BUG FIX: was a bare 'except:'; spaCy raises OSError when the model
        # package cannot be found/loaded, so catch that specifically.
        print(' -- ERROR: Problem loading word2vec model file!')
        print("\t-- Please download the model via 'python -m spacy download en_core_web_lg' (according to https://spacy.io/usage)")
        sys.exit(1)
    #endtry
#enddef
def _computeSimilarity(X, Y, method='spacy'):
    """Compute a similarity score between two object labels X and Y.

    Similarity decides whether a word pair can be interchanged in FOON.
    Three back-ends are supported:
      1. WordNet  - classical, hierarchical lexical database (via NLTK).
      2. Concept-Net - numberbatch word embeddings (via gensim).
      3. spaCy    - pre-trained word-embedding model (default).

    :param X: first object label (string; must be a key of FOON_objectLabels).
    :param Y: second object label (string; must be a key of FOON_objectLabels).
    :param method: 'WordNet', 'ConceptNet'/'Concept-Net', or anything else for spaCy.
    :return: similarity value (typically in [0, 1]; 0 when a word is unknown).
    """
    global FOON_objectLabels

    def _computeSimilarity_wordnet():
        # -- by default, we will assign a value of 0:
        sim_value = 0.0
        # NOTE: this operation requires WordNet, which is available through NLTK.
        # -- Read more about this here: http://www.nltk.org/howto/wordnet.html
        try:
            from nltk.corpus import wordnet as wn
            from nltk.corpus.reader.wordnet import WordNetError
        except ImportError:
            print(" -- ERROR: Missing NLTK library and/or WordNet corpus! Cannot build object similarity index!")
            print("\t-- Please install it using 'pip install nltk' and then use 'nltk.download()'!")
            print("\t-- Refer to https://www.nltk.org/data.html for more details!")
            return

        # -- this requires definition of senses as used in WordNet:
        global FOON_objectSenses

        # -- if two words have the same object ID (due to being synonyms, aliases,
        #    instances, etc.), then we can say that they are exact matches:
        if FOON_objectLabels[X] == FOON_objectLabels[Y]:
            sim_value = 1.0
        else:
            try:
                if verbose:
                    print(" -- Comparing '" + str(X) + "' with '" + str(Y) + "'...")
                # NOTE: we must directly indicate WHICH synset definition (sense) to use,
                # as the first sense is not always food-related; these were manually
                # identified and placed in the FOON-object_index.txt file.
                # NOTE(review): the '.n.0' prefix assumes single-digit sense numbers -- confirm.
                word_1 = wn.synset(str(X.replace(' ', '_')) + '.n.0' + str(FOON_objectSenses[X]))
                word_2 = wn.synset(str(Y.replace(' ', '_')) + '.n.0' + str(FOON_objectSenses[Y]))
                # -- calculate Wu-Palmer metric for the words (other NLTK metrics also exist):
                sim_value = word_1.wup_similarity(word_2)
            except (WordNetError, KeyError):
                # BUG FIX: originally 'except WordNetError or KeyError:', which Python
                # evaluates as 'except WordNetError:' only -- KeyError was never caught.
                # WordNetError: object not found in WordNet; KeyError: missing sense entry.
                pass
            #endtry
        #endif
        return sim_value
    #enddef

    def _computeSimilarity_conceptnet():
        # NOTE: we use the numberbatch/word2vec model for Concept-Net, downloadable at:
        # https://github.com/commonsense/Concept-Net-numberbatch
        global w2v_conceptnet
        sim_value = 0
        # -- load the model into memory only once:
        if not w2v_conceptnet:
            _load_conceptnet_model()
        # -- identical object IDs (synonyms, aliases, instances) are exact matches:
        if FOON_objectLabels[X] == FOON_objectLabels[Y]:
            sim_value = 1.0
        else:
            try:
                if verbose:
                    print(" -- Comparing '" + str(X) + "' with '" + str(Y) + "'...")
                # -- need to replace whitespaces with underscores:
                word_1 = X.replace(' ', '_'); word_2 = Y.replace(' ', '_')
                # NOTE: use gensim's similarity method (KeyedVectors) to calculate relatedness:
                sim_value = w2v_conceptnet.similarity(str(word_1), str(word_2))
            except KeyError:
                # NOTE: one of the words or expressions was not in the Word2Vec vocabulary:
                pass
        #endif
        return sim_value
    #enddef

    def _computeSimilarity_spacy():
        # NOTE: we use the word embedding model from spaCy: https://spacy.io/usage
        global spacy_model
        # BUG FIX: initialize sim_value up front; previously a KeyError raised before
        # assignment left it unbound, crashing with UnboundLocalError at the return.
        sim_value = 0
        # -- load the model into memory only once:
        if not spacy_model:
            _load_spacy_model()
        # -- identical object IDs (synonyms, aliases, instances) are exact matches:
        if FOON_objectLabels[X] == FOON_objectLabels[Y]:
            sim_value = 1.0
        else:
            try:
                if verbose:
                    print(" -- Comparing '" + str(X) + "' with '" + str(Y) + "'...")
                # -- need to replace whitespaces with underscores:
                word_1 = spacy_model(X.replace(' ', '_'))
                word_2 = spacy_model(Y.replace(' ', '_'))
                if word_1.has_vector is False or word_2.has_vector is False:
                    # -- if a word does not have an embedded representation, then just give value of 0:
                    sim_value = 0
                else:
                    # NOTE: use spaCy's similarity method to calculate relatedness:
                    sim_value = word_1.similarity(word_2)
                #endif
            except KeyError:
                # NOTE: one of the words or expressions was not in the vocabulary:
                pass
        #endif
        return sim_value
    #enddef

    if method == 'WordNet':
        similarity_value = _computeSimilarity_wordnet()
    elif method in ('ConceptNet', 'Concept-Net'):
        # BUG FIX: _prepareExpansion() reports the hyphenated spelling 'Concept-Net',
        # which previously failed the 'ConceptNet' comparison and silently fell
        # through to the spaCy branch; accept both spellings.
        similarity_value = _computeSimilarity_conceptnet()
    else:
        similarity_value = _computeSimilarity_spacy()
    #endif
    return similarity_value
#enddef
def _findObjectSubstitute(O, method="spacy", threshold=0.9, state_scoring=False):
    """Find existing FOON object labels that can substitute for object node O.

    :param O: a FOON.Object node; its label is compared to every label in
        FOON_objectLabels and its states are copied onto each substitute.
    :param method: similarity back-end name passed through to _computeSimilarity().
    :param threshold: minimum similarity for a label to qualify as a substitute.
    :param state_scoring: if True, additionally require that the substitute's
        average label-to-state similarity meets the same threshold.
    :return: list of newly created FOON.Object nodes mirroring O's state list.
    """
    # -- instead of going into the expansion (which may take a long time), just find direct substitutes for
    #    objects (typically for task tree retrieval's inputs and add them to the kitchen)
    # -- list of objects and their similarity values w.r.t. other objects:
    global FOON_objectLabels
    # -- objective is to populate a list of objects that are similar to this object:
    similar_objects = []
    this_obj = O.getObjectLabel()
    for other_obj in FOON_objectLabels:
        if this_obj == other_obj:
            # -- be sure to avoid using the same label again:
            continue
        # -- all similarity values are pre-computed and are found in the object_similarity_index dict:
        # NOTE(review): despite the comment above, the value is computed on demand here -- confirm.
        similarity_value = float(_computeSimilarity(this_obj, other_obj, method=method))
        if similarity_value >= threshold:
            # -- assign the similar object's name and ID to this newly created object:
            new_object = FOON.Object(objectID=FOON_objectLabels[other_obj], objectLabel=other_obj)
            # -- copy over the attributes of the object in question (i.e., O) over to the similar objects:
            for S in O.getStatesList():
                new_object.addNewState( list(S) )
            if state_scoring:
                # -- the idea is to use word embedding models to determine if a substitute object
                #    has some association to the states we are about to assign to it:
                # -- we will want an average similarity score that exceeds the required threshold:
                avg_state_similarity, count = 0.0, 0
                # -- for the word vector, we need to replace whitespace with underscores:
                object_label = str(other_obj).replace(' ', '_')
                for S in new_object.getStatesList():
                    # NOTE(review): assumes the state label sits at index 1 of each state entry -- confirm.
                    state_label = S[1].split(' ')
                    if len(state_label) < 2:
                        # -- usually single-worded state labels will be better suited for measurement
                        #    and we will avoid states like 'ingredients inside' or 'in bowl'
                        value = float(_computeSimilarity(object_label, state_label[0], method=method))
                        avg_state_similarity += value
                        # -- increment counter for measurable states:
                        count += 1
                    #endif
                #endfor
                # -- if number of measurable states is 0 or average is 0, then do not add this object:
                if count == 0 or avg_state_similarity == 0:
                    continue
                # -- if the average score is below the threshold, then do not add this object:
                avg_state_similarity = avg_state_similarity / count * 1.0
                if avg_state_similarity < threshold:
                    continue
            #endif
            similar_objects.append(new_object)
        #endif
    #endfor
    return similar_objects
#enddef
def _prepareExpansion():
    """Interactively gather the settings needed for the FOON expansion process.

    Prompts (skipped when the corresponding globals are already set) for:
    the similarity back-end, the similarity threshold, an optional custom
    object-subset file, and whether to perform state-association checking.

    :return: tuple (expansion_model, expansion_threshold, custom_object_list,
        state_suggestion) where custom_object_list is None or a list of
        [label, weight] entries and state_suggestion is a bool.
    """
    print('\n -- [FOON-gen] : Please provide the following details that are needed for the expansion process:')
    global expansion_model, expansion_threshold

    if expansion_model is None:
        # -- we can either use WordNet or Concept-Net to measure similarity values
        #    (note, however, that WordNet is MUCH faster):
        response = input("\n\ta) Perform expansion using:\n\t\t1) WordNet,\n\t\t2) Concept-Net,\n\t\t3) spacy?\t[1/2/3] (default: 3 - spacy) > ")
        expansion_model = "spacy"
        if response == "1":
            expansion_model = "WordNet"
        elif response == "2":
            expansion_model = "Concept-Net"
    else:
        print("\n\ta) Expansion method indicated as '" + expansion_model + "'!")

    if expansion_threshold is None:
        # -- the Wu-Palmer metric returns a value from 0 to 1.0,
        #    where 1.0 means that two words are synonymous:
        expansion_threshold = 0.9
        try:
            response = input("\n\tb) Provide a threshold value for object similarity (between 0.0 and 1.0 -- default: 0.9) > ")
            expansion_threshold = float(response)
        except ValueError:
            # -- keep the 0.9 default on a blank/non-numeric response:
            pass
    else:
        print("\n\tb) Threshold of " + str(expansion_threshold) + " has already been given!")

    # -- instead of attempting to expand a FOON based on all possible objects found in
    #    the object index of FOON, we can permit a customized subset of objects:
    custom_object_list = None
    use_custom_list = input("\n\tc) Use all objects for semantic similarity file? [Y/N] (default: Y) > ")
    if use_custom_list.lower() == 'n':
        custom_object_list = []
        # BUG FIX: the custom index file was opened without ever being closed, and
        # labels retained their trailing newline; use a context manager and strip lines.
        with open(input(" -- Please type in PATH and NAME to custom object index: > "), 'r') as _file:
            for L in _file.readlines():
                if L.startswith("//"):
                    continue
                _parts = L.strip().split("\t")
                _object = _parts[1]
                if len(_parts) == 2:
                    # -- no weight column: default weight of 1:
                    custom_object_list.append([_object, 1])
                else:
                    custom_object_list.append([_object, _parts[2]])
                #endif
            #endfor
        print(' -- Added a total of ' + str(len(custom_object_list)) + ' items for expansion!')
    #endif

    # -- idea: use Concept-Net to decide on states being similar to one another (still needs testing):
    use_state_suggestion = input('\n\td) Perform state association checking (to see if objects match assigned states)? (default: N) [Y/N] > ')
    state_suggestion = True if use_state_suggestion.lower() == "y" else False
    print()
    return expansion_model, expansion_threshold, custom_object_list, state_suggestion
#enddef
def _expandNetwork_nontext():
    """Create FOON-EXP (expanded FOON) via object-level (non-text) substitution.

    For every object node in the level-3 universal FOON, similar objects are
    found through word similarity (WordNet / Concept-Net / spaCy); every
    functional unit that uses the original object is copied with the
    substitute spliced in (including references inside ingredients and
    related-object state fields). The result is written to a new file.

    :return: name of the file containing the expanded universal FOON.
    """
    expansion_model, given_threshold, custom_object_list, flag_state_check = _prepareExpansion()

    # NOTE: first, we start off with the base set of nodes and functional units from level 3:
    global FOON_nodes, FOON_functionalUnits, FOON_objectLabels, FOON_objectsToUnits

    expanded_FOON, expanded_nodes = [], []
    # -- copy over existing objects:
    for node in FOON_nodes[2]:
        expanded_nodes.append(node)
    for FU in FOON_functionalUnits[2]:
        expanded_FOON.append(FU)

    # BUG FIX: custom_object_list holds [label, weight] pairs (see _prepareExpansion),
    # so a membership test of a plain label against it could never match; compare
    # against the extracted set of labels instead.
    custom_labels = {entry[0] for entry in custom_object_list} if custom_object_list else None

    print('\n -- [FOON-EXP] : Beginning expansion...')

    # NOTE: for expansion, we use the highest level of FOON (level 3), since it will be
    # the most "complete" version -- this is why we use "FOON_nodes[2]".
    for N in tqdm.tqdm(FOON_nodes[2], desc='Performing expansion...'):
        if not isinstance(N, FOON.Object):
            # -- the nodes list contains both objects and motions; skip motion nodes:
            continue
        if custom_labels and N.getObjectLabel() not in custom_labels:
            # -- a pre-defined subset of objects was given for expansion and
            #    this object is not on the list, so skip it:
            continue

        # -- we look for objects that already exist in FOON to create new objects deemed "similar":
        expanded_objects = _findObjectSubstitute(N, method=expansion_model, threshold=given_threshold, state_scoring=flag_state_check)

        for obj in expanded_objects:
            # -- each expanded object equals N except for its name and ID;
            #    make sure it does not already exist among FOON's nodes:
            index = -1
            for M in expanded_nodes:
                if not isinstance(M, FOON.Object):
                    continue
                # -- check if copied object already exists in the list of nodes:
                if obj.equals_functions[2](M):
                    index = expanded_nodes.index(M)
                    break
                #endif
            #endfor

            if index == -1:
                # -- object is new: append it to FOON's nodes:
                index = len(expanded_nodes)
                expanded_nodes.append(obj)
                if verbose:
                    print("added:")
                    expanded_nodes[index].print_functions[2]()
                    print('to replace:')
                    N.print_functions[2]()
                    print('-----------')
                #endif
            #endif

            # -- mapping to the copied object as object X (we will replace instances of Y = N with X):
            copiedObject = expanded_nodes[index]

            # -- visit every functional unit that involves N:
            for FU in FOON_objectsToUnits[2][N]:
                newFU = FOON.FunctionalUnit()  # -- blank functional unit object to keep track of copy
                changed = 0  # -- counts substitutions made to this functional unit

                # -- we begin by looking at the reference unit's OUTPUT objects...
                for M in FU.getOutputNodes():
                    # 1) The EXACT instance of Y: directly add the newly copied object:
                    if N.equals_lvl3(M):
                        newFU.addObjectNode(copiedObject, is_input=False, is_active_motion=FU.getOutputDescriptor(FU.getOutputList().index(M)))
                        changed += 1
                    # 2) An instance of Y in a different state: still swap Y -> X but keep M's states:
                    elif N.getObjectType() == M.getObjectType() or N.getObjectLabel() == M.getObjectLabel():
                        adjustedObject = FOON.Object(objectID=copiedObject.getObjectType(), objectLabel=copiedObject.getObjectLabel())
                        adjustedObject.setStatesList( list(M.getStatesList()) )
                        newFU.addObjectNode(adjustedObject, is_input=False, is_active_motion=FU.getOutputDescriptor(FU.getOutputList().index(M)))
                        changed += 1
                    # 3) Not Y itself, but it may refer to Y (e.g., a container holding ingredient Y):
                    else:
                        adjustedObject = FOON.Object(objectID=M.getObjectType(), objectLabel=M.getObjectLabel())
                        adjustedObject.setStatesList( list(M.getStatesList()) )
                        # -- found_ref :- flag for whether any reference to Y was found and replaced:
                        found_ref = False
                        # 3a) Check contained ingredients for references to the original object:
                        adjusted_ingredients = M.getIngredients()
                        if N.getObjectLabel() in adjusted_ingredients:
                            adjusted_ingredients[adjusted_ingredients.index(N.getObjectLabel())] = copiedObject.getObjectLabel()
                            found_ref = True
                        if found_ref:
                            adjustedObject.setIngredients(adjusted_ingredients)
                        # 3b) Check the related-object field of each state for references to Y:
                        for x in range(len(adjustedObject.getStatesList())):
                            if N.getObjectLabel() == adjustedObject.getRelatedObject(x):
                                adjustedObject.setRelatedObject(x, relatedObj=copiedObject.getObjectLabel())
                                found_ref = True
                            #endif
                        #endfor
                        if found_ref:
                            # -- check to see if this adjusted object exists already in FOON:
                            index = -1
                            for J in expanded_nodes:
                                if not isinstance(J, FOON.Object):
                                    continue
                                if adjustedObject.equals_lvl3(J):
                                    index = expanded_nodes.index(J)
                                    break
                                #endif
                            #endfor
                            if index == -1:
                                index = len(expanded_nodes)
                                expanded_nodes.append(adjustedObject)
                            #endif
                            newFU.addObjectNode(expanded_nodes[index], is_input=False, is_active_motion=FU.getOutputDescriptor(FU.getOutputList().index(M)))
                            changed += 1
                        else:
                            # 3c) No reference to Y at all: add the original object as normal:
                            newFU.addObjectNode(M, is_input=False, is_active_motion=FU.getOutputDescriptor(FU.getOutputList().index(M)))
                        #endif
                    #endif
                #endfor

                # -- next, we take a look at the reference unit's INPUT objects
                #    (mirrors the OUTPUT logic above):
                # NOTE(review): the original code mixes 'FU.getInputDescriptor()[i]' and
                # 'FU.getInputDescriptor(i)' call forms below; both are preserved as-is --
                # confirm against FOON_classes which form is intended.
                for M in FU.getInputNodes():
                    # 1) The EXACT instance of Y:
                    if N.equals_lvl3(M):
                        newFU.addObjectNode(copiedObject, is_input=True, is_active_motion=FU.getInputDescriptor()[FU.getInputList().index(M)])
                        changed += 1
                    # 2) An instance of Y in a different state:
                    elif N.getObjectType() == M.getObjectType() or N.getObjectLabel() == M.getObjectLabel():
                        adjustedObject = FOON.Object(objectID=copiedObject.getObjectType(), objectLabel=copiedObject.getObjectLabel())
                        adjustedObject.setStatesList(M.getStatesList())
                        newFU.addObjectNode(adjustedObject, is_input=True, is_active_motion=FU.getInputDescriptor()[FU.getInputList().index(M)])
                        changed += 1
                    # 3) Not Y itself, but it may refer to Y:
                    else:
                        adjustedObject = FOON.Object(objectID=M.getObjectType(), objectLabel=M.getObjectLabel())
                        adjustedObject.setStatesList(M.getStatesList())
                        found_ref = False
                        # 3a) Check contained ingredients for references to the original object:
                        adjusted_ingredients = M.getIngredients()
                        if N.getObjectLabel() in adjusted_ingredients:
                            adjusted_ingredients[adjusted_ingredients.index(N.getObjectLabel())] = copiedObject.getObjectLabel()
                            found_ref = True
                        if found_ref:
                            adjustedObject.setIngredients(adjusted_ingredients)
                        # 3b) Check the related-object field of each state for references to Y:
                        for x in range(len(adjustedObject.getStatesList())):
                            if N.getObjectLabel() == adjustedObject.getRelatedObject(x):
                                adjustedObject.setRelatedObject(x, relatedObj=copiedObject.getObjectLabel())
                                found_ref = True
                            #endif
                        #endfor
                        if found_ref:
                            # -- check to see if this adjusted object exists already in FOON:
                            index = -1
                            for J in expanded_nodes:
                                if not isinstance(J, FOON.Object):
                                    continue
                                if adjustedObject.equals_lvl3(J):
                                    index = expanded_nodes.index(J)
                                    break
                                #endif
                            #endfor
                            if index == -1:
                                index = len(expanded_nodes)
                                expanded_nodes.append(adjustedObject)
                            #endif
                            newFU.addObjectNode(expanded_nodes[index], is_input=True, is_active_motion=FU.getInputDescriptor(FU.getInputList().index(M)))
                            changed += 1
                        else:
                            # 3c) No reference to Y at all: add the original object as normal:
                            newFU.addObjectNode(M, is_input=True, is_active_motion=FU.getInputDescriptor(FU.getInputList().index(M)))
                        #endif
                    #endif
                #endfor

                # -- create a new motion node of the exact same type; it is only kept
                #    if the functional unit was actually changed:
                tempMotion = FOON.Motion(motionID=FU.getMotion().getMotionType(), motionLabel=FU.getMotion().getMotionLabel())
                newFU.setTimes(FU.getStartTime(), FU.getEndTime())
                newFU.setSuccessRate(FU.getSuccessRate())
                newFU.setIndication(FU.getIndication())
                newFU.setMotion(tempMotion)

                # -- add the copied unit to FOON-EXP only if a substitution was made:
                if changed > 0:
                    # -- add the motion node for the new functional unit we created via expansion:
                    expanded_nodes.append(tempMotion)
                    # -- add the expanded functional unit if it does not exist:
                    already_exists = False
                    for _unit in expanded_FOON:
                        if _unit.equals_functions[2](newFU):
                            already_exists = True
                            break
                    if not already_exists:
                        expanded_FOON.append(newFU)
                        if verbose:
                            print('Original:')
                            FU.printFunctionalUnit_lvl3()
                            print('Expanded:')
                            newFU.printFunctionalUnit_lvl3()
                            input()
                        #endif
                    #endif
                #endif
            #endfor
        #endfor
    #endfor

    print(' -- [FOON-EXP] : Total number of expanded functional units: ' + str(len(expanded_FOON)))

    # -- now we will write the functional units to a new file:
    expanded_file = "expanded_FOON-thr=" + str(given_threshold) + "-method=" + str(expansion_model) + "-NT.txt"
    global file_name
    # BUG FIX: the '# Original File' header line was missing its newline (fusing it
    # with the '# Date created' line); the file is also now closed via a context
    # manager so it cannot leak on error.
    with open(expanded_file, 'w') as _file:
        _file.write('# Original File:\t' + file_name + '\n')
        _file.write('# Date created:\t' + str(datetime.today().strftime('%d.%m.%Y')) + '\n')
        _file.write('# Expansion Method:\t' + str(expansion_model) + '\n')
        _file.write('# Similarity Threshold:\t' + str(given_threshold) + '\n')
        _file.write('//\n')
        for FU in tqdm.tqdm(list(expanded_FOON)):
            _file.write(FU.getFunctionalUnitText())

    print(' -- [FOON-gen] : Expanded universal FOON written to ' + (expanded_file) + "' (using threshold=" + str(given_threshold) + ")!")

    global is_expansion_done
    is_expansion_done = True
    return expanded_file
#enddef
def _expandNetwork_text():
expansion_method, given_threshold, custom_object_list, flag_state_check = _prepareExpansion()
# NOTE: first, we start off with the base set of nodes and functional units from level 3:
global FOON_nodes, FOON_functionalUnits, FOON_objectLabels, FOON_objectsToUnits
# NOTE: We use a set structure to preserve uniqueness among strings of functional units:
new_units = set()
similarity_index = {}
for FU in FOON_functionalUnits[2]:
# -- this way, we note what we already have in our present, regular universal FOON:
new_units.add(FU.getFunctionalUnitText())
for X in tqdm.tqdm(FOON_objectLabels, desc='Performing expansion...'):
if custom_object_list and X not in custom_object_list:
# -- if there is a pre-defined list of objects to consider for expansion and an
# object is not on the list, skip it.
continue
for Y in FOON_objectLabels:
# -- we are looking for different object pairs and finding those that share significant similarity
if X == Y: continue
# -- use memoization if possible, else just look up similarity value:
similarity_value = 0
if (Y in similarity_index and X in similarity_index[Y]) or (X in similarity_index and Y in similarity_index[X]):
similarity_value = 1.0
else:
similarity_value = float(_computeSimilarity(X, Y, method=expansion_method))
#endif
if similarity_value >= given_threshold:
# -- to add some relations for quick lookups:
if X not in similarity_index:
similarity_index[X] = set()
if Y not in similarity_index:
similarity_index[Y] = set()
similarity_index[X].add(Y)
similarity_index[Y].add(X)
for N in FOON_nodes[2]:
if not isinstance(N, FOON.Object):
# -- remember that nodes list contains both objects and motions together, so skip over motion nodes:
continue
for FU in FOON_objectsToUnits[2][N]:
# -- we go through all functional units associated with a given object N:
# NOTE: this is how this section will work:
# 1. Iterate through every input object's text and make changes (while possibly noting relativity of states to objects)
# 2. Iterate through every output object's text with same criteria
# 3. Append text for inputs, outputs, and motion to one single string called 'copied_unit'.
# 4. Append single string from previous step to a set of strings referring to expanded units.
copied_unit, skip, found = str(), False, False
for x in range(FU.getNumberOfInputs()):
# -- first, we start with INPUT nodes:
object_text = str(FU.getInputNodeText(x))
if Y in object_text: # NOTE: checking if substring exists in string
# -- this means we found something similar to X that we will be substituting via text:
items = object_text.split('\n')
# NOTE: the following variables will ONLY be used to compute "associativity" of states to the new object:
flag_substitution = False # -- determines if a substitution has been made to this object
found = True
avg_state_similarity, count = 0.0, 0 # -- keep track of cumulative similarity and number of compared states
for y in range(len(items)):
line = items[y]
# NOTE: there are two main parts to check when substituting objects:
# 1. Check the object label and ID
# 2. Check the ingredients for each object
if line.startswith("O"):
# -- parsing through the object labels:
objectParts = line.split("O")
objectParts = objectParts[1].split("\t")
if objectParts[1] == Y:
# -- if we found instance of Y, we substitute with X:
objectParts[0] = "O" + str(FOON_objectLabels[X]); objectParts[1] = X
flag_substitution = True
else:
objectParts[0] = "O" + str(objectParts[0])
#endif
# -- combining new line together:
items[y] = '\t'.join(objectParts)
elif line.startswith("S"):
# -- parsing through the state labels, specifically for INGREDIENTS:
stateParts = line.split("\t"); stateParts = list(filter(None, stateParts))
if flag_substitution and flag_state_check:
# -- this means that we have swapped an object for another, so we should not care about ingredients:
state_label = str(stateParts[1]).split(' ')
# -- usually single-worded state labels will be better suited for measurement
# and we will avoid states like 'ingredients inside' or 'in bowl'
if len(state_label) < 2:
# -- for the word vector, we need to replace whitespace with underscores:
object_label = str(X).replace(' ', '_')
value = float(_computeSimilarity(object_label, state_label[0], method=expansion_method))
avg_state_similarity += value
# -- increment counter for measurable states:
count += 1
#endif
else:
if len(stateParts) > 2:
revised = None
# -- we can either have ingredients (enclosed in {...}) or a relative object (enclosed in [...]):
if '[' in stateParts[2] or ']' in stateParts[2]:
# -- relative object:
ingredients = [ stateParts[2] ]
ingredients = ingredients[0].split("[")
ingredients = ingredients[1].split("]")
revised = '[' + (X if ingredients[0] == Y else ingredients[0]) + ']'
else:
# -- ingredient object:
revised = '{'
ingredients = [ stateParts[2] ]
ingredients = ingredients[0].split("{")
ingredients = ingredients[1].split("}")
# -- we then need to make sure that there are ingredients to be read:
if len(ingredients) > 0:
ingredients = ingredients[0].split(",")
for i in range(len(ingredients)):
if ingredients[i] == Y:
ingredients[i] = X
revised += ingredients[i]
if i < (len(ingredients) - 1):
revised += ','
revised += '}'
#endif
stateParts[2] = revised
#endif
#endif
items[y] = '\t'.join(stateParts)
#endif
#endfor
if flag_state_check:
if count == 0:
copied_unit += '\n'.join(items)
else:
avg_state_similarity = avg_state_similarity / count * 1.0
if avg_state_similarity >= 0.3:
# -- this means that the states are somewhat related to the new object:
copied_unit += '\n'.join(items)
else:
# -- if it is below threshold, then we cannot safely substitute the object, so we should skip adding this functional unit:
skip = True
#endif
#endif
else:
copied_unit += '\n'.join(items)
#endif
else:
# -- this means the object has no trace of the substituted object:
copied_unit += object_text
#endif
#endfor
if skip or copied_unit == '':
continue
copied_unit += FU.getMotionForFile()
for x in range(FU.getNumberOfOutputs()):
# -- next, we look at OUTPUT nodes:
object_text = str(FU.getOutputNodeText(x))
if Y in object_text: # NOTE: checking if substring exists in string
# -- this means we found something similar to X that we will be substituting via text:
items = object_text.split('\n')
# NOTE: the following variables will ONLY be used to compute "associativity" of states to the new object:
flag_substitution = False # -- determines if a substitution has been made to this object
found = True
avg_state_similarity, count = 0.0, 0 # -- keep track of cumulative similarity and number of compared states
for y in range(len(items)):
line = items[y]
# NOTE: there are two main parts to check when substituting objects:
# 1. Check the object label and ID
# 2. Check the ingredients for each object
if line.startswith("O"):
# -- parsing through the object labels:
objectParts = line.split("O"); objectParts = objectParts[1].split("\t")
if objectParts[1] == Y:
# -- if we found instance of Y, we substitute with X:
objectParts[0] = "O" + str(FOON_objectLabels[X]); objectParts[1] = X
flag_substitution = True
else:
objectParts[0] = "O" + str(objectParts[0])
#endif
# -- combining new line together:
items[y] = '\t'.join(objectParts)
elif line.startswith("S"):
# -- parsing through the state labels, specifically for INGREDIENTS:
stateParts = line.split("\t"); stateParts = list(filter(None, stateParts))
if flag_substitution and flag_state_check:
# -- this means that we have swapped an object for another, so we should not care about ingredients:
state_label = str(stateParts[1]).split(' ')
# -- usually single-worded state labels will be better suited for measurement
# and we will avoid states like 'ingredients inside' or 'in bowl'
if len(state_label) < 2:
# -- for the word vector, we need to replace whitespace with underscores:
object_label = str(X).replace(' ', '_')
value = float(_computeSimilarity(object_label, state_label[0], method=expansion_method))
avg_state_similarity += value
# -- increment counter for measurable states:
count += 1
#endif
else:
if len(stateParts) > 2:
revised = None
# -- we can either have ingredients (enclosed in {...}) or a relative object (enclosed in [...]):
if '[' in stateParts[2] or ']' in stateParts[2]:
# -- relative object:
ingredients = [ stateParts[2] ]
ingredients = ingredients[0].split("[")
ingredients = ingredients[1].split("]")
revised = '[' + (X if ingredients[0] == Y else ingredients[0]) + ']'
else:
# -- ingredient object:
revised = '{'
ingredients = [ stateParts[2] ]
ingredients = ingredients[0].split("{")
ingredients = ingredients[1].split("}")
# -- we then need to make sure that there are ingredients to be read:
if len(ingredients) > 0:
ingredients = ingredients[0].split(",")
for i in range(len(ingredients)):