-
Notifications
You must be signed in to change notification settings - Fork 2.2k
/
Copy pathsat_parameters.proto
1600 lines (1335 loc) · 78.7 KB
/
sat_parameters.proto
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// Copyright 2010-2024 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto2";
package operations_research.sat;
option java_package = "com.google.ortools.sat";
option java_multiple_files = true;
option csharp_namespace = "Google.OrTools.Sat";
// Contains the definitions for all the sat algorithm parameters and their
// default values.
//
// NEXT TAG: 296
message SatParameters {
// In some context, like in a portfolio of search, it makes sense to name a
// given parameters set for logging purpose.
optional string name = 171 [default = ""];
// ==========================================================================
// Branching and polarity
// ==========================================================================
// Variables without activity (i.e. at the beginning of the search) will be
// tried in this preferred order.
enum VariableOrder {
  IN_ORDER = 0;  // As specified by the problem.
  IN_REVERSE_ORDER = 1;  // Reverse of the problem order.
  IN_RANDOM_ORDER = 2;  // In a random order.
}
optional VariableOrder preferred_variable_order = 1 [default = IN_ORDER];
// Specifies the initial polarity (true/false) when the solver branches on a
// variable. This can be modified later by the user, or the phase saving
// heuristic.
//
// Note(user): POLARITY_FALSE is usually a good choice because of the
// "natural" way to express a linear boolean problem.
enum Polarity {
  POLARITY_TRUE = 0;  // Branch on the "true" value first.
  POLARITY_FALSE = 1;  // Branch on the "false" value first.
  POLARITY_RANDOM = 2;  // Pick the initial branching value at random.
}
optional Polarity initial_polarity = 2 [default = POLARITY_FALSE];
// If this is true, then the polarity of a variable will be the last value it
// was assigned to, or its default polarity if it was never assigned since the
// call to ResetDecisionHeuristic().
//
// Actually, we use a newer version where we follow the last value in the
// longest non-conflicting partial assignment in the current phase.
//
// This is called 'literal phase saving'. For details see 'A Lightweight
// Component Caching Scheme for Satisfiability Solvers' K. Pipatsrisawat and
// A.Darwiche, In 10th International Conference on Theory and Applications of
// Satisfiability Testing, 2007.
optional bool use_phase_saving = 44 [default = true];
// If non-zero, then we change the polarity heuristic after that number
// of conflicts in an arithmetically increasing fashion. So x the first time,
// 2 * x the second time, etc...
optional int32 polarity_rephase_increment = 168 [default = 1000];
// The proportion of polarity chosen at random. Note that this takes
// precedence over the phase saving heuristic. This is different from
// initial_polarity:POLARITY_RANDOM because it will select a new random
// polarity each time the variable is branched upon instead of selecting one
// initially and then always taking this choice.
optional double random_polarity_ratio = 45 [default = 0.0];
// A number between 0 and 1 that indicates the proportion of branching
// variables that are selected randomly instead of choosing the first variable
// from the given variable_ordering strategy.
optional double random_branches_ratio = 32 [default = 0.0];
// Whether we use the ERWA (Exponential Recency Weighted Average) heuristic as
// described in "Learning Rate Based Branching Heuristic for SAT solvers",
// J.H.Liang, V. Ganesh, P. Poupart, K.Czarnecki, SAT 2016.
optional bool use_erwa_heuristic = 75 [default = false];
// The initial value of the variables' activity. A non-zero value only makes
// sense when use_erwa_heuristic is true. Experiments with a value of 1e-2
// together with the ERWA heuristic showed slightly better results than simply
// using zero. The idea is that when the "learning rate" of a variable becomes
// lower than this value, then we prefer to branch on never explored before
// variables. This is not in the ERWA paper.
optional double initial_variables_activity = 76 [default = 0.0];
// When this is true, then the variables that appear in any of the reasons of
// the variables in a conflict have their activity bumped. This is in addition
// to the variables in the conflict, and the ones that were used during
// conflict resolution.
optional bool also_bump_variables_in_conflict_reasons = 77 [default = false];
// ==========================================================================
// Conflict analysis
// ==========================================================================
// Do we try to minimize conflicts (greedily) when creating them.
enum ConflictMinimizationAlgorithm {
  NONE = 0;       // No conflict minimization.
  SIMPLE = 1;     // Simple (non-recursive) minimization.
  RECURSIVE = 2;  // Recursive minimization; this is the default.
  EXPERIMENTAL = 3;  // Experimental variant.
}
optional ConflictMinimizationAlgorithm minimization_algorithm = 4
[default = RECURSIVE];
// Whether to exploit the binary clauses to minimize learned clauses further.
// Note that the numeric values are not in declaration order here: value 4 was
// added after values 2 and 3.
enum BinaryMinizationAlgorithm {
  NO_BINARY_MINIMIZATION = 0;  // Do not use the binary implications.
  BINARY_MINIMIZATION_FIRST = 1;  // This is the default.
  BINARY_MINIMIZATION_FIRST_WITH_TRANSITIVE_REDUCTION = 4;
  BINARY_MINIMIZATION_WITH_REACHABILITY = 2;
  EXPERIMENTAL_BINARY_MINIMIZATION = 3;
}
optional BinaryMinizationAlgorithm binary_minimization_algorithm = 34
[default = BINARY_MINIMIZATION_FIRST];
// At a really low cost, during the 1-UIP conflict computation, it is easy to
// detect if some of the involved reasons are subsumed by the current
// conflict. When this is true, such clauses are detached and later removed
// from the problem.
optional bool subsumption_during_conflict_analysis = 56 [default = true];
// ==========================================================================
// Clause database management
// ==========================================================================
// Trigger a cleanup when this number of "deletable" clauses is learned.
optional int32 clause_cleanup_period = 11 [default = 10000];
// During a cleanup, we will always keep that number of "deletable" clauses.
// Note that this doesn't include the "protected" clauses.
optional int32 clause_cleanup_target = 13 [default = 0];
// During a cleanup, if clause_cleanup_target is 0, we will delete the
// clause_cleanup_ratio of "deletable" clauses instead of aiming for a fixed
// target of clauses to keep.
optional double clause_cleanup_ratio = 190 [default = 0.5];
// Each time a clause activity is bumped, the clause has a chance to be
// protected during the next cleanup phase. Note that clauses used as a reason
// are always protected.
enum ClauseProtection {
  PROTECTION_NONE = 0;  // No protection.
  PROTECTION_ALWAYS = 1;  // Protect all clauses whose activity is bumped.
  PROTECTION_LBD = 2;  // Only protect clauses with a better LBD.
}
optional ClauseProtection clause_cleanup_protection = 58
[default = PROTECTION_NONE];
// All the clauses with a LBD (literal blocks distance) lower or equal to this
// parameter will always be kept.
optional int32 clause_cleanup_lbd_bound = 59 [default = 5];
// The clauses that will be kept during a cleanup are the ones that come
// first under this order. We always keep or exclude ties together.
enum ClauseOrdering {
  // Order clauses by decreasing activity, then by increasing LBD.
  CLAUSE_ACTIVITY = 0;
  // Order clauses by increasing LBD, then by decreasing activity.
  CLAUSE_LBD = 1;
}
optional ClauseOrdering clause_cleanup_ordering = 60
[default = CLAUSE_ACTIVITY];
// Same as for the clauses, but for the learned pseudo-Boolean constraints.
optional int32 pb_cleanup_increment = 46 [default = 200];
optional double pb_cleanup_ratio = 47 [default = 0.5];
// ==========================================================================
// Variable and clause activities
// ==========================================================================
// Each time a conflict is found, the activities of some variables are
// increased by one. Then, the activity of all variables is multiplied by
// variable_activity_decay.
//
// To implement this efficiently, the activity of all the variables is not
// decayed at each conflict. Instead, the activity increment is multiplied by
// 1 / decay. When an activity reaches max_variable_activity_value, all the
// activities are multiplied by 1 / max_variable_activity_value.
optional double variable_activity_decay = 15 [default = 0.8];
optional double max_variable_activity_value = 16 [default = 1e100];
// The activity decay starts at 0.8 and is incremented by 0.01 every 5000
// conflicts until it reaches 0.95. This "hack" seems to work well and comes
// from:
//
// Glucose 2.3 in the SAT 2013 Competition - SAT Competition 2013
// http://edacc4.informatik.uni-ulm.de/SC13/solver-description-download/136
optional double glucose_max_decay = 22 [default = 0.95];
optional double glucose_decay_increment = 23 [default = 0.01];
optional int32 glucose_decay_increment_period = 24 [default = 5000];
// Clause activity parameters (same effect as the one on the variables).
optional double clause_activity_decay = 17 [default = 0.999];
optional double max_clause_activity_value = 18 [default = 1e20];
// ==========================================================================
// Restart
// ==========================================================================
// Restart algorithms.
//
// A reference for the more advanced ones is:
// Gilles Audemard, Laurent Simon, "Refining Restarts Strategies for SAT
// and UNSAT", Principles and Practice of Constraint Programming Lecture
// Notes in Computer Science 2012, pp 118-126
enum RestartAlgorithm {
  // Never restart.
  NO_RESTART = 0;
  // Just follow a Luby sequence times restart_period.
  LUBY_RESTART = 1;
  // Moving average restart based on the decision level of conflicts.
  DL_MOVING_AVERAGE_RESTART = 2;
  // Moving average restart based on the LBD of conflicts.
  LBD_MOVING_AVERAGE_RESTART = 3;
  // Fixed period restart every restart_period.
  FIXED_RESTART = 4;
}
// The restart strategies will change each time the strategy_counter is
// increased. The current strategy will simply be the one at index
// strategy_counter modulo the number of strategies. Note that if this list
// includes a NO_RESTART, nothing will change when it is reached because the
// strategy_counter will only increment after a restart.
//
// The idea of switching of search strategy tailored for SAT/UNSAT comes from
// Chanseok Oh with his COMiniSatPS solver, see http://cs.nyu.edu/~chanseok/.
// But more generally, it seems REALLY beneficial to try different strategy.
repeated RestartAlgorithm restart_algorithms = 61;
optional string default_restart_algorithms = 70
[default =
"LUBY_RESTART,LBD_MOVING_AVERAGE_RESTART,DL_MOVING_AVERAGE_RESTART"];
// Restart period for the FIXED_RESTART strategy. This is also the multiplier
// used by the LUBY_RESTART strategy.
optional int32 restart_period = 30 [default = 50];
// Size of the window for the moving average restarts.
optional int32 restart_running_window_size = 62 [default = 50];
// In the moving average restart algorithms, a restart is triggered if the
// window average times this ratio is greater than the global average.
optional double restart_dl_average_ratio = 63 [default = 1.0];
optional double restart_lbd_average_ratio = 71 [default = 1.0];
// Block a moving restart algorithm if the trail size of the current conflict
// is greater than the multiplier times the moving average of the trail size
// at the previous conflicts.
optional bool use_blocking_restart = 64 [default = false];
optional int32 blocking_restart_window_size = 65 [default = 5000];
optional double blocking_restart_multiplier = 66 [default = 1.4];
// After each restart, if the number of conflicts since the last strategy
// change is greater than this, then we increment a "strategy_counter" that
// can be used to change the search strategy used by the following restarts.
optional int32 num_conflicts_before_strategy_changes = 68 [default = 0];
// The parameter num_conflicts_before_strategy_changes is increased by that
// much after each strategy change.
optional double strategy_change_increase_ratio = 69 [default = 0.0];
// ==========================================================================
// Limits
// ==========================================================================
// Maximum time allowed in seconds to solve a problem.
// The counter will start at the beginning of the Solve() call.
optional double max_time_in_seconds = 36 [default = inf];
// Maximum time allowed in deterministic time to solve a problem.
// The deterministic time should be correlated with the real time used by the
// solver, the time unit being as close as possible to a second.
optional double max_deterministic_time = 67 [default = inf];
// Stops after that number of batches has been scheduled. This only makes sense
// when interleave_search is true.
optional int32 max_num_deterministic_batches = 291 [default = 0];
// Maximum number of conflicts allowed to solve a problem.
//
// TODO(user): Maybe change the way the conflict limit is enforced?
// currently it is enforced on each independent internal SAT solve, rather
// than on the overall number of conflicts across all solves. So in the
// context of an optimization problem, this is not really usable directly by a
// client.
optional int64 max_number_of_conflicts = 37
[default = 0x7FFFFFFFFFFFFFFF]; // kint64max
// Maximum memory allowed for the whole thread containing the solver. The
// solver will abort as soon as it detects that this limit is crossed. As a
// result, this limit is approximative, but usually the solver will not go too
// much over.
//
// TODO(user): This is only used by the pure SAT solver, generalize to CP-SAT.
optional int64 max_memory_in_mb = 40 [default = 10000];
// Stop the search when the gap between the best feasible objective (O) and
// our best objective bound (B) is smaller than a limit.
// The exact definition is:
// - Absolute: abs(O - B)
// - Relative: abs(O - B) / max(1, abs(O)).
//
// Important: The relative gap depends on the objective offset! If you
// artificially shift the objective, you will get widely different values of
// the relative gap.
//
// Note that if the gap is reached, the search status will be OPTIMAL. But
// one can check the best objective bound to see the actual gap.
//
// If the objective is integer, then any absolute gap < 1 will lead to a true
// optimal. If the objective is floating point, a gap of zero makes little
// sense, which is why we use a non-zero default value. At the end of the
// search, we will display a warning if OPTIMAL is reported yet the gap is
// greater than this absolute gap.
optional double absolute_gap_limit = 159 [default = 1e-4];
optional double relative_gap_limit = 160 [default = 0.0];
// ==========================================================================
// Other parameters
// ==========================================================================
// At the beginning of each solve, the random number generator used in some
// part of the solver is reinitialized to this seed. If you change the random
// seed, the solver may make different choices during the solving process.
//
// For some problems, the running time may vary a lot depending on small
// change in the solving algorithm. Running the solver with different seeds
// enables to have more robust benchmarks when evaluating new features.
optional int32 random_seed = 31 [default = 1];
// This is mainly here to test the solver variability. Note that in tests, if
// not explicitly set to false, all 3 options will be set to true so that
// clients do not rely on the solver returning a specific solution if they are
// many equivalent optimal solutions.
optional bool permute_variable_randomly = 178 [default = false];
optional bool permute_presolve_constraint_order = 179 [default = false];
optional bool use_absl_random = 180 [default = false];
// Whether the solver should log the search progress. This is the main
// logging parameter and if this is false, none of the logging (callbacks,
// log_to_stdout, log_to_response, ...) will do anything.
optional bool log_search_progress = 41 [default = false];
// Whether the solver should display per sub-solver search statistics.
// This is only useful if log_search_progress is set to true, and if the
// number of search workers is > 1. Note that in all cases we display a bit
// of stats with one line per subsolver.
optional bool log_subsolver_statistics = 189 [default = false];
// Add a prefix to all logs.
optional string log_prefix = 185 [default = ""];
// Log to stdout.
optional bool log_to_stdout = 186 [default = true];
// Log to response proto.
optional bool log_to_response = 187 [default = false];
// Whether to use pseudo-Boolean resolution to analyze a conflict. Note that
// this option only makes sense if your problem is modeled using
// pseudo-Boolean constraints. If you only have clauses, this shouldn't change
// anything (except slow the solver down).
optional bool use_pb_resolution = 43 [default = false];
// A different algorithm during PB resolution. It minimizes the number of
// calls to ReduceCoefficients() which can be time consuming. However, the
// search space will be different and if the coefficients are large, this may
// lead to integer overflows that could otherwise be prevented.
optional bool minimize_reduction_during_pb_resolution = 48 [default = false];
// Whether or not the assumption levels are taken into account during the LBD
// computation. According to the reference below, not counting them improves
// the solver in some situations. Note that this only impacts solves under
// assumptions.
//
// Gilles Audemard, Jean-Marie Lagniez, Laurent Simon, "Improving Glucose for
// Incremental SAT Solving with Assumptions: Application to MUS Extraction"
// Theory and Applications of Satisfiability Testing - SAT 2013, Lecture Notes
// in Computer Science Volume 7962, 2013, pp 309-317.
optional bool count_assumption_levels_in_lbd = 49 [default = true];
// ==========================================================================
// Presolve
// ==========================================================================
// During presolve, only try to perform the bounded variable elimination (BVE)
// of a variable x if the number of occurrences of x times the number of
// occurrences of not(x) is not greater than this parameter.
optional int32 presolve_bve_threshold = 54 [default = 500];
// During presolve, we apply BVE only if this weight times the number of
// clauses plus the number of clause literals is not increased.
optional int32 presolve_bve_clause_weight = 55 [default = 3];
// The maximum "deterministic" time limit to spend in probing. A value of
// zero will disable the probing.
//
// TODO(user): Clean up. The first one is used in CP-SAT, the other in pure
// SAT presolve.
optional double probing_deterministic_time_limit = 226 [default = 1.0];
optional double presolve_probing_deterministic_time_limit = 57
[default = 30.0];
// Whether we use a heuristic to detect some basic cases of blocked clauses
// in the SAT presolve.
optional bool presolve_blocked_clause = 88 [default = true];
// Whether or not we use Bounded Variable Addition (BVA) in the presolve.
optional bool presolve_use_bva = 72 [default = true];
// Apply Bounded Variable Addition (BVA) if the number of clauses is reduced
// by strictly more than this threshold. The algorithm described in the paper
// uses 0, but quick experiments showed that 1 is a good value. It may not be
// worth it to add a new variable just to remove one clause.
optional int32 presolve_bva_threshold = 73 [default = 1];
// In case of large reduction in a presolve iteration, we perform multiple
// presolve iterations. This parameter controls the maximum number of such
// presolve iterations.
optional int32 max_presolve_iterations = 138 [default = 3];
// Whether we presolve the cp_model before solving it.
optional bool cp_model_presolve = 86 [default = true];
// How much effort do we spend on probing. 0 disables it completely.
optional int32 cp_model_probing_level = 110 [default = 2];
// Whether we also use the sat presolve when cp_model_presolve is true.
optional bool cp_model_use_sat_presolve = 93 [default = true];
// If true, we detect variables that are unique to a table constraint and only
// there to encode a cost on each tuple. This is usually the case when a WCSP
// (weighted constraint program) is encoded into CP-SAT format.
//
// This can lead to a dramatic speed-up for such problems but is still
// experimental at this point.
optional bool detect_table_with_cost = 216 [default = false];
// How much we try to "compress" a table constraint. Compressing more leads to
// fewer Booleans and faster propagation but can reduce the quality of the lp
// relaxation. Values go from 0 to 3 where we always try to fully compress a
// table. At 2, we try to automatically decide if it is worth it.
optional int32 table_compression_level = 217 [default = 2];
// If true, expand all_different constraints that are not permutations.
// Permutations (#Variables = #Values) are always expanded.
optional bool expand_alldiff_constraints = 170 [default = false];
// If true, expand the reservoir constraints by creating booleans for all
// possible precedences between events and encoding the constraint.
optional bool expand_reservoir_constraints = 182 [default = true];
// Mainly useful for testing.
//
// If this and expand_reservoir_constraints is true, we use a different
// encoding of the reservoir constraint using circuit instead of precedences.
// Note that this is usually slower, but can exercise different part of the
// solver. Note that contrary to the precedence encoding, this easily support
// variable demands.
//
// WARNING: with this encoding, the constraint takes a slightly different
// meaning. The level must be within the reservoir for any permutation of the
// events. So we cannot have +100 and -100 at the same time if the maximum
// level is 10 (as authorized by the reservoir constraint).
optional bool expand_reservoir_using_circuit = 288 [default = false];
// Encode cumulative with fixed demands and capacity as a reservoir
// constraint. The only reason you might want to do that is to test the
// reservoir propagation code!
optional bool encode_cumulative_as_reservoir = 287 [default = false];
// If the number of expressions in the lin_max is less than the max size
// parameter, model expansion replaces target = max(xi) by linear constraint
// with the introduction of new booleans bi such that bi => target == xi.
//
// This is mainly for experimenting compared to a custom lin_max propagator.
optional int32 max_lin_max_size_for_expansion = 280 [default = 0];
// If true, it disables all constraint expansion.
// This should only be used to test the presolve of expanded constraints.
optional bool disable_constraint_expansion = 181 [default = false];
// Linear constraints with a complex right hand side (more than a single
// interval) need to be expanded; there are a couple of ways to do that.
optional bool encode_complex_linear_constraint_with_integer = 223
[default = false];
// During presolve, we use a maximum clique heuristic to merge together
// no-overlap constraints or at most one constraints. This code can be slow,
// so we have a limit in place on the number of explored nodes in the
// underlying graph. The internal limit is an int64, but we use double here to
// simplify manual input.
optional double merge_no_overlap_work_limit = 145 [default = 1e12];
optional double merge_at_most_one_work_limit = 146 [default = 1e8];
// How much substitution (also called free variable aggregation in MIP
// literature) should we perform at presolve. This currently only concerns
// variable appearing only in linear constraints. For now the value 0 turns it
// off and any positive value performs substitution.
optional int32 presolve_substitution_level = 147 [default = 1];
// If true, we will extract from linear constraints, enforcement literals of
// the form "integer variable at bound => simplified constraint". This should
// always be beneficial except that we don't always handle them as efficiently
// as we could for now. This causes problem on manna81.mps (LP relaxation not
// as tight it seems) and on neos-3354841-apure.mps.gz (too many literals
// created this way).
optional bool presolve_extract_integer_enforcement = 174 [default = false];
// A few presolve operations involve detecting constraints included in other
// constraints. Since there can be a quadratic number of such pairs, and
// processing them usually involves scanning them, the complexity of these
// operations can be big. This enforces a local deterministic limit on the
// number of entries scanned. Default is 1e8.
//
// A value of zero will disable these presolve rules completely.
optional int64 presolve_inclusion_work_limit = 201 [default = 100000000];
// If true, we don't keep names in our internal copy of the user given model.
optional bool ignore_names = 202 [default = true];
// Run a max-clique code amongst all the x != y we can find and try to infer
// sets of variables that are all different. This allows us to close neos16.mps
// for instance. Note that we only run this code if there is no all_diff
// already in the model so that if a user wants to add some all_diff, we assume
// it is well done and do not try to add more.
//
// This will also detect and add no_overlap constraints, if all the relations
// x != y have "offsets" between them. I.e. x > y + offset.
optional bool infer_all_diffs = 233 [default = true];
// Try to find large "rectangle" in the linear constraint matrix with
// identical lines. If such a rectangle is big enough, we can introduce a new
// integer variable corresponding to the common expression and greatly reduce
// the number of non-zero.
optional bool find_big_linear_overlap = 234 [default = true];
// ==========================================================================
// Inprocessing
// ==========================================================================
// Enable or disable "inprocessing" which is some SAT presolving done at
// each restart to the root level.
optional bool use_sat_inprocessing = 163 [default = true];
// Proportion of deterministic time we should spend on inprocessing.
// At each "restart", if the proportion is below this ratio, we will do some
// inprocessing, otherwise, we skip it for this restart.
optional double inprocessing_dtime_ratio = 273 [default = 0.2];
// The amount of dtime we should spend on probing for each inprocessing round.
optional double inprocessing_probing_dtime = 274 [default = 1.0];
// Parameters for a heuristic similar to the one described in "An effective
// learnt clause minimization approach for CDCL Sat Solvers",
// https://www.ijcai.org/proceedings/2017/0098.pdf
//
// This is the amount of dtime we should spend on this technique during each
// inprocessing phase.
//
// The minimization technique is the same as the one used to minimize core in
// max-sat. We also minimize problem clauses and not just the learned clause
// that we keep forever like in the paper.
optional double inprocessing_minimization_dtime = 275 [default = 1.0];
// ==========================================================================
// Multithread
// ==========================================================================
// Specify the number of parallel workers (i.e. threads) to use during search.
// This should usually be lower than the number of available CPUs +
// hyperthreads on your machine.
//
// A value of 0 means the solver will try to use all cores on the machine.
// A value of 1 means no parallelism.
//
// Note that 'num_workers' is the preferred name, but if it is set to zero,
// we will still read the deprecated 'num_search_workers'.
//
// As of 2020-04-10, if you're using SAT via MPSolver (to solve integer
// programs) this field is overridden with a value of 8, if the field is not
// set *explicitly*. Thus, always set this field explicitly or via
// MPSolver::SetNumThreads().
optional int32 num_workers = 206 [default = 0];
optional int32 num_search_workers = 100 [default = 0];
// We distinguish subsolvers that consume a full thread, and the ones that are
// always interleaved. If left at zero, we will fix this with a default
// formula that depends on num_workers. But if you start modifying what runs,
// you might want to fix that to a given value depending on the num_workers
// you use.
optional int32 num_full_subsolvers = 294 [default = 0];
// In multi-thread, the solver can be mainly seen as a portfolio of solvers
// with different parameters. This field indicates the names of the parameters
// that are used in multithread. This only applies to "full" subsolvers.
//
// See cp_model_search.cc to see a list of the names and the default value (if
// left empty) that looks like:
//   - default_lp (linearization_level:1)
//   - fixed (only if fixed search specified or scheduling)
//   - no_lp (linearization_level:0)
//   - max_lp (linearization_level:2)
//   - pseudo_costs (only if objective, change search heuristic)
//   - reduced_costs (only if objective, change search heuristic)
//   - quick_restart (kind of probing)
//   - quick_restart_no_lp (kind of probing with linearization_level:0)
//   - lb_tree_search (to improve lower bound, MIP like tree search)
//   - probing (continuous probing and shaving)
//
// Also, note that some sets of parameters will be ignored if they do not make
// sense. For instance if there is no objective, pseudo_cost or reduced_cost
// search will be ignored. Core based search will only work if the objective
// has many terms. If there is no fixed strategy, fixed will be ignored. And
// so on.
//
// The order is important, as only the first num_full_subsolvers will be
// scheduled. You can see in the log which ones are selected for a given run.
repeated string subsolvers = 207;
// A convenient way to add more worker types.
// These will be added at the beginning of the list.
repeated string extra_subsolvers = 219;
// Rather than fully specifying subsolvers, it is often convenient to just
// remove the ones that are not useful on a given problem or only keep
// specific ones for testing. Each string is interpreted as a "glob", so we
// support '*' and '?'.
//
// The way this works is that we will only accept a name that matches a filter
// pattern (if non-empty) and does not match an ignore pattern. Note also that
// these fields work on LNS or LS names even if these are currently not
// specified via the subsolvers field.
repeated string ignore_subsolvers = 209;
repeated string filter_subsolvers = 293;
// It is possible to specify additional subsolver configurations. These can be
// referred to by their params.name() in the fields above. Note that only the
// specified fields will "overwrite" the ones of the base parameters. If a
// subsolver_params has the name of an existing subsolver configuration, the
// named parameters will be merged into the subsolver configuration.
repeated SatParameters subsolver_params = 210;
// Experimental. If this is true, then we interleave all our major search
// strategies and distribute the work amongst num_workers.
//
// The search is deterministic (independently of num_workers!), and we
// schedule and wait for interleave_batch_size tasks to be completed before
// synchronizing and scheduling the next batch of tasks.
optional bool interleave_search = 136 [default = false];
optional int32 interleave_batch_size = 134 [default = 0];
// Allows objective sharing between workers.
optional bool share_objective_bounds = 113 [default = true];
// Allows sharing of the bounds of modified variables at level 0.
optional bool share_level_zero_bounds = 114 [default = true];
// Allows sharing of new learned binary clauses between workers.
optional bool share_binary_clauses = 203 [default = true];
// Allows sharing of short glue clauses between workers.
// Implicitly disabled if share_binary_clauses is false.
optional bool share_glue_clauses = 285 [default = false];
// ==========================================================================
// Debugging parameters
// ==========================================================================
// We have two different postsolve implementations. The default one should be
// better and it allows for a more powerful presolve, but it can be useful to
// postsolve using the full solver instead.
optional bool debug_postsolve_with_full_solver = 162 [default = false];
// If positive, try to stop just after that many presolve rules have been
// applied. This is mainly useful for debugging presolve.
optional int32 debug_max_num_presolve_operations = 151 [default = 0];
// Crash if we do not manage to complete the hint into a full solution.
optional bool debug_crash_on_bad_hint = 195 [default = false];
// ==========================================================================
// Max-sat parameters
// ==========================================================================
// For an optimization problem, whether we follow some hints in order to find
// a better first solution. For a variable with a hint, the solver will always
// try to follow the hint. It will revert to the variable_branching default
// otherwise.
optional bool use_optimization_hints = 35 [default = true];
// If positive, we spend some effort on each core:
// - At level 1, we use a simple heuristic to try to minimize an UNSAT core.
// - At level 2, we use propagation to minimize the core but also identify
//   literals that are in an at-most-one relationship in this core.
optional int32 core_minimization_level = 50 [default = 2];
// Whether we try to find more independent cores for a given set of
// assumptions in the core based max-SAT algorithms.
optional bool find_multiple_cores = 84 [default = true];
// If true, when the max-sat algo finds a core, we compute the minimal number
// of literals in the core that need to be true to have a feasible solution.
// This is also called core exhaustion in more recent max-SAT papers.
optional bool cover_optimization = 89 [default = true];
// In what order do we add the assumptions in a core-based max-sat algorithm.
enum MaxSatAssumptionOrder {
DEFAULT_ASSUMPTION_ORDER = 0;
ORDER_ASSUMPTION_BY_DEPTH = 1;
ORDER_ASSUMPTION_BY_WEIGHT = 2;
}
optional MaxSatAssumptionOrder max_sat_assumption_order = 51
[default = DEFAULT_ASSUMPTION_ORDER];
// If true, adds the assumptions in the reverse order of the one defined by
// max_sat_assumption_order.
optional bool max_sat_reverse_assumption_order = 52 [default = false];
// What stratification algorithm we use in the presence of weights.
enum MaxSatStratificationAlgorithm {
// No stratification of the problem.
STRATIFICATION_NONE = 0;
// Start with literals with the highest weight, and when SAT, add the
// literals with the next highest weight and so on.
STRATIFICATION_DESCENT = 1;
// Start with all literals. Each time a core is found with a given minimum
// weight, do not consider literals with a lower weight for the next core
// computation. If the subproblem is SAT, do like in STRATIFICATION_DESCENT
// and just add the literals with the next highest weight.
STRATIFICATION_ASCENT = 2;
}
optional MaxSatStratificationAlgorithm max_sat_stratification = 53
[default = STRATIFICATION_DESCENT];
// ==========================================================================
// Constraint programming parameters
// ==========================================================================
// Some search decisions might cause a really large number of propagations to
// happen when integer variables with large domains are only reduced by 1 at
// each step. If we propagate more than the number of variables times this
// parameter, we try to take counter-measures. Setting this to 0.0 disables
// this feature.
//
// TODO(user): Setting this to something like 10 helps in most cases, but the
// code is currently buggy and can cause the solve to enter a bad state where
// no progress is made.
optional double propagation_loop_detection_factor = 221 [default = 10.0];
// When this is true, then a disjunctive constraint will try to use the
// precedence relations between time intervals to propagate their bounds
// further. For instance if tasks A and B are both before C and tasks A and B
// are in disjunction, then we can deduce that task C must start after
// duration(A) + duration(B) instead of simply max(duration(A), duration(B)),
// provided that the start time of all tasks was currently zero.
//
// This always results in better propagation, but it is usually slow, so
// depending on the problem, turning this off may lead to a faster solution.
optional bool use_precedences_in_disjunctive_constraint = 74 [default = true];
// Create one literal for each disjunction of two pairs of tasks. This slows
// down the solve time, but improves the lower bound of the objective in the
// makespan case. This will be triggered if the number of intervals is less
// than or equal to the parameter and if
// use_strong_propagation_in_disjunctive is true.
optional int32 max_size_to_create_precedence_literals_in_disjunctive = 229
[default = 60];
// Enable stronger and more expensive propagation on no_overlap constraint.
optional bool use_strong_propagation_in_disjunctive = 230 [default = false];
// Whether we try to branch on decision "interval A before interval B" rather
// than on interval bounds. This usually works better, but slows down a bit
// the time to find the first solution.
//
// These parameters are still EXPERIMENTAL, the result should be correct, but
// in some corner cases, they can cause some failing CHECK in the solver.
optional bool use_dynamic_precedence_in_disjunctive = 263 [default = false];
optional bool use_dynamic_precedence_in_cumulative = 268 [default = false];
// When this is true, the cumulative constraint is reinforced with overload
// checking, i.e., an additional level of reasoning based on energy. This
// additional level supplements the default level of reasoning as well as
// timetable edge finding.
//
// This always results in better propagation, but it is usually slow, so
// depending on the problem, turning this off may lead to a faster solution.
optional bool use_overload_checker_in_cumulative = 78 [default = false];
// Enable a heuristic to solve cumulative constraints using a modified energy
// constraint. We modify the usual energy definition by applying a
// super-additive function (also called "conservative scale" or "dual-feasible
// function") to the demand and the durations of the tasks.
//
// This heuristic is fast but for most problems it does not help much to find
// a solution.
optional bool use_conservative_scale_overload_checker = 286 [default = false];
// When this is true, the cumulative constraint is reinforced with timetable
// edge finding, i.e., an additional level of reasoning based on the
// conjunction of energy and mandatory parts. This additional level
// supplements the default level of reasoning as well as overload_checker.
//
// This always results in better propagation, but it is usually slow, so
// depending on the problem, turning this off may lead to a faster solution.
optional bool use_timetable_edge_finding_in_cumulative = 79 [default = false];
// Max number of intervals for the timetable_edge_finding algorithm to
// propagate. A value of 0 disables the constraint.
optional int32 max_num_intervals_for_timetable_edge_finding = 260
[default = 100];
// If true, detect and create constraints for integer variables that are
// "after" a set of intervals in the same cumulative constraint.
//
// Experimental: by default we just use "direct" precedences. If
// exploit_all_precedences is true, we explore the full precedence graph. This
// assumes we have a DAG otherwise it fails.
optional bool use_hard_precedences_in_cumulative = 215 [default = false];
optional bool exploit_all_precedences = 220 [default = false];
// When this is true, the cumulative constraint is reinforced with propagators
// from the disjunctive constraint to improve the inference on a set of tasks
// that are disjunctive at the root of the problem. This additional level
// supplements the default level of reasoning.
//
// Propagators of the cumulative constraint will not be used at all if all the
// tasks are disjunctive at root node.
//
// This always results in better propagation, but it is usually slow, so
// depending on the problem, turning this off may lead to a faster solution.
optional bool use_disjunctive_constraint_in_cumulative = 80 [default = true];
// When this is true, the no_overlap_2d constraint is reinforced with
// propagators from the cumulative constraints. It consists of ignoring the
// position of rectangles in one dimension and projecting the no_overlap_2d on
// the other dimension to create a cumulative constraint. This is done on both
// axes. This additional level supplements the default level of reasoning.
optional bool use_timetabling_in_no_overlap_2d = 200 [default = false];
// When this is true, the no_overlap_2d constraint is reinforced with
// energetic reasoning. This additional level supplements the default level of
// reasoning.
optional bool use_energetic_reasoning_in_no_overlap_2d = 213
[default = false];
// When this is true, the no_overlap_2d constraint is reinforced with
// an energetic reasoning that uses an area-based energy. This can be combined
// with the two other overlap heuristics above.
optional bool use_area_energetic_reasoning_in_no_overlap_2d = 271
[default = false];
// If the number of pairs to look at is below this threshold, do an extra
// step of propagation in the no_overlap_2d constraint by looking at all
// pairs of intervals.
optional int32 max_pairs_pairwise_reasoning_in_no_overlap_2d = 276
[default = 1250];
// When set, it activates a few scheduling parameters to improve the lower
// bound of scheduling problems. This is only effective with multiple workers
// as it modifies the reduced_cost, lb_tree_search, and probing workers.
optional bool use_dual_scheduling_heuristics = 214 [default = true];
// The search branching will be used to decide how to branch on unfixed nodes.
enum SearchBranching {
// Try to fix all literals using the underlying SAT solver's heuristics,
// then generate and fix literals until integer variables are fixed. New
// literals on integer variables are generated using the fixed search
// specified by the user or our default one.
AUTOMATIC_SEARCH = 0;
// If used then all decisions taken by the solver are made using a fixed
// order as specified in the API or in the CpModelProto search_strategy
// field.
FIXED_SEARCH = 1;
// Simple portfolio search used by LNS workers.
PORTFOLIO_SEARCH = 2;
// If used, the solver will use heuristics from the LP relaxation. This
// exploits the reduced costs of the variables in the relaxation.
LP_SEARCH = 3;
// If used, the solver uses the pseudo costs for branching. Pseudo costs
// are computed using the historical change in objective bounds when some
// decisions are taken. Note that this works whether we use an LP or not.
PSEUDO_COST_SEARCH = 4;
// Mainly exposed here for testing. This quickly tries a lot of randomized
// heuristics with a low conflict limit. It usually provides a good first
// solution.
PORTFOLIO_WITH_QUICK_RESTART_SEARCH = 5;
// Mainly used internally. This is like FIXED_SEARCH, except we follow the
// solution_hint field of the CpModelProto rather than using the information
// provided in the search_strategy.
HINT_SEARCH = 6;
// Similar to FIXED_SEARCH, but differs in how the variables not listed in
// the fixed search heuristics are branched on. This will always start the
// search tree according to the specified fixed search strategy, but will
// complete it using the default automatic search.
PARTIAL_FIXED_SEARCH = 7;
// Randomized search. Used to increase entropy in the search.
RANDOMIZED_SEARCH = 8;
}
optional SearchBranching search_branching = 82 [default = AUTOMATIC_SEARCH];
// Conflict limit used in the phase that exploits the solution hint.
optional int32 hint_conflict_limit = 153 [default = 10];
// If true, the solver tries to repair the solution given in the hint. This
// search terminates after the 'hint_conflict_limit' is reached and the solver
// switches to regular search. If false, then we do a FIXED_SEARCH using the
// hint until the hint_conflict_limit is reached.
optional bool repair_hint = 167 [default = false];
// If true, variables appearing in the solution hints will be fixed to their
// hinted value.
optional bool fix_variables_to_their_hinted_value = 192 [default = false];
// If true, search will continuously probe Boolean variables, and integer
// variable bounds. This parameter is set to true in parallel on the probing
// worker.
optional bool use_probing_search = 176 [default = false];
// Use extended probing (probe bool_or, at_most_one, exactly_one).
optional bool use_extended_probing = 269 [default = true];
// How many combinations of pairs or triplets of variables we want to scan.
optional int32 probing_num_combinations_limit = 272 [default = 20000];
// Add a shaving phase (where the solver tries to prove that the lower or
// upper bound of a variable is infeasible) to the probing search.
optional bool use_shaving_in_probing_search = 204 [default = true];
// Specifies the amount of deterministic time spent on each try at shaving a
// bound in the shaving search.
optional double shaving_search_deterministic_time = 205 [default = 0.001];
// Specifies the threshold between two modes in the shaving procedure.
// If the range of the variable/objective is less than this threshold, then
// the shaving procedure will try to remove values one by one. Otherwise, it
// will try to remove one range at a time.
optional int64 shaving_search_threshold = 290 [default = 64];
// If true, search will search in ascending max objective value (when
// minimizing) starting from the lower bound of the objective.
optional bool use_objective_lb_search = 228 [default = false];
// This search differs from the previous search as it will not use assumptions
// to bound the objective, and it will recreate a full model with the
// hardcoded objective value.
optional bool use_objective_shaving_search = 253 [default = false];
// This search takes all Boolean or integer variables, and maximizes or
// minimizes them in order to reduce their domains.
optional bool use_variables_shaving_search = 289 [default = false];
// The solver ignores the pseudo costs of variables with a number of
// recordings less than this threshold.
optional int64 pseudo_cost_reliability_threshold = 123 [default = 100];
// The default optimization method is a simple "linear scan", each time trying
// to find a better solution than the previous one. If this is true, then we
// use a core-based approach (like in max-SAT) when we try to increase the
// lower bound instead.
optional bool optimize_with_core = 83 [default = false];
// Do a more conventional tree search (as opposed to the SAT-based one) where
// we keep all the explored nodes in a tree. This is meant to be used in a
// portfolio and focuses on improving the objective lower bound. Keeping the
// whole tree allows us to report a better objective lower bound coming from
// the worst open node in the tree.
optional bool optimize_with_lb_tree_search = 188 [default = false];
// Experimental. Save the current LP basis at each node of the search tree so
// that when we jump around, we can load it and reduce the number of LP