-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathexamgen.nw
1000 lines (887 loc) · 33.9 KB
/
examgen.nw
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
\documentclass[a4paper]{article}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage[british]{babel}
\usepackage{authblk}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage[hyphens]{url}
\usepackage{hyperref}
\usepackage[capitalize]{cleveref}
\usepackage[all]{foreign}
\renewcommand{\foreignfullfont}{}
\renewcommand{\foreignabbrfont}{}
\usepackage{listings}
\lstset{%
basicstyle=\footnotesize,
numbers=left
}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{amsthm}
\usepackage{thmtools}
\declaretheorem[style=definition,
name=Example,refname={example,examples}]{example}
\usepackage{noweb}
% Needed to relax penalty for breaking code chunks across pages, otherwise
% there might be a lot of space following a code chunk.
\def\nwendcode{\endtrivlist \endgroup}
\let\nwdocspar=\smallbreak
\usepackage{csquotes}
\usepackage{acro}
\DeclareAcronym{ILO}{
short={ILO},
short-indefinite={an},
long={intended learning outcome},
long-indefinite={an},
}
\usepackage[natbib,style=alphabetic]{biblatex}
\addbibresource{examgen.bib}
\title{%
examgen: An exam generator
}
\author{Daniel Bosk}
\affil{%
Department of Information and Communication Systems,\\
Mid Sweden University, SE-851\,70 Sundsvall
}
\date{Version 4.0}
\begin{document}
\maketitle
\begin{abstract}
\input{abstract.tex}
\end{abstract}
\clearpage
\tableofcontents
\clearpage
@
\section{Introduction}
The purpose of this program is to automatically generate the LaTeX code for an
exam based on some inputs.
The inputs will be a database of questions and parameters on how to choose
questions from the database.
The idea is to use the exam~\cite{exam} document class.
Then we can have all exam questions available so that examgen(1) can randomly
select a number of them and put together the [[questions]] environment of the
exam document class.
\subsection{Outline}
The program is a Python 3 script, [[<<examgen.py>>]].
We will use the following structure:
<<examgen.py>>=
#!/usr/bin/env python3
<<imports>>
<<constants>>
<<classes>>
<<functions>>
def main(argv):
<<main body>>
if __name__ == "__main__":
try:
sys.exit(main(sys.argv))
except Exception as err:
print("{0}: {1}".format(sys.argv[0], err), file=sys.stderr)
sys.exit(1)
@ Then we will successively specify what these mean.
The [[<<imports>>]] will contain our imported modules.
For instance, since we use [[sys.argv]] and [[sys.exit]] above we'll need to
add
<<imports>>=
import sys
@ to [[<<imports>>]].
The code blocks [[<<classes>>]] and [[<<functions>>]] will contain our classes
and functions, respectively.
The [[<<main body>>]] block contains the code of the main function.
Basically, we need the following code blocks:
<<main body>>=
<<parse command-line arguments>>
<<generate exam and output result>>
@
To parse command-line arguments we will make use of Python's
[[argparse]]~\cite{argparse}:
<<parse command-line arguments>>=
argp = argparse.ArgumentParser(description="Generates an exam.")
@ We also need to add it to our imports:
<<imports>>=
import argparse
@ The parsing step will then be to [[<<parse arguments>>]] and then
[[<<process arguments>>]]:
<<parse command-line arguments>>=
<<parse arguments>>
<<process arguments>>
@
The processing step is rather straight-forward using [[argparse]].
We simply parse [[argv]] and get a Python dictionary containing the variables
we specify in [[<<parse arguments>>]]:
<<process arguments>>=
args = vars(argp.parse_args(argv[1:]))
@
\section{Design}
Inevitably, we need to keep the exam questions in some form of database.
Our first option is to reuse the (LaTeX code of) old exams, i.e.~one exam is
considered one database file.
Thus we need to be able to add several database files, and we must add at least
one.
We will do that by adding a database argument to the program:
<<parse arguments>>=
argp.add_argument(
"-d", "--database", nargs="+",
required=True, help="Adds a questions database to use")
@ This will give us a Python list containing all the names, so we can read all
questions in each database:
<<process arguments>>=
questions = set()
for database in args["database"]:
<<read questions database>>
@ We store the questions in a set since we are not interested in any
redundancy, the same question should not be added twice even if it occurs in
two database files.
\subsection{The questions database format}
Since the design allows for using old exams as database files, then we must
adapt our database format to this.
We assume that the exams are using the exam~\cite{exam} document class.
As such each question will start with the command \verb'\question' and end at
the beginning of the next question.
Or in the special case of the last question, it ends with the end of the
[[questions]] environment.
However, we also want to be able to use exercises from teaching material, which
is assumed to be an exercise environment.
We can thus make use of Python's regular expressions facilities~\cite{regex}:
<<imports>>=
import re
@ We can use the following code block to set up a regular expression pattern to
match a question (exam class format), exercise, problem (nada-ten format) or
question, questionmult, choice and choices environments (auto-multiple-choice
format):
<<set up question regex>>=
# Pattern matching one question in any supported format: an exam-class
# \question command (running up to, but not including, the next question,
# the end of the questions environment or the start of an exercise), an
# exercise or problem environment, or one of the auto-multiple-choice
# question/questionmult/choice/choices environments.
# Raw strings are used so regex escapes such as \w and \\ need no extra
# level of backslash escaping (non-raw "\w" is an invalid string escape
# and warns on modern Python).
question_code_pattern = re.compile(
    r"(\\question(.|\n)*?"
    r"(?=(\\question|\\end{questions}|\\begin{exercise}))|"
    r"\\begin{exercise}(.|\n)*?\\end{exercise}|"
    r"\\begin{problem}(.|\n)*?\\end{problem}|"
    r"\\begin{question}{\w*}(.|\n)*?\\end{question}|"
    r"\\begin{questionmult}{\w*}(.|\n)*?\\end{questionmult}|"
    r"\\begin{choice}{\w*}(.|\n)*?\\end{choice}|"
    r"\\begin{choices}{\w*}(.|\n)*?\\end{choices})",
    re.MULTILINE)
@ (See~\cite{regex-lookaround} for a treatment of zero-width assertions in
regular expressions.)
The regular expression consists of two parts.
The first part matches questions (exam format) and the second part matches
exercise, problem and question environments.
This expression will conveniently also include any parts or solution
environments used inside the exam question or nada-ten problem formats.
\begin{example}[The question format]
The regular expression in [[<<set up question regex>>]] will allow questions
of which the following is an example:
\begin{lstlisting}[language={[LaTeX]TeX}]
\begin{questions}
\question What kind of question is this?!
\question Or this:
\begin{parts}
\part Or this?!
\part Or even this?!
\end{parts}
\question[3]
How much wood could a woodchuck chuck if a woodchuck could chuck wood?
\begin{solution}
A woodchuck would chuck no amount of wood since a woodchuck can't chuck
wood.
\end{solution}
\end{questions}
\end{lstlisting}
It does not matter what surrounds the question environment.
The only reason it is needed is to mark the end of the last question.
\end{example}
\begin{example}[The exercise format]
The regular expression in [[<<set up question regex>>]] will also allow
exercises of which the following is an example:
\begin{lstlisting}[language={[LaTeX]TeX}]
\begin{exercise}
Do a few push-ups \dots
\end{exercise}
\dots
\begin{exercise}
Now do some sit-ups \dots
\end{exercise}
\end{lstlisting}
Similarly as for the question format: it does not matter what surrounds the
environments, they can be in the middle of your slides.
\end{example}
To read the questions database, we simply open each database, read its contents
and parse each question.
In case the file does not exist we issue an error message to the user, then we
continue with the next file.
This allows us to specify files that might exist in the future, see
\cref{ExamsForCourse} for an example.
<<read questions database>>=
<<set up question regex>>
try:
file_contents = open(database, "r").read()
except Exception as err:
print("{0}: {1}".format(sys.argv[0], err), file=sys.stderr)
continue
<<match a question>>
while match:
<<parse question>>
<<remove matching question>>
<<match a question>>
@ To match a question we simply let
<<match a question>>=
match = re.search(question_code_pattern, file_contents)
@ This makes [[match]] an object of [[MatchObject]] type.
This means that we can use its [[end()]] method to remove the already searched
text from [[file_contents]]:
<<remove matching question>>=
file_contents = file_contents[match.end():]
@
\subsection{The question format}
Now that we know the format of the question databases, this brings us to the
next part: the format of the actual question and how to do
[[<<parse question>>]].
We need some sort of data structure to hold each question and its related
meta-data.
One solution is to use a class:
<<classes>>=
class Question:
<<question constructors>>
<<question methods>>
@
We will have at least one constructor.
A suitable one is to construct the question from its LaTeX code:
<<question constructors>>=
def __init__(self, code):
self.__code = code
<<question constructor body>>
@ This makes the LaTeX code also a natural attribute of the class.
We will use Python's properties to do this:
<<question methods>>=
@@property
def code(self):
return self.__code
@
Now we need to construct the question.
If the question actually is an exercise environment, then we first need to
convert it.
<<question constructor body>>=
if self.__code.find(r"\begin{exercise}") >= 0:
<<transform exercise to question form>>
@ To transform an exercise to a question we can replace the beginning of the
environment with the question command and simply drop the end of the
environment.
<<transform exercise to question form>>=
self.__code = self.__code.replace(r"\begin{exercise}", r"\question ")
self.__code = self.__code.replace(r"\end{exercise}", "")
@
\begin{example}[Transforming exercises]
The following exercise environment
\begin{lstlisting}[language={[LaTeX]TeX}]
\begin{exercise}
Enumerate all prime numbers.
\end{exercise}
\end{lstlisting}
will be transformed into the following question
\begin{lstlisting}[language={[LaTeX]TeX}]
\question
Enumerate all prime numbers.
\end{lstlisting}
\end{example}
To be able to add a question to a set, the data structure must be
\enquote{hashable} and have an equality operator:
<<question methods>>=
def __hash__(self):
    # Hash on the LaTeX code; [[code]] is never modified during the
    # question's lifetime, so the hash stays stable while the question
    # is in a set.
    return hash(self.code)
def __eq__(self, other):
    # Two questions are equal exactly when their LaTeX code is equal.
    # Defer to the other operand when it has no [[code]] attribute.
    if not hasattr(other, "code"):
        return NotImplemented
    return (self.code == other.code)
@ This also means that we are not allowed to modify the question object
throughout its lifetime, i.e.\ the [[code]] attribute must not be modified.
(That is why we do not provide any method to modify it.)
Now that we have everything we need to parse the question, we can thus define
<<parse question>>=
questions.add(Question(match.group()))
@ where we add the question to the set of questions.
\subsection{Tags}
\label{Tags}
We have handled the problem of the same question reoccurring by defining
equality and adding all questions to a set.
However, we still have the problem of similar questions occurring, i.e.\ more
like semantic equality.
Questions that treat the same topic or \ac{ILO} in a similar way must be
recognized as such, because we don't want two questions which are too similar
in the same exam.
One solution is that we tag the questions.
The tags can be used to identify topics, the difficulty level of the question
and which \acp{ILO} it covers, they can capture the type of semantic similarity
we want.
Thus it makes sense to have a set of tags as an attribute for our question
class:
<<question constructor body>>=
self.__tags = set()
@ Since we cannot modify the question, there is no need to update the tags
either, so we only need a readable property.
<<question methods>>=
@@property
def tags(self):
return self.__tags
@
It will be useful to compute the set of tags for several questions, for this
purpose we provide the following function.
<<functions>>=
def tags(questions):
    """Return the union of the tag sets of all the given questions."""
    return set().union(*(question.tags for question in questions))
@
\subsubsection{Tags in the label}
There are several ways we can implement how the tags are stored in the
question.
We can, for instance, use the LaTeX [[\label]] command.
(We will cover a better way shortly, this way is kept for backwards
compatibility and might disappear soon.)
Most questions have an attached label, conventionally prefixed with [[q:]] and
followed by something that captures the content of the question.
We can use this convention to add a colon separated list of tags using the
label command.
This list can be extracted in the following way:
<<question constructor body>>=
<<question tags regex pattern>>
<<filter out tags using regex>>
@ where
<<question tags regex pattern>>=
# Matches a \label whose key carries the q: or xrc: prefix; group 2
# captures the colon-separated tag list that follows the prefix.
question_label_pattern = re.compile(
    r"\\label{(q|xrc):([^}]*)}",
    re.MULTILINE)
@
\begin{example}[Tags in label]
The regular expression above will allow questions to be tagged by a label,
for instance:
\begin{lstlisting}[language={[LaTeX]TeX}]
\question\label{q:AnalyseThis}
Analyse the following statement: exams are not a good tool for assessment, but
it scales well.
\end{lstlisting}
It works for both questions and exercises.
\end{example}
We can then extract the tags using the second group in the pattern and add the
tags to the set of tags:
<<filter out tags using regex>>=
matched_tags = question_label_pattern.finditer(self.code)
for match in matched_tags:
self.__tags |= set(match.group(2).split(":"))
@ If the regular expression does not match, i.e.\ there is no label for the
question, the iterator simply yields no matches and the tag set is left
unchanged.
For each match, the colon-separated list of tags is split into a Python list,
which is immediately converted to a set and merged into the question's tags.
\subsubsection{Tags as comments}
Keeping the tags in the label works if there are a few shortly-named tags,
however, this approach does not scale well.
For this reason we also want to provide another way to supply several
longer-named tags, e.g.\ to have \acp{ILO} as tags.
We will do this by a specially crafted comment in the code of the question.
<<question tags regex pattern>>=
# Matches a tag comment such as "% tags: a:b" or "% ILO: SomeOutcome";
# group 2 captures everything after the keyword up to end of line.
question_comment_pattern = re.compile(
    r"% ?(tags?|ilos?|ILOs?): ?(.*)$",
    re.MULTILINE)
@ Then we can take the union of these tags and those found in the label:
<<filter out tags using regex>>=
matched_tags = question_comment_pattern.finditer(self.code)
for match in matched_tags:
self.__tags |= set(match.group(2).split(":"))
@
\begin{example}[Tags as comments]
This is the preferred method, it scales better and we can include the tags in
context:
\begin{lstlisting}[language={[LaTeX]TeX}]
\begin{exercise}
You are going to design an authentication system:
\begin{enumerate}[a)]
% ILO: AnalyseSecurityProperties
% topic: crypto
\item Analyse the security properties you need.
% ILO: DesignSecureSystem
% topic: crypto
\item Starting from the security properties, combine cryptographic
primitives to sketch a design of a secure authentication system.
\end{enumerate}
\end{exercise}
\end{lstlisting}
It is fine that we have the same topic twice, our use of sets ensures that
redundancy is not an issue.
This works for both questions and exercises.
\end{example}
\section{Randomly selecting the questions}
% XXX Add references for how to create an exam
The purpose of this work is to construct an exam, hence there are several
aspects we need to consider.
The exam should assess if the student has fulfilled the \acp{ILO} of the course
(at least those that are not assessed in any other way).
Since we cannot include every detail of the material treated in the course in
the assessment, the exam usually depends on a random sample of the material
covering the \acp{ILO} of the course.
Thus, how we select the questions is of great importance.
Firstly, we need to identify similar questions, i.e.\ questions covering the
same things.
This is covered in \cref{Tags}.
Secondly, we need to make a selection which covers the course in a good way.
In \cref{FindingCovering} we describe an algorithm which solves this problem in
general.
Then we will discuss how to adapt its behaviour in
\cref{StopConditions,QuestionFilters}.
\subsection{Finding a covering}
\label{FindingCovering}
When we generate an exam, we want to specify which tags we are interested in.
We can do this using an argument on the command-line:
<<parse arguments>>=
argp.add_argument(
"-t", "--tags", nargs="+",
required=True, help="Adds required question tags")
@ This will give us a list of tags which we can use when selecting the
questions for the exam:
<<process arguments>>=
required_tags = set(args["tags"])
@
We now have the set of required tags \(E\) for the exam, [[required_tags]] in
[[<<process arguments>>]] above.
We also have the set of tags \(Q_i\) for each question \(i\), through the
[[tags]] property in [[<<question methods>>]].
What we want is a set of questions \(\{ Q_i \}_i\) which cover the exam \(E\),
i.e.~\(E\subseteq \bigcup_i Q_i\).
We will now describe a general algorithm which takes a set of required tags,
a set of questions and then returns a subset of the questions which covers the
required tags.
As implied above, the creation of an exam depends on the required tags and the
question universe available.
We will actually generalize this a bit, we will use the universe of questions,
a predicate for stopping and a question filter.
Thus, to find a covering (generate an exam) we can do the following:
<<functions>>=
def examgen(questions, required_tags, stop_condition, question_filter):
exam_questions = set()
while not stop_condition(exam_questions):
<<randomly select a question>>
<<check if the question is good>>
<<yield the question as output>>
<<remove the question so we don't select it again>>
@ (The reason we include the required tags is to form a more useful error
message in [[<<randomly select a question>>]] below.)
Note that this function will be a Python generator, not a function returning a
list.
We will only use [[exam_questions]] for internal purposes, \eg the interactive
filter (see \cref{interactive-filter}).
We can randomly select a question by
<<randomly select a question>>=
try:
    # random.sample cannot operate directly on a set from Python 3.11 on,
    # so convert the set of questions to a list first.  Sampling from an
    # empty population still raises ValueError, which we want.
    question = random.sample(list(questions), 1)[0]
except ValueError as err:
    # The universe is exhausted: tell the user which required tags are
    # still uncovered by the questions selected so far.
    tags_left = required_tags - tags(exam_questions)
    raise ValueError("{0}: missing {1}".format(err, tags_left))
@ [[random.sample]] returns a list of the one sample we requested or it raises
an exception.
If no error occurred when randomly selecting a question, then we store only
that sample (instead of a list containing only one sample).
Then we can add the question to the exam by
<<yield the question as output>>=
exam_questions.add(question)
yield question
@ We use [[yield]] to provide a generator, but we must add the question to the
set of questions for use in some other functions.
Lastly we remove it from the universe of questions so that we do not select it
again:
<<remove the question so we don't select it again>>=
questions.discard(question)
@ The random selection also requires us to add the [[random]] module:
<<imports>>=
import random
@
The last thing we need to do is to [[<<check if the question is good>>]].
For this purpose we use the [[question_filter]], i.e.\ pass each randomly
chosen question through the filter to see if it passes.
The filter is allowed to modify the question, so it can be a transformation, so
we must handle this case too.
<<check if the question is good>>=
filtered_question = question_filter(question, exam_questions)
if filtered_question is None:
<<remove the question so we don't select it again>>
continue
else:
question = filtered_question
@ Different filters will be treated in \cref{QuestionFilters}.
\subsection{Generating the output}
Now we can generate an exam in the following way:
<<generate exam and output result>>=
<<set up stop condition>>
<<set up question filter>>
for q in examgen(questions, required_tags,
stop_condition, question_filter):
print("{0}".format(q.code))
@ We will return to the stop condition in \cref{StopConditions} and filters,
first, in [[<<check if the question is good>>]] and then in
\cref{QuestionFilters}.
The trivial solution is to just print the code for the questions to standard
out, which is what we do above.
We note that the questions are the only thing printed to standard out, since
everywhere else we print to standard error.
We also note that [[examgen]] is written as a Python generator.
This means that we will print each question as it is generated, no buffering.
The reason for this is to ensure that if the program is killed (or an error
occurs) during the generation, the user's modifications will not be lost since
they were written to standard out already.
\subsection{Stop conditions}
\label{StopConditions}
The stop condition suggested above is when the required tags \(E\) is a subset
of the tags of the questions in the exam, \(E\subseteq \bigcup_i Q_i\).
Now we can formulate that stop condition as:
<<classes>>=
class StopWhenSubset:
    """Stop condition: generation is finished once the required tags form
    a subset of the tags covered by the selected questions."""
    def __init__(self, required_tags):
        self.__req_tags = required_tags

    def __call__(self, selected_questions):
        covered = tags(selected_questions)
        return self.__req_tags.issubset(covered)
@
Now we can instantiate a stop condition.
<<set up stop condition>>=
stop_condition = StopWhenSubset(required_tags)
@
\subsection{Filtering questions}
\label{QuestionFilters}
The main part of the algorithm is to check if a question is good or not, i.e.\
the filtering, and we will cover that part now.
As suggested above, a filter requires two parameters:
the question and the currently selected questions.
As a filter, we will use a callable object, be it a function or an instance of
a callable class.
We will discuss a few filters and how to activate them below, however, first we
will discuss filter composition.
It is desirable that the filters are as simple as possible.
Then, to form more complex filters, we should be able to compose several simple
filters.
We want to compose functions, and to do this we can introduce a function
[[compose_filters]] which returns a new function-like object which is a filter
function.
<<functions>>=
def compose_filters(filterA, filterB):
    """Return a new filter that applies filterA first and then feeds its
    result through filterB (both receive the selected questions)."""
    def composed(question, selected_questions):
        intermediate = filterA(question, selected_questions)
        return filterB(intermediate, selected_questions)
    return composed
@
\subsubsection{No filtering}
The most basic filter we could use will simply allow all questions to pass.
Sometimes this is actually useful, but it also illustrates the technique we
will continue to use for the other filters.
<<classes>>=
class NoFilter:
    """The identity filter: every question passes through unchanged."""
    def __call__(self, question, selected_questions):
        return question
@ We can add this as the default filter.
<<set up question filter>>=
question_filter = NoFilter()
@ We note that we could have used a function instead of a callable class
for this filter.
\subsubsection{A question must add new tags}
To decrease the number of questions in an exam we want to require every new
question to cover new tags, i.e.\ the new question is not a subset of the
already selected questions.
This way we will not have several questions covering exactly the same tags, but
they are allowed to overlap by a proper subset.
<<classes>>=
class IsNotSubset:
    """Filter rejecting questions that add no new tags, that is, whose
    tag set is a subset of the tags of the already selected questions."""
    def __call__(self, question, selected_questions):
        if question is None:
            return None
        if question.tags <= tags(selected_questions):
            return None
        return question
@
We will add a command-line option to enable this filter.
<<parse arguments>>=
argp.add_argument(
"-N", "--require-new-tags",
default=False, action="store_true",
help="Requires that each new question adds new tags to the exam")
@ If this option is enabled we will compose this filter with already set
filters.
<<set up question filter>>=
if args["require_new_tags"]:
question_filter = compose_filters(question_filter, IsNotSubset())
@
\subsubsection{Questions must have disjoint tag sets}
We can also add a stricter version of the previous filter, i.e.\ to require
that each question has no overlapping tags.
<<classes>>=
class DisjointQuestions:
    """Stricter filter rejecting any question that shares a tag with the
    already selected questions."""
    def __call__(self, question, selected_questions):
        if question is None:
            return None
        if question.tags.isdisjoint(tags(selected_questions)):
            return question
        return None
@
We will add the following command-line option to enable this filter.
<<parse arguments>>=
argp.add_argument(
"-D", "--require-disjoint",
default=False, action="store_true",
help="Requires that each question has disjoint tag sets")
@ We will simply compose this filter if enabled.
<<set up question filter>>=
if args["require_disjoint"]:
question_filter = compose_filters(question_filter, DisjointQuestions())
@
\subsubsection{Any covering}
Now we will introduce the first filter to require that the question actually
contribute to the covering of the exam's tags.
This means that a question must have one of the required tags.
As such, we also need to provide the filter with the required tags.
<<classes>>=
class RequireCover:
    """Filter passing only questions that contribute at least one of the
    required tags; as a side effect, untagged questions are rejected."""
    def __init__(self, required_tags):
        self.__req_tags = required_tags

    def __call__(self, question, selected_questions):
        if question is None:
            return None
        if question.tags & self.__req_tags:
            return question
        return None
@ This actually prevents questions with no tags.
We provide the following command-line option to enable this filter.
<<parse arguments>>=
argp.add_argument(
"-C", "--require-cover",
default=False, action="store_true",
help="Requires that each question must cover some of the required tags")
@ If this is enabled we will compose the filter.
<<set up question filter>>=
if args["require_cover"]:
question_filter = \
compose_filters(question_filter, RequireCover(required_tags))
@
\subsubsection{An exact covering}
Usually we do not want to include questions which cover topics outside the
scope of the exam.
This requires a stronger filter than the previous one, i.e.\ the question tags
must be a subset of the exam tags, \(Q_i\subseteq E\).
<<classes>>=
class ExactCovering:
    """Filter passing only questions whose whole tag set lies within the
    required tags, so the selection covers nothing outside the exam."""
    def __init__(self, required_tags):
        self.__req_tags = required_tags

    def __call__(self, question, selected_questions):
        if question is None:
            return None
        if question.tags <= self.__req_tags:
            return question
        return None
@
We will add a command-line option to activate this filter.
<<parse arguments>>=
argp.add_argument(
"-E", "--require-exact",
default=False, action="store_true",
help="Requires every question's tag set to be a subset of the required tags"
", i.e. an exact covering")
@ And finally we compose it if enabled.
<<set up question filter>>=
if args["require_exact"]:
question_filter = \
compose_filters(question_filter, ExactCovering(required_tags))
@
\subsubsection{Human intervention}%
\label{interactive-filter}
At times we might want some human intervention:
some of the questions might not be tagged, or not suitably tagged;
some questions might be good starting points for better questions.
Since the selection is randomized, we might want to have some human
intervention to guarantee a better selection.
We will do this by opening the question for editing in the user's favourite
editor.
This will allow the user to edit the question text and its tags.
In essence, with this filter the user can now use the exam generator to
generate a list of questions to use as inspiration for a new exam.
Since this is a feature we might only want occasionally, we add a command-line
argument to enable it:
<<parse arguments>>=
argp.add_argument(
"-i", "--interactive",
default=False, action="store_true",
help="Turns interactive mode on, "
"lets you edit each qualifying question with ${EDITOR}")
@ We will create a filter which provides this interaction.
We will need to know about the required tags to provide some help to the user,
e.g.\ to remind the user of the remaining required tags not already covered.
We also need to re-filter the question after editing, to ensure that it still
conforms to the user's other filters.
<<classes>>=
class OpenWithEditor:
    """Interactive filter: opens each qualifying question in the user's
    editor and re-filters the edited result with the supplied filter."""
    def __init__(self, required_tags, question_filter):
        self.__req_tags = required_tags
        self.__filter = question_filter

    def __call__(self, question, selected_questions):
        if question is None:
            return None
        return edit_question(question, self.__req_tags, self.__filter,
                             selected_questions)
@ Consequently we will compose this filter in the same fashion as previously.
The only difference is that we supply the current composed filter to our
interactive filter.
<<set up question filter>>=
if args["interactive"]:
question_filter = compose_filters(question_filter,
OpenWithEditor(required_tags, question_filter))
@
The [[edit_question]] function is defined as follows.
<<functions>>=
def edit_question(question, required_tags, question_filter, exam_questions):
    """Open `question` in the user's editor and return the result.

    `required_tags` and `exam_questions` are used to show the user which
    required tags are not yet covered; `question_filter` re-qualifies the
    question after editing.  Returns the accepted question, or None if the
    user rejects it.
    """
    <<open the question in the editor>>
    <<check that the question is still qualified>>
@
\paragraph{Open the question in the user's editor}
To open the question for editing in the user's editor, we have to write the
question to a temporary file and open that file with the editor.
Then we read the contents back to process it.
<<open the question in the editor>>=
<<create a temporary file>>
<<execute the editor with file as argument>>
<<read the file contents back>>
@ We will use Python's interface to the operating system to create a temporary
file in the proper way:
<<create a temporary file>>=
# mkstemp creates the file securely and returns an OS-level descriptor,
# which we wrap in a Python file object for text writing.
fd, filename = tempfile.mkstemp()
file = os.fdopen(fd, "w")
<<write the question to file>>
file.close()
@ This requires the [[tempfile]] module.
<<imports>>=
import tempfile
@
We open the file by executing what is in the [[EDITOR]] environment variable in
the shell.
In some cases we will run with [[stdin]] and [[stdout]] redirected; this will
not work well when launching the editor.
To fix this we will ensure that [[stdin]] and [[stdout]] are set to those
of the controlling terminal ([[/dev/tty]]) for the subprocess (the editor).
<<execute the editor with file as argument>>=
# Launch the user's editor (EDITOR environment variable, falling back to
# vim) on the temporary file.  All three standard streams are rebound to
# the controlling terminal so the editor works even when our own
# stdin/stdout are redirected.
command = [os.environ.get("EDITOR", "vim"), filename]
tty = os.open("/dev/tty", os.O_RDWR)
try:
    subprocess.run(command, stdin=tty, stdout=tty, stderr=tty)
finally:
    # Close the tty descriptor even if launching the editor fails,
    # so the file descriptor is never leaked.
    os.close(tty)
@ This in turn requires more modules:
<<imports>>=
import os, subprocess
@ Finally, we open the file when the sub-process (editor) has exited.
When we are done with the file we remove it.
<<read the file contents back>>=
# Read the (possibly edited) question back, then remove the temporary file.
with open(filename, "r") as file:
    <<read the question back from file>>
os.unlink(filename)
@
\paragraph{Write the question to file}
To aid the user we do not only want to write the question code to the file, we
also want to include which tags are remaining for a complete cover of the tags.
Thus, first we write the remaining tags followed by a separator and finally the
code of the question.
<<write the question to file>>=
# Prepend one LaTeX-comment line per required tag not yet covered by the
# questions selected so far, then the separator, then the question's code.
for t in (required_tags - tags(exam_questions)):
    file.write("% remaining tag: " + t + "\n")
file.write("\n" + REMOVE_ABOVE_SEPARATOR + "\n")
file.write(question.code)
@ We add the separator to our constants.
<<constants>>=
# Marker separating the helper comments from the question proper; the
# marker and every line above it are stripped when reading the file back.
REMOVE_ABOVE_SEPARATOR = "% ----- Everything ABOVE will be REMOVED -----"
@ Conversely we also want to read the edited file back when the user is done
editing.
<<read the question back from file>>=
# Everything up to and including the separator line is scaffolding for the
# user; keep only what follows it.  If the user deleted the separator,
# keep all lines as the question.
question_lines = [line.strip("\n") for line in file.readlines()]
try:
    separator_at = question_lines.index(REMOVE_ABOVE_SEPARATOR)
except ValueError:
    pass
else:
    question_lines = question_lines[separator_at+1:]
question = Question("\n".join(question_lines))
@
\paragraph{Check that the question is still qualified}
Since the user is allowed to do arbitrary edits in the question, that means
that the tags might have changed in such a way that the question is not
qualified for the exam.
In these circumstances we must inform the user and give the user the chance to
correct this.
We note that since standard out might be piped, we will use standard error for
notifying the user.
<<check that the question is still qualified>>=
# Re-run the other filters on the edited question.  All user interaction
# goes to stderr, since stdout may be piped.
filtered_question = question_filter(question, exam_questions)
if filtered_question is None:
    # The edit made the question fail the other filters entirely.
    print("{0}: {1}\n".format(sys.argv[0],
        "the question does not pass the filter(s)"),
        file=sys.stderr)
    <<ask the user to reject or edit again>>
elif filtered_question != question:
    # Some filter rewrote the question; let the user choose which version
    # to keep or re-edit.
    print("{0}: {1}\n".format(sys.argv[0],
        "the question was modified by the filter(s)"),
        file=sys.stderr)
    <<ask the user to accept, edit filtered or unfiltered, or reject>>
else:
    <<ask the user to accept, edit again or reject>>
@
Now that the user is supposedly done with the question, we should provide
alternatives to go back to editing, accepting the edited version or reject it
and go to the next question.
<<ask the user to accept, edit again or reject>>=
print("[e]dit again, [a]ccept, [r]eject: ", end="", file=sys.stderr)
action = input()
while True:
if action in {"A", "a", "accept"}:
return question
elif action in {"R", "r", "reject"}:
return None
elif action in {"E", "e", "edit"}:
return edit_question(question, required_tags,
question_filter, exam_questions)
else:
print("[e]dit again, [a]ccept, [r]eject: ", end="", file=sys.stderr)
action = input()
@ If the question was modified by the filters we should act differently: we
need to add an option to edit the filtered or unfiltered version again.
<<ask the user to accept, edit filtered or unfiltered, or reject>>=
print("edit [f]iltered or [u]nfiltered, [a]ccept filtered, [r]eject: ",
end="", file=sys.stderr)
action = input()
while True:
if action in {"A", "a", "accept"}:
return filtered_question
elif action in {"R", "r", "reject"}:
return None
elif action in {"F", "f", "filtered"}:
return edit_question(filtered_question, required_tags,
question_filter, exam_questions)
elif action in {"U", "u", "unfiltered"}:
return edit_question(question, required_tags,
question_filter, exam_questions)
else:
print("[e]dit again, [a]ccept, [r]eject: ", end="", file=sys.stderr)
action = input()
@ If the question is no longer qualified, the user must edit or reject.
<<ask the user to reject or edit again>>=
print("[e]dit again, [r]eject: ", end="", file=sys.stderr)
action = input()
while True:
if action in {"R", "r", "reject"}:
return None
elif action in {"E", "e", "edit"}:
return edit_question(question, required_tags,
question_filter, exam_questions)
else:
print("[e]dit again, [r]eject: ", end="", file=sys.stderr)
action = input()
@
\section{Usage examples}
\input{example.tex}
\section*{Acknowledgements}
\input{acknowledgements.tex}
\printbibliography
\section*{An index of the code blocks}
\nowebchunks
\end{document}