{
"queries": {
"35542879-0e1d-4d2b-b492-26f8263e1563": "What are the main objectives of the proposal for a Regulation laying down harmonised rules on artificial intelligence (Artificial Intelligence Act) according to the explanatory memorandum?",
"1ce09904-ac01-4b4b-8e0c-c498e486dcd1": "How does the proposal for a Regulation on artificial intelligence aim to address the risks associated with the use of AI while promoting the uptake of AI in the European Union, as outlined in the context information?",
"350653f5-d664-43d7-9920-14d2b3b665a9": "How does the proposed regulatory framework on Artificial Intelligence aim to ensure the safety of AI systems while respecting fundamental rights and Union values?",
"ee8c2cce-a595-4d56-b33d-d313e4a7eb9d": "What specific recommendations and resolutions have been made by the European Parliament and European Council regarding the development, deployment, and use of AI, robotics, and related technologies in the European Union?",
"bf7ba2cf-aad9-40bd-95a0-d6547cf714e6": "How does the proposed regulatory system for AI in the Union aim to balance trade restrictions with ensuring safety and respect for fundamental rights throughout the AI systems' lifecycle?",
"dadee1d1-6a5a-4d91-a2f9-5f18905bfae5": "How does the proposal ensure consistency with existing Union legislation, such as the EU Charter of Fundamental Rights, data protection laws, and sectoral safety legislation, in relation to the development and use of high-risk AI systems?",
"87e08f81-a852-44bf-aa44-149349c54167": "How does the proposal for a Machinery Regulation reflect the approach to ensuring overall safety of AI systems integrated into final products?",
"edac11b7-fb8a-4314-a890-d8fe6abfa1a7": "How does the proposal for AI regulation align with the Commission's overall digital strategy and contribute to promoting technology that respects people's rights and earns their trust in Europe?",
"c8296626-1592-4ddc-a08f-186afc061cb5": "How does the proposal aim to address the potential fragmentation of the internal market in the European Union regarding AI products and services, and what are the key objectives of the proposal in this regard?",
"8dd7b9c5-6f8b-4513-8c7f-b8a459b28b85": "Explain the rationale behind the proposal's approach to proportionality and the differentiation between high-risk and non-high-risk AI systems in terms of regulatory burdens and requirements.",
"9ebfcd02-f33f-42e8-ae45-bff51f06772a": "Why is the choice of a regulation as a legal instrument justified for the new rules regarding AI, and how does it contribute to reducing legal fragmentation and facilitating the development of a single market for lawful, safe, and trustworthy AI systems?",
"40d0cd82-ae3c-434c-9600-a6925d9e2406": "What were the key findings and general agreement among stakeholders regarding the need for legislative action in the consultation process for the White Paper on Artificial Intelligence, and how did stakeholders emphasize the importance of a technology-neutral and proportionate regulatory framework?",
"ebe7b2af-2c41-47ef-87c9-5b3d56b994ea": "What were the key requirements set out in the High-Level Expert Group on AI (HLEG) ethics guidelines for Trustworthy AI, and how were they developed and revised?",
"2f0f9c9a-cc37-4dab-a1a3-555a8def0b98": "Compare and contrast the different policy options assessed by the Commission to achieve the general objective of the proposal for ensuring the proper functioning of the single market in the development and use of trustworthy AI in the Union.",
"c86e90de-4443-4325-8119-8bff5fe9aadf": "Compare and contrast the different policy options assessed by the Commission for regulating AI systems in the European Union, highlighting the preferred option and its rationale.",
"2f98b9cb-90f6-4687-b137-2d36c81dacd4": "Discuss the potential impacts of the preferred regulatory framework for high-risk AI systems on different stakeholders, including businesses, conformity assessment bodies, individuals, and researchers, as outlined in the Impact assessment supporting the proposal.",
"a722e121-d222-41b1-aca3-d19229ae3261": "How does the proposal aim to promote trust among customers of companies using AI, and what specific measures are outlined to achieve this goal?",
"44e1618d-e5ce-4cbb-80fe-a4bff0d7f00e": "In what ways does the proposal seek to ensure a high level of protection for fundamental rights in the use of AI, and how are potential risks addressed through a risk-based approach?",
"45576c86-87d5-4d6a-ab40-db2e6cab1f5a": "How does the proposal ensure effective redress for individuals affected by infringements of fundamental rights in the development and use of high-risk AI technology?",
"ea92e976-b259-484e-8dc5-66b31670efe2": "What are the key provisions outlined in Title II of the proposal regarding prohibited artificial intelligence practices?",
"298d3909-cce9-4a64-a7fe-cd67eafd7460": "How does the legal framework in the document aim to ensure technology neutrality and future-proofing in defining AI systems?",
"af48022c-48ea-42bf-8654-f2d15c8e3eb2": "What are the key components of the regulations regarding high-risk AI systems, including classification rules and mandatory requirements for compliance?",
"0d1d4a79-e892-452f-9d7c-92e48b43b6e8": "How does the proposed AI framework ensure compatibility with international recommendations and principles, and what flexibility is provided to providers of AI systems in meeting the requirements?",
"78c43dc2-ff1e-4d55-885b-ffd7d2c8ac6f": "What transparency obligations are outlined for certain AI systems in Title IV of the document, and how do these obligations aim to address specific risks associated with manipulation by AI systems?",
"a6f23470-3273-4b74-ba68-14c570ea4a42": "How does the proposal for AI regulation address transparency obligations for AI systems that interact with humans, detect emotions, or generate manipulated content such as 'deep fakes'?",
"56ae400d-c583-446c-a05e-223d2167ec4c": "What measures are outlined in Title V of the document to support innovation in the development and testing of AI technologies, particularly for SMEs and start-ups?",
"5ea3e711-5f88-4e15-97a4-82c9f8a05dd0": "How does Title IX of the proposed regulation encourage providers of non-high-risk AI systems to adhere to the mandatory requirements for high-risk AI systems?",
"2d013f0d-d962-4d17-91c0-4f56b223ad3d": "What measures are outlined in Title X of the proposed regulation to ensure the effective implementation of the regulation and address infringements of the provisions?",
"af1e8e07-ceb9-4077-87b3-33b297453635": "How can the adoption of national rules for artificial intelligence ensure safety and compliance with fundamental rights obligations, and what potential consequences may arise from differing national rules within the internal market?",
"0b7ee314-97a3-4faa-b7c3-08c2f947bc66": "Why is a Union legal framework needed to regulate artificial intelligence, and how does it aim to balance fostering the development of AI while ensuring a high level of protection for public interests and fundamental rights?",
"9c781c0d-26c5-4244-943d-530a2dba4f48": "Define the notion of a remote biometric identification system as outlined in the Regulation, including the distinction between 'real-time' and 'post' systems and the key functional characteristics involved in the identification process.",
"6e5e563d-0ab3-4a13-b763-43750b5f9f70": "Explain the concept of publicly accessible space as defined in the Regulation, highlighting the criteria for determining whether a space is considered publicly accessible and providing examples of spaces that fall within this definition.",
"bbdaf4d4-2991-498e-8420-fcbcc8b2c4c5": "How does the Regulation ensure a level playing field and protection of rights and freedoms of individuals across the Union in relation to providers and users of AI systems, regardless of their establishment within the Union or in a third country?",
"96a329b7-92dd-421c-8b0a-03ec658fd849": "Why should certain AI systems fall within the scope of the Regulation even when they are not placed on the market, put into service, or used in the Union, and what measures are taken to prevent circumvention of the Regulation in such cases?",
"b491e7db-a3b6-4748-9f11-4f5c2438b07b": "Explain the reasons why certain artificial intelligence practices should be prohibited according to the context information provided, citing specific Union values and fundamental rights that are at risk of being violated.",
"1f9d8bf9-14f7-4306-8023-7faab9c0ffaa": "Discuss the ethical considerations and potential societal impacts of using AI systems for 'real-time' remote biometric identification in publicly accessible spaces for law enforcement purposes, as outlined in the context information.",
"a29b24f7-adda-4922-8e1e-493dd6e5f95c": "What are the three exhaustively listed and narrowly defined situations in which the use of 'real-time' remote biometric identification systems for law enforcement purposes is deemed appropriate, according to the context information provided?",
"cb8f77e6-1d32-4a90-a581-a7754999f688": "Why is it important for the use of 'real-time' remote biometric identification systems in publicly accessible spaces for law enforcement to be subject to specific authorisation by a judicial or administrative authority, as outlined in the context information?",
"7b2d487e-e96e-4e02-b934-beb49a366ad7": "What are the specific rules and limitations outlined in the Regulation regarding the use of AI systems for 'real-time' remote biometric identification of natural persons in publicly accessible spaces for the purpose of law enforcement?",
"8bafcb0b-84ca-4188-b28c-05987f0eeb37": "How do the mandatory requirements for high-risk AI systems aim to ensure the protection of important Union public interests and minimize potential risks to health, safety, and fundamental rights of individuals in the Union?",
"0823b08c-f32c-4c0f-a59d-6102b7e2b416": "How does the Regulation on high-risk AI systems aim to ensure the safety and protection of important Union public interests as recognized by Union law, particularly in relation to the health, safety, and fundamental rights of individuals in the Union?",
"1fccd898-07e7-4e0a-ac54-f87cdde03d78": "In what ways does the Regulation address the classification of AI systems as high-risk, especially when they are safety components of products falling within the scope of certain Union harmonization legislation, and how does it propose to amend existing acts to incorporate mandatory requirements for high-risk AI systems?",
"d098a7dc-22b8-467d-aac6-0b2101a44eae": "How are AI systems classified as high-risk under the Regulation, and what specific criteria are taken into account for their classification?",
"6a21cdcd-fb5c-464c-8c48-722e80da5fc4": "Why are AI systems used in education and employment considered high-risk, and what potential impacts do they have on individuals in these contexts?",
"5795d8c3-0854-463c-9ba7-7618dac26c7b": "How do AI systems used in the evaluation of credit score or creditworthiness of natural persons impact access to essential services and potentially perpetuate discrimination based on factors such as racial or ethnic origins, disabilities, age, or sexual orientation?",
"b5155921-7773-451f-92f5-56cbeae015b7": "Why are AI systems intended for use by law enforcement authorities, such as those for individual risk assessments or detecting 'deep fakes', classified as high-risk due to the potential adverse impacts on fundamental rights, transparency, and accountability in the context of law enforcement activities?",
"be2f5b8e-cce7-4016-8402-2d30f4e437cf": "How are high-risk AI systems used in the fields of law enforcement authorities, migration, asylum, and border control management classified and what specific activities do they encompass?",
"c226bcae-6ed6-499b-910e-b782c8834478": "What mandatory requirements apply to high-risk AI systems in order to mitigate risks for users and affected persons, according to the context information provided?",
"7f59105d-c848-49e1-a04a-1e19af690b84": "How can high-quality data sets contribute to the performance and safety of high-risk AI systems, and what measures should be implemented to ensure the data sets are relevant, representative, and free of errors?",
"13213dfd-81ab-4df6-92e6-d1e4fab03474": "Why is it important for high-risk AI systems to be transparent and accompanied by relevant documentation for users to interpret the system output and understand any potential risks to fundamental rights and discrimination?",
"9bc24bef-c923-432d-bb1f-6c1815245a9d": "How can providers ensure transparency and interpretability in high-risk AI systems according to the guidelines outlined in the document?",
"14764a25-d9f2-45c5-92c1-edb94bc4a62e": "What measures should be taken to ensure the technical robustness and cybersecurity of high-risk AI systems throughout their lifecycle, as specified in the context information?",
"cc716bb3-b702-4615-96c1-913cbf1ab105": "What are the specific responsibilities outlined for the provider of a high-risk AI system, including the establishment of a quality management system, conformity assessment procedure, documentation, and post-market monitoring system?",
"49e2f0a5-4183-4cbe-b0e1-c1fb149765d8": "How does the Regulation address the compliance of high-risk AI systems embedded in products covered by existing Union harmonisation legislation, and what approach is taken to minimize duplication and burden on operators in this regard?",
"9f6926ab-f652-404a-8b18-4cc5e5a36695": "How does the Regulation propose to minimize the burden on operators and avoid duplication in conformity assessments for high-risk AI systems related to products covered by existing Union harmonisation legislation?",
"a136870c-3607-420e-86ed-0663aa628ed8": "Under what conditions can Member States authorize the placing on the market or putting into service of AI systems that have not undergone a conformity assessment, according to the Regulation?",
"682c8f2a-c5fa-4b3d-aef2-de8218bf12cd": "What are the specific transparency obligations that certain AI systems intended to interact with natural persons or generate content may be subject to, according to the document?",
"90fab33f-4e5b-489f-8e98-c68529ef5d8a": "How do regulatory sandboxes aim to foster AI innovation and what role do they play in the development and testing of innovative AI systems under strict regulatory oversight, as outlined in the document?",
"31066827-c9f7-4bd6-9cc0-6f5468090f18": "How can Member States promote and protect innovation for small-scale providers and users of AI systems, and what specific initiatives should be developed to support them?",
"0679698f-dcbc-4385-b710-53f85457b96e": "What role does the European Artificial Intelligence Board play in the implementation of the Regulation, and what advisory tasks are they responsible for?",
"769bda47-4a52-41a3-abef-8fd007cdafa7": "How does the Regulation (EU) 2019/1020 ensure appropriate enforcement of requirements and obligations, particularly in relation to market surveillance and compliance of products?",
"63ac9716-b17d-4f4a-8a4e-95f0040fdd1c": "What measures are recommended for providers of non-high-risk AI systems to encourage the voluntary application of mandatory requirements and additional considerations, such as environmental sustainability and stakeholder participation?",
"1d7d8fc6-3c5e-4d82-bbe1-626b268a7a58": "What are the specific requirements for high-risk AI systems and the obligations for operators of such systems according to the Regulation?",
"d7bf6bb7-10f1-4133-b268-279ed5acc466": "How are penalties, including administrative fines, addressed in the Regulation and when do these provisions come into effect?",
"cd2ab6e0-f40b-40fa-9299-3c28642f7585": "What are the key provisions laid down in the Regulation regarding artificial intelligence systems, including prohibitions, requirements for high-risk AI systems, transparency rules, and market monitoring?",
"2a2e6c9d-8852-49ca-9df9-e0d454961918": "Can you explain the definitions provided in the Regulation for terms such as 'artificial intelligence system,' 'provider,' 'user,' 'authorised representative,' 'importer,' 'distributor,' and 'operator'?",
"b12561fe-a769-4a8d-87c0-26291bff5897": "What is the definition of 'placing on the market' in the context of AI systems according to the provided information?",
"5074cab6-a139-46de-a15b-e53e6dfcd410": "Explain the concept of 'post-market monitoring' as it relates to the responsibilities of providers of AI systems, based on the context information given.",
"3dd13556-7566-4640-86a9-8474a00d807e": "What is the definition of 'post-market monitoring' as per the Regulation mentioned in the context information?",
"a18073e6-90e8-4f2b-a9b0-454cef8733d3": "Define and differentiate between 'biometric categorisation system' and 'remote biometric identification system' as outlined in the Regulation.",
"308a159f-05ed-440c-8b7e-9ccd66b99909": "Explain the difference between a 'real-time' remote biometric identification system and a 'post' remote biometric identification system, including the key characteristics of each.",
"14359508-c8d5-479c-b25b-c508043c8d6b": "Define the term 'serious incident' as outlined in the context information provided, and provide examples of incidents that would fall under this definition.",
"8f1722f5-06c6-4e66-9833-b0e5191eaab4": "What are the prohibited artificial intelligence practices outlined in Article 5 of the document, and what are the specific criteria for each practice to be considered prohibited?",
"737cdf8d-8b00-4c7d-86ca-085819633c2d": "In the context of law enforcement, what are the necessary elements and conditions that must be taken into account when using 'real-time' remote biometric identification systems in publicly accessible spaces, as stated in paragraphs 2 and 3 of the document?",
"e456b78c-03af-48ce-b857-bfeb9bf79a1a": "What are the conditions that must be fulfilled for an AI system to be considered high-risk according to the classification rules outlined in Article 6?",
"3f95b6ba-b6d0-42e0-9afc-4bcdcbeeb974": "What criteria must be met for the Commission to add high-risk AI systems to Annex III, as stated in Article 7?",
"60c5d46a-6d91-4e70-b810-1d2f2b869961": "How does the Commission determine which AI systems should be considered high-risk and added to Annex III, according to Article 7?",
"263fed9a-7b3d-42d7-8c6b-dabebda554c6": "What are the key components of a risk management system that must be established for high-risk AI systems, as outlined in Article 9?",
"1f6f99d8-f16a-4bab-9acf-2a789e2752cc": "How should high-risk AI systems be tested in order to identify the most appropriate risk management measures, and what considerations should be taken into account during the testing process?",
"6c2468b8-6a75-4771-9c2e-bf8de9daba41": "What are the key data governance and management practices that should be followed when developing high-risk AI systems that make use of techniques involving the training of models with data, and why are these practices important for ensuring the quality of the training, validation, and testing data sets?",
"5c0a8019-f164-49f5-93bd-55903e065a52": "How should training, validation, and testing data sets be selected and prepared for high-risk AI systems according to the regulations outlined in the document?",
"b7503949-77f5-424e-85a6-e278400be4ff": "What requirements must be met in terms of technical documentation for high-risk AI systems before they are placed on the market or put into service, as specified in Article 11 of the document?",
"66cc0ee1-f6ca-4a43-9787-4f07e5075c51": "How should high-risk AI systems be designed and developed to ensure transparency for users, and what information should be included in the instructions for use according to Article 13 of the regulation?",
"673521c3-5c68-43f8-84e0-894b169c5fe7": "What measures should be implemented for human oversight of high-risk AI systems, and how can individuals assigned to oversee the AI system effectively monitor its operation and address any anomalies or automation bias as outlined in Article 14?",
"4b15933f-632d-403e-ae1e-2425e20cca6f": "How can providers of high-risk AI systems ensure the accuracy, robustness, and cybersecurity of their systems according to the regulations outlined in Article 15?",
"f1a67057-8961-4765-87ad-3078b1cb75bb": "What are the obligations of providers of high-risk AI systems as stated in Article 16, and how do these obligations contribute to ensuring compliance with the requirements set out in Chapter 2 of the Regulation?",
"b6dc219a-3ce8-44f5-ae48-7bf0d2547f80": "What are the key aspects that providers of high-risk AI systems must include in their quality management system according to Article 17 of the Regulation?",
"21f17b0c-85cb-4c9a-a39b-892f4c3c45d2": "How are providers of high-risk AI systems required to handle the logs automatically generated by their systems, as outlined in Article 20 of the Regulation?",
"004419d0-ea67-4b94-a474-397c5da5ff25": "What are the obligations of providers of high-risk AI systems in terms of keeping logs, corrective actions, duty of information, cooperation with competent authorities, and obligations of product manufacturers as outlined in Articles 20-24 of the Regulation?",
"695ec8ef-8bc9-4c0c-8619-5f835d7b9a62": "In what circumstances are providers of high-risk AI systems required to appoint an authorized representative established in the Union, and what tasks are they empowered to carry out according to Article 25 of the Regulation?",
"230080f1-d70b-4d74-98e4-934cdafabed1": "What are the specific tasks that the authorised representative of a high-risk AI system is empowered to carry out according to the regulations outlined in the document?",
"e28534bb-e95e-40c6-95f7-edd4a8195840": "What are the obligations of importers and distributors of high-risk AI systems as detailed in the regulations provided in the context information?",
"645b3fb0-3972-4a9c-8132-495d6acf17d7": "What are the obligations of distributors, importers, users, or any other third-party under Article 28 of the Regulation regarding high-risk AI systems?",
"ad329bff-0a2b-4977-b401-dd926c5e0573": "According to Article 29, what are the specific obligations imposed on users of high-risk AI systems, particularly in terms of monitoring, informing providers or distributors, and keeping logs?",
"ebc135a5-2a4f-4e1b-8742-7394bbe26882": "What are the obligations of users of high-risk AI systems regarding the maintenance of logs, and how does this relate to Directive 2013/36/EU for credit institutions?",
"92e91c85-c988-4ae8-b7c2-e32540b7cca8": "Describe the role and responsibilities of notifying authorities in the context of conformity assessment bodies and the notification process outlined in the document.",
"5dc17a4e-94e4-4e8f-a24e-9d770faef615": "What are the requirements that notified bodies must satisfy in order to verify the conformity of high-risk AI systems, as outlined in Article 33 of the document?",
"2b531e83-a776-4195-a1ec-ae6652b8c03e": "How are notified bodies expected to ensure independence, objectivity, and impartiality in their conformity assessment activities, according to the regulations described in the document?",
"0b027062-f411-4751-a47c-d252d2a83808": "What steps must a notified body take when subcontracting specific tasks connected with conformity assessment or when using a subsidiary, according to Article 34 of the regulation?",
"6fb9cefd-c4e5-4e34-ac19-88fb5fb3cafb": "How are high-risk AI systems presumed to be in conformity with the requirements set out in Chapter 2 of the regulation, as per Article 40?",
"574650a9-c527-4e52-9db7-98b888848dd4": "What are the two conformity assessment procedures that providers of high-risk AI systems listed in point 1 of Annex III must follow when demonstrating compliance with the requirements set out in Chapter 2 of the Regulation?",
"803acb47-093c-4a8c-9d64-0db9282d9a4e": "In what circumstances should the market surveillance authority act as a notified body for the conformity assessment procedure of high-risk AI systems intended to be put into service by law enforcement, immigration or asylum authorities, as well as EU institutions, bodies, or agencies?",
"822e3c52-e35d-4f15-bad9-cb64324c6db5": "What are the requirements for high-risk AI systems in terms of conformity assessment procedures, especially when intended for use by law enforcement, immigration, asylum authorities, EU institutions, bodies, or agencies?",
"6fdd278c-5d97-4d80-8316-c44fd591d312": "How are certificates issued by notified bodies regulated in terms of validity, extension, and potential suspension or withdrawal based on compliance with requirements for AI systems?",
"98e7c8a5-3d75-4923-9371-5ee046fa806a": "What are the conditions under which a notified body may suspend or withdraw a certificate issued for an AI system, and what steps must be taken by the provider to ensure compliance with the requirements set out in Chapter 2 of the Title?",
"e8176bcb-f8ee-4049-a159-351ed2d587d1": "Under what circumstances can a market surveillance authority authorize the placing on the market of specific high-risk AI systems, and what steps must be taken to ensure compliance with the requirements of Chapter 2 of the Title during this authorization process?",
"7f9c3728-739b-4e2e-9646-c377e9e2b55a": "What are the steps that the Commission must take if objections are raised by a Member State against an authorisation issued by another Member State's market surveillance authority?",
"7bb312dd-abd9-4331-9b96-d2e804ea74f9": "What information must be included in the EU declaration of conformity for high-risk AI systems, according to Article 48 of the document?",
"5f3230b1-8b25-45e8-a76e-a915b9b64f1b": "What are the specific documentation requirements that the provider of a high-risk AI system must retain for a period of 10 years after the system has been placed on the market or put into service, according to Article 50 of the regulation?",
"dec2d034-015e-480d-8301-d185012d0f3c": "In what circumstances are providers required to inform natural persons that they are interacting with an AI system, as outlined in Article 52 of the regulation?",
"cd3ee4a5-88b4-4532-8242-0bcf46b57519": "What are the key conditions under which personal data can be processed for developing and testing innovative AI systems in the AI regulatory sandbox, as outlined in Article 54?",
"49ad8201-d62c-439f-9ff0-bbad0b177425": "How are small-scale providers and start-ups supported in the context of the AI regulatory sandboxes, according to the measures outlined in Article 55?",
"ae93f02b-aa93-423f-8d3f-c1120524913d": "What are the specific actions that Member States are required to undertake in support of small-scale providers and users according to Article 55 of the Regulation?",
"1ef35f06-303f-4316-9353-6447cbf20878": "What are the key tasks and responsibilities of the European Artificial Intelligence Board as outlined in Articles 56 and 58 of the Regulation?",
"1d44f6f5-ccc6-49a5-8d0d-a3f8a6b16648": "What are the key responsibilities of national competent authorities as outlined in Article 59 of the Regulation, and how are they organized to ensure objectivity and impartiality in their activities?",
"2254a295-94b9-4f51-b51f-af85dc008b4e": "Explain the requirements for post-market monitoring of high-risk AI systems as detailed in Article 61, including the establishment of a monitoring system, collection of relevant data, and integration with existing legislation for certain AI systems.",
"67b11309-b3e2-4c03-a5b0-47762008339b": "What are the requirements for reporting serious incidents and malfunctioning of high-risk AI systems placed on the Union market, and what is the timeline for notification to market surveillance authorities?",
"cf023089-943c-48c9-bbac-888d1abe7d3d": "How does the enforcement of this Regulation differ for high-risk AI systems related to products covered by legal acts listed in Annex II compared to AI systems used for law enforcement purposes?",
"6484e0d0-6054-4ff9-80a6-c99249d99d65": "What are the responsibilities of market surveillance authorities in relation to AI systems used by financial institutions regulated by Union legislation on financial services and AI systems used for law enforcement purposes?",
"7156513b-5306-451d-bad4-d7b661e4b2bc": "How do national public authorities or bodies supervise or enforce the respect of obligations under Union law protecting fundamental rights in relation to the use of high-risk AI systems, and what actions can they take in case of insufficient documentation?",
"4c0b72d8-6017-43a8-8160-c624d7911586": "What are the steps that a market surveillance authority must take if they find that an AI system does not comply with the requirements and obligations laid down in the Regulation?",
"782ddf45-61e3-4755-ba10-1e2960859ee9": "In the event of non-compliance of an AI system that extends beyond national territory, what actions must the market surveillance authority take and who must they inform?",
"ed9a4a15-bd9e-4136-8f74-6f9e8f983360": "How does the Union safeguard procedure outlined in Article 66 of the Regulation address objections raised by Member States against national measures related to AI systems, and what actions are required if a national measure is considered justified or unjustified?",
"878d51a9-3f7c-483b-a458-5f29bf488d12": "In accordance with Article 67, what steps must be taken by the market surveillance authority of a Member State if an AI system, while compliant with the Regulation, presents a risk to health, safety, fundamental rights, or other aspects of public interest protection?",
"715a29fe-c93a-411a-b3cc-3815211ce498": "How can the Commission and Member States encourage the voluntary application of requirements to AI systems through the use of codes of conduct, and what specific areas can these codes cover according to Article 69 of the Regulation?",
"04015c1a-33fa-4800-a546-2592b1a3a7b1": "What measures are outlined in Article 70 regarding the confidentiality of information obtained by national competent authorities and notified bodies in the application of the Regulation, and how do these measures ensure the protection of intellectual property rights and public and national security interests?",
"53a0fb71-2773-46bd-91bf-fe82cbcb4830": "What are the penalties for non-compliance with the prohibition of artificial intelligence practices as outlined in Article 5 of the Regulation?",
"01055908-de6f-486a-86d8-89b498b5529a": "How does the European Data Protection Supervisor determine the amount of administrative fines to impose on Union institutions, agencies, and bodies for infringements related to artificial intelligence practices and requirements under the Regulation?",
"559f2fd3-e188-4f04-a7ea-7d0775bae9f2": "What are the potential administrative fines for non-compliance of an AI system with requirements under the Regulation, excluding those in Articles 5 and 10?",
"12ad50c8-1c81-4519-a515-da5548ea39c0": "How is the delegation of power for adopting delegated acts handled by the Commission according to Article 73 of the Regulation?",
"203be7be-62fc-4747-b78b-bb7569f13570": "How does Regulation (EU) YYY/XX on Artificial Intelligence impact the requirements for safety components in various EU regulations such as Directive 2014/90/EU and Regulation (EU) 2018/1139?",
"73164917-31cf-4a39-b847-e13fad694dce": "In what ways do the amendments to Directive (EU) 2016/797 and Regulation (EU) 2018/858 reflect the consideration of requirements set out in Title III, Chapter 2 of Regulation (EU) YYY/XX on Artificial Intelligence for safety components?",
"0d884677-0091-4adf-8f57-dd939a3ee4fd": "How does Regulation (EU) YYY/XX on Artificial Intelligence impact the adoption of delegated acts and implementing acts concerning AI systems that are safety components?",
"a20a86bf-2c3b-4026-ba2c-f29e44c386f2": "In what circumstances does Regulation (EU) YYY/XX on Artificial Intelligence not apply to AI systems that are components of large-scale IT systems established by certain legal acts listed in Annex IX?",
"77319657-3bb7-4ca5-8a41-523a910404f0": "What specific aspects does the Commission need to evaluate and report on regarding the implementation of the Regulation, and to whom are these reports to be submitted?",
"27e05db8-96e6-430c-ad1d-8f8a7a3eab78": "How does the Regulation outline the timeline for the application of specific chapters and articles, and what derogations are mentioned in relation to the entry into force of the Regulation?",
"9dac45b7-4501-48fc-a61c-49656ef43127": "How does the proposal/initiative aim to ensure the development and use of trustworthy artificial intelligence in the Union, and what are the specific objectives outlined to achieve this goal?",
"31ee4bcb-1f3d-4577-8445-a17b2c2f92fc": "What are the expected results and impacts of the proposal/initiative on AI suppliers, AI users, and consumers, and how are these effects measured through indicators of performance?",
"b49b7176-1051-4fa5-a921-bdfccbeb9390": "How will the proposed Regulation on Artificial Intelligence benefit AI suppliers, AI users, and consumers, and what indicators will be used to monitor its implementation?",
"c52749c0-26cf-4325-8b3d-7d2e61b855e6": "What lessons can be learned from the shortcomings of the E-commerce Directive in terms of cooperation mechanisms among Member States, and how will the proposed Regulation address these issues through a governance structure at the EU level?",
"87350ed4-5224-4962-9321-2dafa8de6fef": "What specific cooperation mechanism at EU level is required for the implementation of the obligations laid down in the Regulation Laying Down Harmonised Rules on Artificial Intelligence?",
"a1c66485-5d69-426a-ae92-bc7e23e5c0c8": "How many Full-Time Equivalents (FTE) are estimated to be required for the enforcement of the new Regulation, and what specific roles will these FTEs fulfill in the implementation of the Regulation?",
"6afe7a04-d0e1-45be-a1d0-7f34d652b5fc": "How many Full-Time Equivalents (FTE) are estimated to be required for the enforcement of the new Regulation, and how are these FTE distributed between the activities of the Board and the European Data Protection Supervisor?",
"52164ce7-f8e0-4e75-a31a-2dc9e18c3dfe": "What measures are in place to prevent fraud and irregularities in the implementation of the Regulation, and how are existing fraud prevention measures applicable to the Commission expected to cover the additional appropriations necessary for this Regulation?",
"6d3e3117-3b45-4b40-b785-9f979c597de0": "What is the total amount of appropriations for DG CNECT in the multiannual financial framework for commitments and payments?",
"779b49f0-bd3d-487d-bd26-63cade8d02d4": "How much is allocated for human resources and other administrative expenditure for DG CNECT in the years 2023 to 2027?",
"f576b484-825b-41fa-8892-265dee270394": "What are the total appropriations for DG CNECT and the European Data Protection Supervisor for the years 2022 to 2027? Provide the breakdown of appropriations for each year.",
"3113eeb1-e8ba-4761-be4d-da4d183311ed": "Based on the estimated output funded with operational appropriations, how many database outputs, meetings outputs, and communication activities are planned for specific objective No 1? What is the total cost associated with these outputs?",
"007eadc6-6366-4453-bc52-173df63d0390": "What is the total amount of administrative appropriations required for the proposal/initiative in the years 2022 to 2027, broken down by human resources and other administrative expenditure categories?",
"01836290-e509-4eef-b30d-35fbc47bac18": "How are the appropriations for human resources and other administrative expenditure of an administrative nature planned to be funded for the proposal/initiative, according to the document?",
"d6c8681d-5bb0-4383-8eff-764a5784bf7d": "How many full-time equivalent units of external staff are estimated to be required for the proposal/initiative in the years 2023-2027?",
"a6588e8d-673f-49ff-b1c2-1c6cfedc7053": "How many AD FTE and AST FTE are estimated to be required for officials and temporary staff to carry out tasks such as preparing meetings, drafting reports, and maintaining relations with Member States' authorities?",
"479b15f5-8e07-49c1-9eef-f9105ab613a3": "How many full-time equivalent (FTE) staff are estimated to be required to fulfill the EDPS responsibilities under the draft legislation based on past experience?",
"7d9757a4-f0fc-407d-ae2e-3a2393f5aa65": "How is the proposal/initiative in the document expected to be financed within the Multiannual Financial Framework (MFF)?",
"50c830d4-365f-4a69-950b-6d3f9e91a35a": "What were the key conclusions reached in the European Council's special meeting on October 1st and 2nd, 2020, as outlined in document EUCO 13/20?",
"d41daa09-f3e0-4f94-8a71-ff696f1720f8": "How does the European Parliament address the ethical aspects of artificial intelligence, robotics, and related technologies in its resolution of October 20, 2020, as referenced in document 2020/2012(INL)?",
"380c4024-c30c-4176-b1ee-4164f38e2ead": "What are the key documents and initiatives mentioned in the context information related to the regulation and governance of digital services and artificial intelligence in the European Union?",
"0fed0844-0cf4-4070-8d7b-d8b1149991a3": "How do the European Commission and other stakeholders address the ethical aspects and trustworthiness of artificial intelligence in the European Union, as outlined in the context information?",
"4a5c8522-a95f-4a50-87f6-ee9e2e0c21e5": "What are the key regulations and directives mentioned in the context information related to data protection, aviation security, vehicle approval, and marine equipment?",
"6ca44c4c-c784-47e0-9301-9f9eb2b54eee": "How does Directive (EU) 2016/797 of the European Parliament and of the Council of 11 May 2016 contribute to the interoperability of the rail system within the European Union, as outlined in the context information?",
"60fa7103-2d9e-469e-8825-7b38c412243e": "What is the purpose of Directive 2016/797 and how does it relate to the interoperability of the rail system within the European Union?",
"5bd69dd6-3e97-4ff8-b29c-b3b2a21a5ba0": "How does Regulation 2017/745 on medical devices differ from Regulation 2017/746 on in vitro diagnostic medical devices in terms of their scope and impact on existing directives?",
"5df35a6d-ae7f-4f60-964d-ef34325af6a1": "What is the purpose of Regulation (EU) 2017/746 and which directives and decisions does it repeal?",
"b0723e17-2500-4f2f-b53b-6b00e04797fd": "How does Directive 2001/95/EC relate to the general safety of products in the European Union?",
"90179cff-0e39-4e45-ad02-7560c36165ef": "What is the purpose of Directive 2001/95/EC of the European Parliament and of the Council of 3 December 2001 on general product safety?",
"cad29db6-2589-4c02-93ea-51c300b50edc": "What is the significance of Regulation (EU) 2019/881 of the European Parliament and of the Council of 17 April 2019 on ENISA (the European Union Agency for Cybersecurity) and on information and communications technology cybersecurity certification in the context of EU regulations?",
"1515458e-8a2f-4594-b23f-e75a33ce8a0c": "What are the different types of artificial intelligence techniques and approaches mentioned in Annex I of the Proposal for a Regulation of the European Parliament and of the Council on Artificial Intelligence?",
"6bd5a001-f63c-4ba2-90eb-e6e69e0cca50": "Can you provide examples of Union harmonisation legislation based on the New Legislative Framework as listed in Section A of Annex II of the Proposal for a Regulation of the European Parliament and of the Council on Artificial Intelligence?",
"5ec39daf-0e0b-4b13-85a5-f1e7ab108c6a": "What is the purpose of Directive 2014/68/EU of the European Parliament and of the Council?",
"2e6e81ae-21b5-417a-bdcd-86459b8bf05c": "How does Regulation (EU) 2017/746 of the European Parliament and of the Council impact in vitro diagnostic medical devices?",
"f820382d-ee5b-4d58-823f-ad48b0e2af49": "Explain the significance of Regulation (EU) 2018/1139 in the field of civil aviation and the establishment of the European Union Aviation Safety Agency.",
"bc2f027f-029b-45ac-8554-4ab2f0c99d7e": "Discuss the implications of including AI systems in the management and operation of critical infrastructure, specifically in the context of road traffic and the supply of water, gas, heating, and electricity.",
"bc717280-dadf-4593-868d-03bb7ecee21d": "In the context of high-risk AI systems, explain the potential implications of using AI systems for the evaluation of the creditworthiness of natural persons and the establishment of their credit score.",
"f6abd919-3ad4-4859-87bc-9c7e2a5f3ca2": "Discuss the role of AI systems in assisting law enforcement authorities in making individual risk assessments of natural persons for offending or reoffending, as well as predicting the occurrence or reoccurrence of criminal offenses based on profiling.",
"d8b124f6-7347-408d-ae9e-2304383a45c8": "How does the technical documentation for AI systems, as outlined in Annex IV, ensure transparency and accountability in the development and deployment of AI systems intended for assisting judicial authorities in the administration of justice?",
"1faf7245-40ae-4a57-a16e-632b80689295": "Explain the significance of including detailed descriptions of elements such as design specifications, data requirements, and validation procedures in the technical documentation of AI systems, as required by Article 11(1) of the document.",
"8ab7f89f-4992-47d6-a286-0334e186fa70": "Explain the importance of human oversight measures in accordance with Article 14 in the context of monitoring, functioning, and controlling AI systems. How do these measures contribute to ensuring the accuracy and interpretation of AI system outputs by users?",
"7e858825-18f8-4b47-9e07-8c4b96389585": "Describe the process of conformity assessment based on assessment of quality management system and assessment of technical documentation as outlined in Annex VII. What are the key components of the quality management system that need to be assessed, and how does this assessment contribute to ensuring compliance with the essential requirements set out in Title III, Chapter 2 of the Regulation?",
"795303b6-2927-49a5-a62e-c0ef4612ace7": "What are the key components that must be included in the application for the assessment of a provider's quality management system for AI systems, as outlined in points 3.1(a)-(f) of the document?",
"e39802bc-4678-4eff-a818-71db078abc93": "Describe the process and requirements for the assessment of the technical documentation relating to an AI system by a notified body, as detailed in points 4.1-4.6 of the document.",
"92ce6bdf-23c2-49a9-81c7-bd4942a42c55": "What are the specific requirements for the data used to train an AI system, and what actions need to be taken if the AI system does not meet these requirements according to the document?",
"ec5790ea-93ca-4efe-ab03-ce5e61fc3870": "In the context of surveillance of the approved quality management system, what are the responsibilities of the notified body and the provider, and how is compliance ensured through periodic audits and additional tests of AI systems?",
"968485fa-ab5c-4a4e-8729-397b52ac2b8c": "What are the key regulations governing the use of the Schengen Information System in the areas of border checks, police cooperation, and judicial cooperation in criminal matters?",
"9e92e57f-d505-4583-b9bb-1091e0c6beb5": "How does the European Travel Information and Authorisation System (ETIAS) aim to enhance travel security and control within the European Union?",
"b59cc0b4-d287-47c5-9678-80d2a4324b72": "How does Regulation (EU) 2018/1241 contribute to enhancing security measures within the European Union in relation to travel information and authorisation?",
"5c0a8b97-5ea4-486d-8a01-3b8295a399bb": "Explain the purpose and significance of Regulation (EU) 2019/816 in establishing a centralised system for the identification of Member States holding conviction information on third-country nationals and stateless persons."
},
"corpus": {
"03245578-ca1e-4d85-ac9e-4fca491a8fa4": "![european flag](./../../../../images/eclogo.jpg)EUROPEAN COMMISSION\n\nBrussels, 21.4.2021\n\nCOM(2021) 206 final\n\n2021/0106(COD)\n\nProposal for a\n\nREGULATION OF THE EUROPEAN PARLIAMENT AND OF THE COUNCIL\n\nLAYING DOWN HARMONISED RULES ON ARTIFICIAL INTELLIGENCE (ARTIFICIAL\nINTELLIGENCE ACT) AND AMENDING CERTAIN UNION LEGISLATIVE ACTS\n\n{SEC(2021) 167 final} - {SWD(2021) 84 final} - {SWD(2021) 85 final}\n\n \n\nEXPLANATORY MEMORANDUM\n\n1.CONTEXT OF THE PROPOSAL\n\n1.1.Reasons for and objectives of the proposal\n\nThis explanatory memorandum accompanies the proposal for a Regulation laying\ndown harmonised rules on artificial intelligence (Artificial Intelligence\nAct). Artificial Intelligence (AI) is a fast evolving family of technologies\nthat can bring a wide array of economic and societal benefits across the\nentire spectrum of industries and social activities. By improving prediction,\noptimising operations and resource allocation, and personalising service\ndelivery, the use of artificial intelligence can support socially and\nenvironmentally beneficial outcomes and provide key competitive advantages to\ncompanies and the European economy. Such action is especially needed in high-\nimpact sectors, including climate change, environment and health, the public\nsector, finance, mobility, home affairs and agriculture. However, the same\nelements and techniques that power the socio-economic benefits of AI can also\nbring about new risks or negative consequences for individuals or the society.\nIn light of the speed of technological change and possible challenges, the EU\nis committed to strive for a balanced approach. It is in the Union interest to\npreserve the EU\u2019s technological leadership and to ensure that Europeans can\nbenefit from new technologies developed and functioning according to Union\nvalues, fundamental rights and principles.\n\nThis proposal delivers on the political commitment by President von der Leyen,\nwho announced in her political guidelines for the 2019-2024 Commission \u201cA\nUnion that strives for more\u201d 1 , that the Commission would put forward\nlegislation for a coordinated European approach on the human and ethical\nimplications of AI. Following on that announcement, on 19 February 2020 the\nCommission published the White Paper on AI - A European approach to excellence\nand trust 2 . The White Paper sets out policy options on how to achieve the\ntwin objective of promoting the uptake of AI and of addressing the risks\nassociated with certain uses of such technology. This proposal aims to\nimplement the second objective for the development of an ecosystem of trust by\nproposing a legal framework for trustworthy AI. The proposal is based on EU\nvalues and fundamental rights and aims to give people and other users the\nconfidence to embrace AI-based solutions, while encouraging businesses to\ndevelop them. AI should be a tool for people and be a force for good in\nsociety with the ultimate aim of increasing human well-being. Rules for AI\navailable in the Union market or otherwise affecting people in the Union\nshould therefore be human centric, so that people can trust that the\ntechnology is used in a way that is safe and compliant with the law, including\nthe respect of fundamental rights. 
Following the publication of the White\nPaper, the Commission launched a broad stakeholder consultation, which was met\nwith a great interest by a large number of stakeholders who were largely\nsupportive of regulatory intervention to address the challenges and concerns\nraised by the increasing use of AI.\n\nThe proposal also responds to explicit requests from the European Parliament\n(EP) and the European Council, which have repeatedly expressed calls for\nlegislative action to ensure a well-functioning internal market for artificial\nintelligence systems (\u2018AI systems\u2019) where both benefits and risks of AI are\nadequately addressed at Union level. It supports the objective of the Union\nbeing a global leader in the development of secure, trustworthy and ethical\nartificial intelligence as stated by the European Council 3 and ensures the\nprotection of ethical principles as specifically requested by the European\nParliament 4 .\n\nIn 2017, the European Council called for a \u2018sense of urgency to address\nemerging trends\u2019 including \u2018issues such as artificial intelligence \u2026, while at\nthe same time ensuring a high level of data protection, digital rights and\nethical standards\u2019 5 .",
"11dd086d-6c11-4a19-8ce1-807d63b3ac2a": "The proposal also responds to explicit requests from the European Parliament\n(EP) and the European Council, which have repeatedly expressed calls for\nlegislative action to ensure a well-functioning internal market for artificial\nintelligence systems (\u2018AI systems\u2019) where both benefits and risks of AI are\nadequately addressed at Union level. It supports the objective of the Union\nbeing a global leader in the development of secure, trustworthy and ethical\nartificial intelligence as stated by the European Council 3 and ensures the\nprotection of ethical principles as specifically requested by the European\nParliament 4 .\n\nIn 2017, the European Council called for a \u2018sense of urgency to address\nemerging trends\u2019 including \u2018issues such as artificial intelligence \u2026, while at\nthe same time ensuring a high level of data protection, digital rights and\nethical standards\u2019 5 . In its 2019 Conclusions on the Coordinated Plan on the\ndevelopment and use of artificial intelligence Made in Europe 6 , the Council\nfurther highlighted the importance of ensuring that European citizens\u2019 rights\nare fully respected and called for a review of the existing relevant\nlegislation to make it fit for purpose for the new opportunities and\nchallenges raised by AI. The European Council has also called for a clear\ndetermination of the AI applications that should be considered high-risk 7 .\n\nThe most recent Conclusions from 21 October 2020 further called for addressing\nthe opacity, complexity, bias, a certain degree of unpredictability and\npartially autonomous behaviour of certain AI systems, to ensure their\ncompatibility with fundamental rights and to facilitate the enforcement of\nlegal rules 8 .\n\nThe European Parliament has also undertaken a considerable amount of work in\nthe area of AI. In October 2020, it adopted a number of resolutions related to\nAI, including on ethics 9 , liability 10 and copyright 11 . In 2021, those\nwere followed by resolutions on AI in criminal matters 12 and in education,\nculture and the audio-visual sector 13 . The EP Resolution on a Framework of\nEthical Aspects of Artificial Intelligence, Robotics and Related Technologies\nspecifically recommends to the Commission to propose legislative action to\nharness the opportunities and benefits of AI, but also to ensure protection of\nethical principles. The resolution includes a text of the legislative proposal\nfor a regulation on ethical principles for the development, deployment and use\nof AI, robotics and related technologies. 
In accordance with the political\ncommitment made by President von der Leyen in her Political Guidelines as\nregards resolutions adopted by the European Parliament under Article 225 TFEU,\nthis proposal takes into account the aforementioned resolution of the European\nParliament in full respect of proportionality, subsidiarity and better law\nmaking principles.\n\nAgainst this political context, the Commission puts forward the proposed\nregulatory framework on Artificial Intelligence with the following specific\nobjectives:\n\n\u00b7ensure that AI systems placed on the Union market and used are safe and\nrespect existing law on fundamental rights and Union values;\n\n\u00b7ensure legal certainty to facilitate investment and innovation in AI;\n\n\u00b7enhance governance and effective enforcement of existing law on fundamental\nrights and safety requirements applicable to AI systems;\n\n\u00b7facilitate the development of a single market for lawful, safe and\ntrustworthy AI applications and prevent market fragmentation.\n\nTo achieve those objectives, this proposal presents a balanced and\nproportionate horizontal regulatory approach to AI that is limited to the\nminimum necessary requirements to address the risks and problems linked to AI,\nwithout unduly constraining or hindering technological development or\notherwise disproportionately increasing the cost of placing AI solutions on\nthe market. The proposal sets a robust and flexible legal framework. On the\none hand, it is comprehensive and future-proof in its fundamental regulatory\nchoices, including the principle-based requirements that AI systems should\ncomply with. On the other hand, it puts in place a proportionate regulatory\nsystem centred on a well-defined risk-based regulatory approach that does not\ncreate unnecessary restrictions to trade, whereby legal intervention is\ntailored to those concrete situations where there is a justified cause for\nconcern or where such concern can reasonably be anticipated in the near\nfuture. At the same time, the legal framework includes flexible mechanisms\nthat enable it to be dynamically adapted as the technology evolves and new\nconcerning situations emerge.\n\nThe proposal sets harmonised rules for the development, placement on the\nmarket and use of AI systems in the Union following a proportionate risk-based\napproach. It proposes a single future-proof definition of AI. Certain\nparticularly harmful AI practices are prohibited as contravening Union values,\nwhile specific restrictions and safeguards are proposed in relation to certain\nuses of remote biometric identification systems for the purpose of law\nenforcement.",
"f9ff291f-bda2-4e17-86c1-08325a273617": "On the other hand, it puts in place a proportionate regulatory\nsystem centred on a well-defined risk-based regulatory approach that does not\ncreate unnecessary restrictions to trade, whereby legal intervention is\ntailored to those concrete situations where there is a justified cause for\nconcern or where such concern can reasonably be anticipated in the near\nfuture. At the same time, the legal framework includes flexible mechanisms\nthat enable it to be dynamically adapted as the technology evolves and new\nconcerning situations emerge.\n\nThe proposal sets harmonised rules for the development, placement on the\nmarket and use of AI systems in the Union following a proportionate risk-based\napproach. It proposes a single future-proof definition of AI. Certain\nparticularly harmful AI practices are prohibited as contravening Union values,\nwhile specific restrictions and safeguards are proposed in relation to certain\nuses of remote biometric identification systems for the purpose of law\nenforcement. The proposal lays down a solid risk methodology to define \u201chigh-\nrisk\u201d AI systems that pose significant risks to the health and safety or\nfundamental rights of persons. Those AI systems will have to comply with a set\nof horizontal mandatory requirements for trustworthy AI and follow conformity\nassessment procedures before those systems can be placed on the Union market.\nPredictable, proportionate and clear obligations are also placed on providers\nand users of those systems to ensure safety and respect of existing\nlegislation protecting fundamental rights throughout the whole AI systems\u2019\nlifecycle. For some specific AI systems, only minimum transparency obligations\nare proposed, in particular when chatbots or \u2018deep fakes\u2019 are used.\n\nThe proposed rules will be enforced through a governance system at Member\nStates level, building on already existing structures, and a cooperation\nmechanism at Union level with the establishment of a European Artificial\nIntelligence Board. Additional measures are also proposed to support\ninnovation, in particular through AI regulatory sandboxes and other measures\nto reduce the regulatory burden and to support Small and Medium-Sized\nEnterprises (\u2018SMEs\u2019) and start-ups.\n\n1.2.Consistency with existing policy provisions in the policy area\n\nThe horizontal nature of the proposal requires full consistency with existing\nUnion legislation applicable to sectors where high-risk AI systems are already\nused or likely to be used in the near future.\n\nConsistency is also ensured with the EU Charter of Fundamental Rights and the\nexisting secondary Union legislation on data protection, consumer protection,\nnon-discrimination and gender equality. The proposal is without prejudice and\ncomplements the General Data Protection Regulation (Regulation (EU) 2016/679)\nand the Law Enforcement Directive (Directive (EU) 2016/680) with a set of\nharmonised rules applicable to the design, development and use of certain\nhigh-risk AI systems and restrictions on certain uses of remote biometric\nidentification systems. 
Furthermore, the proposal complements existing Union\nlaw on non-discrimination with specific requirements that aim to minimise the\nrisk of algorithmic discrimination, in particular in relation to the design\nand the quality of data sets used for the development of AI systems,\ncomplemented with obligations for testing, risk management, documentation and\nhuman oversight throughout the AI systems\u2019 lifecycle. The proposal is without\nprejudice to the application of Union competition law.\n\nAs regards high-risk AI systems which are safety components of products, this\nproposal will be integrated into the existing sectoral safety legislation to\nensure consistency, avoid duplications and minimise additional burdens. In\nparticular, as regards high-risk AI systems related to products covered by the\nNew Legislative Framework (NLF) legislation (e.g. machinery, medical devices,\ntoys), the requirements for AI systems set out in this proposal will be\nchecked as part of the existing conformity assessment procedures under the\nrelevant NLF legislation. With regard to the interplay of requirements, while\nthe safety risks specific to AI systems are meant to be covered by the\nrequirements of this proposal, NLF legislation aims at ensuring the overall\nsafety of the final product and therefore may contain specific requirements\nregarding the safe integration of an AI system into the final product. The\nproposal for a Machinery Regulation, which is adopted on the same day as this\nproposal, fully reflects this approach. As regards high-risk AI systems related\nto products covered by relevant Old Approach legislation (e.g. aviation,\ncars), this proposal would not directly apply. However, the ex-ante essential\nrequirements for high-risk AI systems set out in this proposal will have to be\ntaken into account when adopting relevant implementing or delegated\nlegislation under those acts.",
"a5199f23-bcdc-445d-a1b5-a8e8da0bd715": "With regard to the interplay of requirements, while\nthe safety risks specific to AI systems are meant to be covered by the\nrequirements of this proposal, NLF legislation aims at ensuring the overall\nsafety of the final product and therefore may contain specific requirements\nregarding the safe integration of an AI system into the final product. The\nproposal for a Machinery Regulation, which is adopted on the same day as this\nproposal fully reflects this approach. As regards high-risk AI systems related\nto products covered by relevant Old Approach legislation (e.g. aviation,\ncars), this proposal would not directly apply. However, the ex-ante essential\nrequirements for high-risk AI systems set out in this proposal will have to be\ntaken into account when adopting relevant implementing or delegated\nlegislation under those acts.\n\nAs regards AI systems provided or used by regulated credit institutions, the\nauthorities responsible for the supervision of the Union\u2019s financial services\nlegislation should be designated as competent authorities for supervising the\nrequirements in this proposal to ensure a coherent enforcement of the\nobligations under this proposal and the Union\u2019s financial services legislation\nwhere AI systems are to some extent implicitly regulated in relation to the\ninternal governance system of credit institutions. To further enhance\nconsistency, the conformity assessment procedure and some of the providers\u2019\nprocedural obligations under this proposal are integrated into the procedures\nunder Directive 2013/36/EU on access to the activity of credit institutions\nand the prudential supervision 14 .\n\nThis proposal is also consistent with the applicable Union legislation on\nservices, including on intermediary services regulated by the e-Commerce\nDirective 2000/31/EC 15 and the Commission\u2019s recent proposal for the Digital\nServices Act (DSA) 16 .\n\nIn relation to AI systems that are components of large-scale IT systems in the\nArea of Freedom, Security and Justice managed by the European Union Agency for\nthe Operational Management of Large-Scale IT Systems (eu-LISA), the proposal\nwill not apply to those AI systems that have been placed on the market or put\ninto service before one year has elapsed from the date of application of this\nRegulation, unless the replacement or amendment of those legal acts leads to a\nsignificant change in the design or intended purpose of the AI system or AI\nsystems concerned.\n\n1.3.Consistency with other Union policies\n\nThe proposal is part of a wider comprehensive package of measures that address\nproblems posed by the development and use of AI, as examined in the White\nPaper on AI. Consistency and complementarity is therefore ensured with other\nongoing or planned initiatives of the Commission that also aim to address\nthose problems, including the revision of sectoral product legislation (e.g.\nthe Machinery Directive, the General Product Safety Directive) and initiatives\nthat address liability issues related to new technologies, including AI\nsystems. 
Those initiatives will build on and complement this proposal in order\nto bring legal clarity and foster the development of an ecosystem of trust in\nAI in Europe.\n\nThe proposal is also coherent with the Commission\u2019s overall digital strategy\nin its contribution to promoting technology that works for people, one of the\nthree main pillars of the policy orientation and objectives announced in the\nCommunication \u2018Shaping Europe's digital future\u2019 17 . It lays down a coherent,\neffective and proportionate framework to ensure AI is developed in ways that\nrespect people\u2019s rights and earn their trust, making Europe fit for the\ndigital age and turning the next ten years into the Digital Decade 18 .\n\nFurthermore, the promotion of AI-driven innovation is closely linked to the\nData Governance Act 19 , the Open Data Directive 20 and other initiatives\nunder the EU strategy for data 21 , which will establish trusted mechanisms\nand services for the re-use, sharing and pooling of data that are essential\nfor the development of data-driven AI models of high quality.\n\nThe proposal also strengthens significantly the Union\u2019s role to help shape\nglobal norms and standards and promote trustworthy AI that is consistent with\nUnion values and interests. It provides the Union with a powerful basis to\nengage further with its external partners, including third countries, and at\ninternational fora on issues relating to AI.\n\n2.LEGAL BASIS, SUBSIDIARITY AND PROPORTIONALITY\n\n2.1.Legal basis\n\nThe legal basis for the proposal is in the first place Article 114 of the\nTreaty on the Functioning of the European Union (TFEU), which provides for the\nadoption of measures to ensure the establishment and functioning of the\ninternal market.\n\nThis proposal constitutes a core part of the EU digital single market\nstrategy.",
"51cf2907-1cdb-4f2c-a830-fa19fa1ae686": "The proposal also strengthens significantly the Union\u2019s role to help shape\nglobal norms and standards and promote trustworthy AI that is consistent with\nUnion values and interests. It provides the Union with a powerful basis to\nengage further with its external partners, including third countries, and at\ninternational fora on issues relating to AI.\n\n2.LEGAL BASIS, SUBSIDIARITY AND PROPORTIONALITY\n\n2.1.Legal basis\n\nThe legal basis for the proposal is in the first place Article 114 of the\nTreaty on the Functioning of the European Union (TFEU), which provides for the\nadoption of measures to ensure the establishment and functioning of the\ninternal market.\n\nThis proposal constitutes a core part of the EU digital single market\nstrategy. The primary objective of this proposal is to ensure the proper\nfunctioning of the internal market by setting harmonised rules in particular\non the development, placing on the Union market and the use of products and\nservices making use of AI technologies or provided as stand-alone AI systems.\nSome Member States are already considering national rules to ensure that AI is\nsafe and is developed and used in compliance with fundamental rights\nobligations. This will likely lead to two main problems: i) a fragmentation of\nthe internal market on essential elements regarding in particular the\nrequirements for the AI products and services, their marketing, their use, the\nliability and the supervision by public authorities, and ii) the substantial\ndiminishment of legal certainty for both providers and users of AI systems on\nhow existing and new rules will apply to those systems in the Union. Given the\nwide circulation of products and services across borders, these two problems\ncan be best solved through EU harmonizing legislation.\n\nIndeed, the proposal defines common mandatory requirements applicable to the\ndesign and development of certain AI systems before they are placed on the\nmarket that will be further operationalised through harmonised technical\nstandards. The proposal also addresses the situation after AI systems have\nbeen placed on the market by harmonising the way in which ex-post controls are\nconducted.\n\nIn addition, considering that this proposal contains certain specific rules on\nthe protection of individuals with regard to the processing of personal data,\nnotably restrictions of the use of AI systems for \u2018real-time\u2019 remote biometric\nidentification in publicly accessible spaces for the purpose of law\nenforcement, it is appropriate to base this regulation, in as far as those\nspecific rules are concerned, on Article 16 of the TFEU.\n\n2.2.Subsidiarity (for non-exclusive competence)\n\nThe nature of AI, which often relies on large and varied datasets and which\nmay be embedded in any product or service circulating freely within the\ninternal market, entails that the objectives of this proposal cannot be\neffectively achieved by Member States alone. Furthermore, an emerging\npatchwork of potentially divergent national rules will hamper the seamless\ncirculation of products and services related to AI systems across the EU and\nwill be ineffective in ensuring the safety and protection of fundamental\nrights and Union values across the different Member States. 
National\napproaches in addressing the problems will only create additional legal\nuncertainty and barriers, and will slow market uptake of AI.\n\nThe objectives of this proposal can be better achieved at Union level to avoid\na further fragmentation of the Single Market into potentially contradictory\nnational frameworks preventing the free circulation of goods and services\nembedding AI. A solid European regulatory framework for trustworthy AI will\nalso ensure a level playing field and protect all people, while strengthening\nEurope\u2019s competitiveness and industrial basis in AI. Only common action at\nUnion level can also protect the Union\u2019s digital sovereignty and leverage its\ntools and regulatory powers to shape global rules and standards.\n\n2.3.Proportionality\n\nThe proposal builds on existing legal frameworks and is proportionate and\nnecessary to achieve its objectives, since it follows a risk-based approach\nand imposes regulatory burdens only when an AI system is likely to pose high\nrisks to fundamental rights and safety. For other, non-high-risk AI systems,\nonly very limited transparency obligations are imposed, for example in terms\nof the provision of information to flag the use of an AI system when\ninteracting with humans. For high-risk AI systems, the requirements of\nhigh-quality data, documentation and traceability, transparency, human oversight,\naccuracy and robustness are strictly necessary to mitigate the risks to\nfundamental rights and safety posed by AI that are not covered by other\nexisting legal frameworks. Harmonised standards and supporting guidance and\ncompliance tools will assist providers and users in complying with the\nrequirements laid down by the proposal and minimise their costs. The costs\nincurred by operators are proportionate to the objectives achieved and the\neconomic and reputational benefits that operators can expect from this\nproposal.",
"41e99a3d-407b-4d82-b011-c98d55bfe46f": "For other, non-high-risk AI systems,\nonly very limited transparency obligations are imposed, for example in terms\nof the provision of information to flag the use of an AI system when\ninteracting with humans. For high-risk AI systems, the requirements of high\nquality data, documentation and traceability, transparency, human oversight,\naccuracy and robustness, are strictly necessary to mitigate the risks to\nfundamental rights and safety posed by AI and that are not covered by other\nexisting legal frameworks. Harmonised standards and supporting guidance and\ncompliance tools will assist providers and users in complying with the\nrequirements laid down by the proposal and minimise their costs. The costs\nincurred by operators are proportionate to the objectives achieved and the\neconomic and reputational benefits that operators can expect from this\nproposal.\n\n2.4.Choice of the instrument\n\nThe choice of a regulation as a legal instrument is justified by the need for\na uniform application of the new rules, such as definition of AI, the\nprohibition of certain harmful AI-enabled practices and the classification of\ncertain AI systems. The direct applicability of a Regulation, in accordance\nwith Article 288 TFEU, will reduce legal fragmentation and facilitate the\ndevelopment of a single market for lawful, safe and trustworthy AI systems. It\nwill do so, in particular, by introducing a harmonised set of core\nrequirements with regard to AI systems classified as high-risk and obligations\nfor providers and users of those systems, improving the protection of\nfundamental rights and providing legal certainty for operators and consumers\nalike.\n\nAt the same time, the provisions of the regulation are not overly prescriptive\nand leave room for different levels of Member State action for elements that\ndo not undermine the objectives of the initiative, in particular the internal\norganisation of the market surveillance system and the uptake of measures to\nfoster innovation.\n\n3.RESULTS OF EX-POST EVALUATIONS, STAKEHOLDER CONSULTATIONS AND IMPACT\nASSESSMENTS\n\n3.1.Stakeholder consultation\n\nThis proposal is the result of extensive consultation with all major\nstakeholders, in which the general principles and minimum standards for\nconsultation of interested parties by the Commission were applied.\n\nAn online public consultation was launched on 19 February 2020 along with the\npublication of the White Paper on Artificial Intelligence and ran until 14\nJune 2020. The objective of that consultation was to collect views and\nopinions on the White Paper. It targeted all interested stakeholders from the\npublic and private sectors, including governments, local authorities,\ncommercial and non-commercial organisations, social partners, experts,\nacademics and citizens. After analysing all the responses received, the\nCommission published a summary outcome and the individual responses on its\nwebsite 22 .\n\nIn total, 1215 contributions were received, of which 352 were from companies\nor business organisations/associations, 406 from individuals (92%individuals\nfrom EU ), 152 on behalf of academic/research institutions, and 73 from public\nauthorities. Civil society\u2019s voices were represented by 160 respondents (among\nwhich 9 consumers\u2019 organisations, 129 non-governmental organisations and 22\ntrade unions), 72 respondents contributed as \u2018others\u2019. 
Of the 352 business and\nindustry representatives, 222 were companies and business representatives,\n41.5% of which were micro, small and medium-sized enterprises. The rest were\nbusiness associations. Overall, 84% of business and industry replies came from\nthe EU-27. Depending on the question, between 81 and 598 of the respondents\nused the free text option to insert comments. Over 450 position papers were\nsubmitted through the EU Survey website, either in addition to questionnaire\nanswers (over 400) or as stand-alone contributions (over 50).\n\nOverall, there is general agreement amongst stakeholders on the need for\naction. A large majority of stakeholders agree that legislative gaps exist or\nthat new legislation is needed. However, several stakeholders warn the\nCommission to avoid duplication, conflicting obligations and overregulation.\nThere were many comments underlining the importance of a technology-neutral\nand proportionate regulatory framework.\n\nStakeholders mostly requested a narrow, clear and precise definition for AI.\nStakeholders also highlighted that besides the clarification of the term\n\u2018AI\u2019, it is important to define \u2018risk\u2019, \u2018high-risk\u2019, \u2018low-risk\u2019, \u2018remote\nbiometric identification\u2019 and \u2018harm\u2019.\n\nMost of the respondents are explicitly in favour of the risk-based approach.\nUsing a risk-based framework was considered a better option than blanket\nregulation of all AI systems. The types of risks and threats should be based\non a sector-by-sector and case-by-case approach.",
"7e3728b3-e60c-4a6a-ab0b-e9a9e1ac9477": "Overall, there is a general agreement amongst stakeholders on a need for\naction. A large majority of stakeholders agree that legislative gaps exist or\nthat new legislation is needed. However, several stakeholders warn the\nCommission to avoid duplication, conflicting obligations and overregulation.\nThere were many comments underlining the importance of a technology neutral\nand proportionate regulatory framework.\n\nStakeholders mostly requested a narrow, clear and precise definition for AI.\nStakeholders also highlighted that besides the clarification of the term of\nAI, it is important to define \u2018risk\u2019, \u2018high-risk\u2019, \u2018low-risk\u2019, \u2018remote\nbiometric identification\u2019 and \u2018harm\u2019.\n\nMost of the respondents are explicitly in favour of the risk-based approach.\nUsing a risk-based framework was considered a better option than blanket\nregulation of all AI systems. The types of risks and threats should be based\non a sector-by-sector and case-by-case approach. Risks also should be\ncalculated taking into account the impact on rights and safety.\n\nRegulatory sandboxes could be very useful for the promotion of AI and are\nwelcomed by certain stakeholders, especially the Business Associations.\n\nAmong those who formulated their opinion on the enforcement models, more than\n50%, especially from the business associations, were in favour of a\ncombination of an ex-ante risk self-assessment and an ex-post enforcement for\nhigh-risk AI systems.\n\n3.2.Collection and use of expertise\n\nThe proposal builds on two years of analysis and close involvement of\nstakeholders, including academics, businesses, social partners, non-\ngovernmental organisations, Member States and citizens. The preparatory work\nstarted in 2018 with the setting up of a High-Level Expert Group on AI (HLEG)\nwhich had an inclusive and broad composition of 52 well-known experts tasked\nto advise the Commission on the implementation of the Commission\u2019s Strategy on\nArtificial Intelligence. In April 2019, the Commission supported 23 the key\nrequirements set out in the HLEG ethics guidelines for Trustworthy AI 24 ,\nwhich had been revised to take into account more than 500 submissions from\nstakeholders. The key requirements reflect a widespread and common approach,\nas evidenced by a plethora of ethical codes and principles developed by many\nprivate and public organisations in Europe and beyond, that AI development and\nuse should be guided by certain essential value-oriented principles. The\nAssessment List for Trustworthy Artificial Intelligence (ALTAI) 25 made those\nrequirements operational in a piloting process with over 350 organisations.\n\nIn addition, the AI Alliance 26 was formed as a platform for approximately\n4000 stakeholders to debate the technological and societal implications of AI,\nculminating in a yearly AI Assembly.\n\nThe White Paper on AI further developed this inclusive approach, inciting\ncomments from more than 1250 stakeholders, including over 450 additional\nposition papers. As a result, the Commission published an Inception Impact\nAssessment, which in turn attracted more than 130 comments 27 . Additional\nstakeholder workshops and events were also organised the results of which\nsupport the analysis in the impact assessment and the policy choices made in\nthis proposal 28 . 
An external study was also procured to feed into the impact\nassessment.\n\n3.3.Impact assessment\n\nIn line with its \u201cBetter Regulation\u201d policy, the Commission conducted an\nimpact assessment for this proposal, which was examined by the Commission's Regulatory\nScrutiny Board. A meeting with the Regulatory Scrutiny Board was held on 16\nDecember 2020, which was followed by a negative opinion. After substantial\nrevision of the impact assessment to address the comments and a resubmission\nof the impact assessment, the Regulatory Scrutiny Board issued a positive\nopinion on 21 March 2021. The opinions of the Regulatory Scrutiny Board, the\nrecommendations and an explanation of how they have been taken into account\nare presented in Annex 1 of the impact assessment.\n\nThe Commission examined different policy options to achieve the general\nobjective of the proposal, which is to ensure the proper functioning of the\nsingle market by creating the conditions for the development and use of\ntrustworthy AI in the Union.\n\nFive policy options of different degrees of regulatory intervention were\nassessed:\n\n\u00b7Option 1: EU legislative instrument setting up a voluntary labelling scheme;\n\n\u00b7Option 2: a sectoral, \u201cad-hoc\u201d approach;\n\n\u00b7Option 3: Horizontal EU legislative instrument following a proportionate\nrisk-based approach;\n\n\u00b7Option 3+: Horizontal EU legislative instrument following a proportionate\nrisk-based approach + codes of conduct for non-high-risk AI systems;\n\n\u00b7Option 4: Horizontal EU legislative instrument establishing mandatory\nrequirements for all AI systems, irrespective of the risk they pose.",
"7902fa7a-008b-4fc0-9fb7-27ad82ce9f08": "The opinions of the Regulatory Scrutiny Board, the\nrecommendations and an explanation of how they have been taken into account\nare presented in Annex 1 of the impact assessment.\n\nThe Commission examined different policy options to achieve the general\nobjective of the proposal, which is to ensure the proper functioning of the\nsingle market by creating the conditions for the development and use of\ntrustworthy AI in the Union.\n\nFour policy options of different degrees of regulatory intervention were\nassessed:\n\n\u00b7Option 1: EU legislative instrument setting up a voluntary labelling scheme;\n\n\u00b7Option 2: a sectoral, \u201cad-hoc\u201d approach;\n\n\u00b7Option 3: Horizontal EU legislative instrument following a proportionate\nrisk-based approach;\n\n\u00b7Option 3+: Horizontal EU legislative instrument following a proportionate\nrisk-based approach + codes of conduct for non-high-risk AI systems;\n\n\u00b7Option 4: Horizontal EU legislative instrument establishing mandatory\nrequirements for all AI systems, irrespective of the risk they pose.\n\nAccording to the Commission's established methodology, each policy option was\nevaluated against economic and societal impacts, with a particular focus on\nimpacts on fundamental rights. The preferred option is option 3+, a regulatory\nframework for high-risk AI systems only, with the possibility for all\nproviders of non-high-risk AI systems to follow a code of conduct. The\nrequirements will concern data, documentation and traceability, provision of\ninformation and transparency, human oversight and robustness and accuracy and\nwould be mandatory for high-risk AI systems. Companies that introduced codes\nof conduct for other AI systems would do so voluntarily.\n\nThe preferred option was considered suitable to address in the most effective\nway the objectives of this proposal. By requiring a restricted yet effective\nset of actions from AI developers and users, the preferred option limits the\nrisks of violation of fundamental rights and safety of people and foster\neffective supervision and enforcement, by targeting the requirements only to\nsystems where there is a high risk that such violations could occur. As a\nresult, that option keeps compliance costs to a minimum, thus avoiding an\nunnecessary slowing of uptake due to higher prices and compliance costs. In\norder to address possible disadvantages for SMEs, this option includes several\nprovisions to support their compliance and reduce their costs, including\ncreation of regulatory sandboxes and obligation to consider SMEs interests\nwhen setting fees related to conformity assessment.\n\nThe preferred option will increase people\u2019s trust in AI, companies will gain\nin legal certainty, and Member States will see no reason to take unilateral\naction that could fragment the single market. As a result of higher demand due\nto higher trust, more available offers due to legal certainty, and the absence\nof obstacles to cross-border movement of AI systems, the single market for AI\nwill likely flourish. The European Union will continue to develop a fast-\ngrowing AI ecosystem of innovative services and products embedding AI\ntechnology or stand-alone AI systems, resulting in increased digital autonomy.\n\nBusinesses or public authorities that develop or use AI applications that\nconstitute a high risk for the safety or fundamental rights of citizens would\nhave to comply with specific requirements and obligations. 
Compliance with\nthese requirements would imply costs amounting to approximately EUR 6000 to\nEUR 7000 for the supply of an average high-risk AI system of around EUR\n170000 by 2025. For AI users, there would also be the annual cost for the time\nspent on ensuring human oversight where this is appropriate, depending on the\nuse case. Those costs have been estimated at approximately EUR 5000 to EUR 8000\nper year. Verification costs could amount to another EUR 3000 to EUR 7500\nfor suppliers of high-risk AI. Businesses or public authorities that develop\nor use any AI applications not classified as high risk would only have minimal\ninformation obligations. However, they could choose to join others and\ntogether adopt a code of conduct to follow suitable requirements, and to\nensure that their AI systems are trustworthy. In such a case, costs would be\nat most as high as for high-risk AI systems, but most probably lower.\n\nThe impacts of the policy options on different categories of stakeholders\n(economic operators/business; conformity assessment bodies, standardisation\nbodies and other public bodies; individuals/citizens; researchers) are\nexplained in detail in Annex 3 of the impact assessment supporting this\nproposal.\n\n3.4.Regulatory fitness and simplification\n\nThis proposal lays down obligations that will apply to providers and users of\nhigh-risk AI systems. For providers who develop and place such systems on the\nUnion market, it will create legal certainty and ensure that no obstacles to\nthe cross-border provision of AI-related services and products emerge.",
"78b1722e-2d24-4a3e-ae86-13c54aa19534": "However, they could choose to join others and\ntogether adopt a code of conduct to follow suitable requirements, and to\nensure that their AI systems are trustworthy. In such a case, costs would be\nat most as high as for high-risk AI systems, but most probably lower.\n\nThe impacts of the policy options on different categories of stakeholders\n(economic operators/ business; conformity assessment bodies, standardisation\nbodies and other public bodies; individuals/citizens; researchers) are\nexplained in detail in Annex 3 of the Impact assessment supporting this\nproposal.\n\n3.4.Regulatory fitness and simplification\n\nThis proposal lays down obligation that will apply to providers and users of\nhigh-risk AI systems. For providers who develop and place such systems on the\nUnion market, it will create legal certainty and ensure that no obstacle to\nthe cross-border provision of AI-related services and products emerge. For\ncompanies using AI, it will promote trust among their customers. For national\npublic administrations, it will promote public trust in the use of AI and\nstrengthen enforcement mechanisms (by introducing a European coordination\nmechanism, providing for appropriate capacities, and facilitating audits of\nthe AI systems with new requirements for documentation, traceability and\ntransparency). Moreover, the framework will envisage specific measures\nsupporting innovation, including regulatory sandboxes and specific measures\nsupporting small-scale users and providers of high-risk AI systems to comply\nwith the new rules.\n\nThe proposal also specifically aims at strengthening Europe\u2019s competitiveness\nand industrial basis in AI. Full consistency is ensured with existing sectoral\nUnion legislation applicable to AI systems (e.g. on products and services)\nthat will bring further clarity and simplify the enforcement of the new rules.\n\n3.5.Fundamental rights\n\nThe use of AI with its specific characteristics (e.g. opacity, complexity,\ndependency on data, autonomous behaviour) can adversely affect a number of\nfundamental rights enshrined in the EU Charter of Fundamental Rights (\u2018the\nCharter\u2019). This proposal seeks to ensure a high level of protection for those\nfundamental rights and aims to address various sources of risks through a\nclearly defined risk-based approach. With a set of requirements for\ntrustworthy AI and proportionate obligations on all value chain participants,\nthe proposal will enhance and promote the protection of the rights protected\nby the Charter: the right to human dignity (Article 1), respect for private\nlife and protection of personal data (Articles 7 and 8), non-discrimination\n(Article 21) and equality between women and men (Article 23). It aims to\nprevent a chilling effect on the rights to freedom of expression (Article 11)\nand freedom of assembly (Article 12), to ensure protection of the right to an\neffective remedy and to a fair trial, the rights of defence and the\npresumption of innocence (Articles 47 and 48), as well as the general\nprinciple of good administration. Furthermore, as applicable in certain\ndomains, the proposal will positively affect the rights of a number of special\ngroups, such as the workers\u2019 rights to fair and just working conditions\n(Article 31), a high level of consumer protection (Article 28), the rights of\nthe child (Article 24) and the integration of persons with disabilities\n(Article 26). 
The right to a high level of environmental protection and the\nimprovement of the quality of the environment (Article 37) is also relevant,\nincluding in relation to the health and safety of people. The obligations for\nex ante testing, risk management and human oversight will also facilitate the\nrespect of other fundamental rights by minimising the risk of erroneous or\nbiased AI-assisted decisions in critical areas such as education and training,\nemployment, important services, law enforcement and the judiciary. In case\ninfringements of fundamental rights still happen, effective redress for\naffected persons will be made possible by ensuring transparency and\ntraceability of the AI systems coupled with strong ex post controls.\n\nThis proposal imposes some restrictions on the freedom to conduct business\n(Article 16) and the freedom of art and science (Article 13) to ensure\ncompliance with overriding reasons of public interest such as health, safety,\nconsumer protection and the protection of other fundamental rights\n(\u2018responsible innovation\u2019) when high-risk AI technology is developed and used.\nThose restrictions are proportionate and limited to the minimum necessary to\nprevent and mitigate serious safety risks and likely infringements of\nfundamental rights.",
"748908de-d1bd-47f7-8dee-189d44943fc2": "In case\ninfringements of fundamental rights still happen, effective redress for\naffected persons will be made possible by ensuring transparency and\ntraceability of the AI systems coupled with strong ex post controls.\n\nThis proposal imposes some restrictions on the freedom to conduct business\n(Article 16) and the freedom of art and science (Article 13) to ensure\ncompliance with overriding reasons of public interest such as health, safety,\nconsumer protection and the protection of other fundamental rights\n(\u2018responsible innovation\u2019) when high-risk AI technology is developed and used.\nThose restrictions are proportionate and limited to the minimum necessary to\nprevent and mitigate serious safety risks and likely infringements of\nfundamental rights.\n\nThe increased transparency obligations will also not disproportionately affect\nthe right to protection of intellectual property (Article 17(2)), since they\nwill be limited only to the minimum necessary information for individuals to\nexercise their right to an effective remedy and to the necessary transparency\ntowards supervision and enforcement authorities, in line with their mandates.\nAny disclosure of information will be carried out in compliance with relevant\nlegislation in the field, including Directive 2016/943 on the protection of\nundisclosed know-how and business information (trade secrets) against their\nunlawful acquisition, use and disclosure. When public authorities and notified\nbodies need to be given access to confidential information or source code to\nexamine compliance with substantial obligations, they are placed under binding\nconfidentiality obligations.\n\n4.BUDGETARY IMPLICATIONS\n\nMember States will have to designate supervisory authorities in charge of\nimplementing the legislative requirements. Their supervisory function could\nbuild on existing arrangements, for example regarding conformity assessment\nbodies or market surveillance, but would require sufficient technological\nexpertise and human and financial resources. Depending on the pre-existing\nstructure in each Member State, this could amount to 1 to 25 Full Time\nEquivalents per Member State.\n\nA detailed overview of the costs involved is provided in the \u2018financial\nstatement\u2019 linked to this proposal.\n\n5.OTHER ELEMENTS\n\n5.1.Implementation plans and monitoring, evaluation and reporting arrangements\n\nProviding for a robust monitoring and evaluation mechanism is crucial to\nensure that the proposal will be effective in achieving its specific\nobjectives. The Commission will be in charge of monitoring the effects of the\nproposal. It will establish a system for registering stand-alone high-risk AI\napplications in a public EU-wide database. This registration will also enable\ncompetent authorities, users and other interested people to verify if the\nhigh-risk AI system complies with the requirements laid down in the proposal\nand to exercise enhanced oversight over those AI systems posing high risks to\nfundamental rights. To feed this database, AI providers will be obliged to\nprovide meaningful information about their systems and the conformity\nassessment carried out on those systems.\n\nMoreover, AI providers will be obliged to inform national competent\nauthorities about serious incidents or malfunctioning that constitute a breach\nof fundamental rights obligations as soon as they become aware of them, as\nwell as any recalls or withdrawals of AI systems from the market. 
National\ncompetent authorities will then investigate the incidents or malfunctioning,\ncollect all the necessary information and regularly transmit it to the\nCommission with adequate metadata. The Commission will complement this\ninformation on the incidents with a comprehensive analysis of the overall market\nfor AI.\n\nThe Commission will publish a report evaluating and reviewing the proposed AI\nframework five years following the date on which it becomes applicable.\n\n5.2.Detailed explanation of the specific provisions of the proposal\n\n5.2.1.SCOPE AND DEFINITIONS (TITLE I)\n\nTitle I defines the subject matter of the regulation and the scope of\napplication of the new rules that cover the placing on the market, putting\ninto service and use of AI systems. It also sets out the definitions used\nthroughout the instrument. The definition of AI system in the legal framework\naims to be as technology-neutral and future-proof as possible, taking into\naccount the fast technological and market developments related to AI. In order\nto provide the needed legal certainty, Title I is complemented by Annex I,\nwhich contains a detailed list of approaches and techniques for the\ndevelopment of AI to be adapted by the Commission in line with new\ntechnological developments. Key participants across the AI value chain are\nalso clearly defined, such as providers and users of AI systems, covering both\npublic and private operators to ensure a level playing field.\n\n5.2.2.PROHIBITED ARTIFICIAL INTELLIGENCE PRACTICES (TITLE II)\n\nTitle II establishes a list of prohibited AI practices.",
"42cffcb5-e37a-44fe-bee9-32f24aadf256": "It also sets out the definitions used\nthroughout the instrument. The definition of AI system in the legal framework\naims to be as technology neutral and future proof as possible, taking into\naccount the fast technological and market developments related to AI. In order\nto provide the needed legal certainty, Title I is complemented by Annex I,\nwhich contains a detailed list of approaches and techniques for the\ndevelopment of AI to be adapted by the Commission in line with new\ntechnological developments. Key participants across the AI value chain are\nalso clearly defined such as providers and users of AI systems that cover both\npublic and private operators to ensure a level playing field.\n\n5.2.2.PROHIBITED ARTIFICIAL INTELLIGENCE PRACTICES (TITLE II)\n\nTitle II establishes a list of prohibited AI. The regulation follows a risk-\nbased approach, differentiating between uses of AI that create (i) an\nunacceptable risk, (ii) a high risk, and (iii) low or minimal risk. The list\nof prohibited practices in Title II comprises all those AI systems whose use\nis considered unacceptable as contravening Union values, for instance by\nviolating fundamental rights. The prohibitions covers practices that have a\nsignificant potential to manipulate persons through subliminal techniques\nbeyond their consciousness or exploit vulnerabilities of specific vulnerable\ngroups such as children or persons with disabilities in order to materially\ndistort their behaviour in a manner that is likely to cause them or another\nperson psychological or physical harm. Other manipulative or exploitative\npractices affecting adults that might be facilitated by AI systems could be\ncovered by the existing data protection, consumer protection and digital\nservice legislation that guarantee that natural persons are properly informed\nand have free choice not to be subject to profiling or other practices that\nmight affect their behaviour. The proposal also prohibits AI-based social\nscoring for general purposes done by public authorities. Finally, the use of\n\u2018real time\u2019 remote biometric identification systems in publicly accessible\nspaces for the purpose of law enforcement is also prohibited unless certain\nlimited exceptions apply.\n\n5.2.3.HIGH-RISK AI SYSTEMS (TITLE III)\n\nTitle III contains specific rules for AI systems that create a high risk to\nthe health and safety or fundamental rights of natural persons. In line with a\nrisk-based approach, those high-risk AI systems are permitted on the European\nmarket subject to compliance with certain mandatory requirements and an ex-\nante conformity assessment. The classification of an AI system as high-risk is\nbased on the intended purpose of the AI system, in line with existing product\nsafety legislation. 
Therefore, the classification as high-risk depends not only\non the function performed by the AI system, but also on the specific\npurpose and modalities for which that system is used.\n\nChapter 1 of Title III sets the classification rules and identifies two main\ncategories of high-risk AI systems:\n\n\u00b7AI systems intended to be used as safety components of products that are\nsubject to third party ex-ante conformity assessment;\n\n\u00b7other stand-alone AI systems with mainly fundamental rights implications that\nare explicitly listed in Annex III.\n\nThis list of high-risk AI systems in Annex III contains a limited number of AI\nsystems whose risks have already materialised or are likely to materialise in\nthe near future. To ensure that the regulation can be adjusted to emerging\nuses and applications of AI, the Commission may expand the list of high-risk\nAI systems used within certain pre-defined areas, by applying a set of\ncriteria and a risk assessment methodology.\n\nChapter 2 sets out the legal requirements for high-risk AI systems in relation\nto data and data governance, documentation and record keeping, transparency\nand provision of information to users, human oversight, robustness, accuracy\nand security. The proposed minimum requirements are already state-of-the-art\nfor many diligent operators and the result of two years of preparatory work,\nderived from the Ethics Guidelines of the HLEG 29 , piloted by more than 350\norganisations 30 . They are also largely consistent with other international\nrecommendations and principles, which ensures that the proposed AI framework\nis compatible with those adopted by the EU\u2019s international trade partners. The\nprecise technical solutions to achieve compliance with those requirements may\nbe provided by standards or by other technical specifications or otherwise be\ndeveloped in accordance with general engineering or scientific knowledge at\nthe discretion of the provider of the AI system. This flexibility is\nparticularly important, because it allows providers of AI systems to choose\nthe way to meet their requirements, taking into account the state of the art\nand technological and scientific progress in this field.\n\nChapter 3 places a clear set of horizontal obligations on providers of high-\nrisk AI systems.",
"d2b4f960-b43e-402b-9037-da3f6e8fbc9f": "The proposed minimum requirements are already state-of-the-art\nfor many diligent operators and the result of two years of preparatory work,\nderived from the Ethics Guidelines of the HLEG 29 , piloted by more than 350\norganisations 30 . They are also largely consistent with other international\nrecommendations and principles, which ensures that the proposed AI framework\nis compatible with those adopted by the EU\u2019s international trade partners. The\nprecise technical solutions to achieve compliance with those requirements may\nbe provided by standards or by other technical specifications or otherwise be\ndeveloped in accordance with general engineering or scientific knowledge at\nthe discretion of the provider of the AI system. This flexibility is\nparticularly important, because it allows providers of AI systems to choose\nthe way to meet their requirements, taking into account the state-of-the-art\nand technological and scientific progress in this field.\n\nChapter 3 places a clear set of horizontal obligations on providers of high-\nrisk AI systems. Proportionate obligations are also placed on users and other\nparticipants across the AI value chain (e.g., importers, distributors,\nauthorized representatives).\n\nChapter 4 sets the framework for notified bodies to be involved as independent\nthird parties in conformity assessment procedures, while Chapter 5 explains in\ndetail the conformity assessment procedures to be followed for each type of\nhigh-risk AI system. The conformity assessment approach aims to minimise the\nburden for economic operators as well as for notified bodies, whose capacity\nneeds to be progressively ramped up over time. AI systems intended to be used\nas safety components of products that are regulated under the New Legislative\nFramework legislation (e.g. machinery, toys, medical devices, etc.) will be\nsubject to the same ex-ante and ex-post compliance and enforcement mechanisms\nof the products of which they are a component. The key difference is that the\nex-ante and ex-post mechanisms will ensure compliance not only with the\nrequirements established by sectorial legislation, but also with the\nrequirements established by this regulation.\n\nAs regards stand-alone high-risk AI systems that are referred to in Annex III,\na new compliance and enforcement system will be established. This follows the\nmodel of the New Legislative Framework legislation implemented through\ninternal control checks by the providers with the exception of remote\nbiometric identification systems that would be subject to third party\nconformity assessment. A comprehensive ex-ante conformity assessment through\ninternal checks, combined with a strong ex-post enforcement, could be an\neffective and reasonable solution for those systems, given the early phase of\nthe regulatory intervention and the fact the AI sector is very innovative and\nexpertise for auditing is only now being accumulated. An assessment through\ninternal checks for \u2018stand-alone\u2019 high-risk AI systems would require a full,\neffective and properly documented ex ante compliance with all requirements of\nthe regulation and compliance with robust quality and risk management systems\nand post-market monitoring. 
After the provider has performed the relevant\nconformity assessment, it should register those stand-alone high-risk AI\nsystems in an EU database that will be managed by the Commission to increase\npublic transparency and oversight and strengthen ex post supervision by\ncompetent authorities. By contrast, for reasons of consistency with the\nexisting product safety legislation, the conformity assessments of AI systems\nthat are safety components of products will follow a system with third party\nconformity assessment procedures already established under the relevant\nsectoral product safety legislation. New ex ante re-assessments of\nconformity will be needed in case of substantial modifications to the AI\nsystems (and notably changes which go beyond what is pre-determined by the\nprovider in its technical documentation and checked at the moment of the ex-\nante conformity assessment).\n\n5.2.4.TRANSPARENCY OBLIGATIONS FOR CERTAIN AI SYSTEMS (TITLE IV)\n\nTitle IV concerns certain AI systems to take account of the specific risks of\nmanipulation they pose. Transparency obligations will apply for systems that\n(i) interact with humans, (ii) are used to detect emotions or determine\nassociation with (social) categories based on biometric data, or (iii)\ngenerate or manipulate content (\u2018deep fakes\u2019). When persons interact with an\nAI system or their emotions or characteristics are recognised through\nautomated means, people must be informed of that circumstance. If an AI system\nis used to generate or manipulate image, audio or video content that\nappreciably resembles authentic content, there should be an obligation to\ndisclose that the content is generated through automated means, subject to\nexceptions for legitimate purposes (law enforcement, freedom of expression).\nThis allows persons to make informed choices or step back from a given\nsituation.",
"7e60f260-6f64-4516-b132-94eca43d06ab": "Transparency obligations will apply for systems that\n(i) interact with humans, (ii) are used to detect emotions or determine\nassociation with (social) categories based on biometric data, or (iii)\ngenerate or manipulate content (\u2018deep fakes\u2019). When persons interact with an\nAI system or their emotions or characteristics are recognised through\nautomated means, people must be informed of that circumstance. If an AI system\nis used to generate or manipulate image, audio or video content that\nappreciably resembles authentic content, there should be an obligation to\ndisclose that the content is generated through automated means, subject to\nexceptions for legitimate purposes (law enforcement, freedom of expression).\nThis allows persons to make informed choices or step back from a given\nsituation.\n\n5.2.5.MEASURES IN SUPPORT OF INNOVATION (TITLE V)\n\nTitle V contributes to the objective to create a legal framework that is\ninnovation-friendly, future-proof and resilient to disruption. To that end, it\nencourages national competent authorities to set up regulatory sandboxes and\nsets a basic framework in terms of governance, supervision and liability. AI\nregulatory sandboxes establish a controlled environment to test innovative\ntechnologies for a limited time on the basis of a testing plan agreed with the\ncompetent authorities. Title V also contains measures to reduce the regulatory\nburden on SMEs and start-ups.\n\n5.2.6.GOVERNANCE AND IMPLEMENTATION (TITLES VI, VII AND VII)\n\nTitle VI sets up the governance systems at Union and national level. At Union\nlevel, the proposal establishes a European Artificial Intelligence Board (the\n\u2018Board\u2019), composed of representatives from the Member States and the\nCommission. The Board will facilitate a smooth, effective and harmonised\nimplementation of this regulation by contributing to the effective cooperation\nof the national supervisory authorities and the Commission and providing\nadvice and expertise to the Commission. It will also collect and share best\npractices among the Member States.\n\nAt national level, Member States will have to designate one or more national\ncompetent authorities and, among them, the national supervisory authority, for\nthe purpose of supervising the application and implementation of the\nregulation. The European Data Protection Supervisor will act as the competent\nauthority for the supervision of the Union institutions, agencies and bodies\nwhen they fall within the scope of this regulation.\n\nTitle VII aims to facilitate the monitoring work of the Commission and\nnational authorities through the establishment of an EU-wide database for\nstand-alone high-risk AI systems with mainly fundamental rights implications.\nThe database will be operated by the Commission and provided with data by the\nproviders of the AI systems, who will be required to register their systems\nbefore placing them on the market or otherwise putting them into service.\n\nTitle VIII sets out the monitoring and reporting obligations for providers of\nAI systems with regard to post-market monitoring and reporting and\ninvestigating on AI-related incidents and malfunctioning. Market surveillance\nauthorities would also control the market and investigate compliance with the\nobligations and requirements for all high-risk AI systems already placed on\nthe market. Market surveillance authorities would have all powers under\nRegulation (EU) 2019/1020 on market surveillance. 
Ex-post enforcement should\nensure that once the AI system has been put on the market, public authorities\nhave the powers and resources to intervene in case AI systems generate\nunexpected risks, which warrant rapid action. They will also monitor\ncompliance of operators with their relevant obligations under the regulation.\nThe proposal does not foresee the automatic creation of any additional bodies\nor authorities at Member State level. Member States may therefore appoint (and\ndraw upon the expertise of) existing sectoral authorities, who would also be\nentrusted with the powers to monitor and enforce the provisions of the\nregulation.\n\nAll this is without prejudice to the existing system and allocation of powers\nof ex-post enforcement of obligations regarding fundamental rights in the\nMember States. When necessary for their mandate, existing supervision and\nenforcement authorities will also have the power to request and access any\ndocumentation maintained following this regulation and, where needed, request\nmarket surveillance authorities to organise testing of the high-risk AI system\nthrough technical means.\n\n5.2.7.CODES OF CONDUCT (TITLE IX)\n\nTitle IX creates a framework for the creation of codes of conduct, which aim\nto encourage providers of non-high-risk AI systems to apply voluntarily the\nmandatory requirements for high-risk AI systems (as laid out in Title III).\nProviders of non-high-risk AI systems may create and implement the codes of\nconduct themselves.",
"982b1d35-3a02-4f1f-943a-4378da0e7c96": "All this is without prejudice to the existing system and allocation of powers\nof ex-post enforcement of obligations regarding fundamental rights in the\nMember States. When necessary for their mandate, existing supervision and\nenforcement authorities will also have the power to request and access any\ndocumentation maintained following this regulation and, where needed, request\nmarket surveillance authorities to organise testing of the high-risk AI system\nthrough technical means.\n\n5.2.7.CODES OF CONDUCT (TITLE IX)\n\nTitle IX creates a framework for the creation of codes of conduct, which aim\nto encourage providers of non-high-risk AI systems to apply voluntarily the\nmandatory requirements for high-risk AI systems (as laid out in Title III).\nProviders of non-high-risk AI systems may create and implement the codes of\nconduct themselves. Those codes may also include voluntary commitments\nrelated, for example, to environmental sustainability, accessibility for\npersons with disability, stakeholders\u2019 participation in the design and\ndevelopment of AI systems, and diversity of development teams.\n\n5.2.8.FINAL PROVISIONS (TITLES X, XI AND XII)\n\nTitle X emphasizes the obligation of all parties to respect the\nconfidentiality of information and data and sets out rules for the exchange of\ninformation obtained during the implementation of the regulation. Title X also\nincludes measures to ensure the effective implementation of the regulation\nthrough effective, proportionate, and dissuasive penalties for infringements\nof the provisions.\n\nTitle XI sets out rules for the exercise of delegation and implementing\npowers. The proposal empowers the Commission to adopt, where appropriate,\nimplementing acts to ensure uniform application of the regulation or delegated\nacts to update or complement the lists in Annexes I to VII.\n\nTitle XII contains an obligation for the Commission to assess regularly the\nneed for an update of Annex III and to prepare regular reports on the\nevaluation and review of the regulation. It also lays down final provisions,\nincluding a differentiated transitional period for the initial date of the\napplicability of the regulation to facilitate the smooth implementation for\nall parties concerned.\n\n2021/0106 (COD)\n\nProposal for a\n\nREGULATION OF THE EUROPEAN PARLIAMENT AND OF THE COUNCIL\n\nLAYING DOWN HARMONISED RULES ON ARTIFICIAL INTELLIGENCE (ARTIFICIAL\nINTELLIGENCE ACT) AND AMENDING CERTAIN UNION LEGISLATIVE ACTS\n\nTHE EUROPEAN PARLIAMENT AND THE COUNCIL OF THE EUROPEAN UNION,\n\nHaving regard to the Treaty on the Functioning of the European Union, and in\nparticular Articles 16 and 114 thereof,\n\nHaving regard to the proposal from the European Commission,\n\nAfter transmission of the draft legislative act to the national parliaments,\n\nHaving regard to the opinion of the European Economic and Social Committee 31\n,\n\nHaving regard to the opinion of the Committee of the Regions 32 ,\n\nActing in accordance with the ordinary legislative procedure,\n\nWhereas:\n\n(1)The purpose of this Regulation is to improve the functioning of the\ninternal market by laying down a uniform legal framework in particular for the\ndevelopment, marketing and use of artificial intelligence in conformity with\nUnion values. 
This Regulation pursues a number of overriding reasons of public\ninterest, such as a high level of protection of health, safety and fundamental\nrights, and it ensures the free movement of AI-based goods and services cross-\nborder, thus preventing Member States from imposing restrictions on the\ndevelopment, marketing and use of AI systems, unless explicitly authorised by\nthis Regulation.\n\n(2)Artificial intelligence systems (AI systems) can be easily deployed in\nmultiple sectors of the economy and society, including cross border, and\ncirculate throughout the Union. Certain Member States have already explored\nthe adoption of national rules to ensure that artificial intelligence is safe\nand is developed and used in compliance with fundamental rights obligations.\nDiffering national rules may lead to fragmentation of the internal market and\ndecrease legal certainty for operators that develop or use AI systems. A\nconsistent and high level of protection throughout the Union should therefore\nbe ensured, while divergences hampering the free circulation of AI systems and\nrelated products and services within the internal market should be prevented,\nby laying down uniform obligations for operators and guaranteeing the uniform\nprotection of overriding reasons of public interest and of rights of persons\nthroughout the internal market based on Article 114 of the Treaty on the\nFunctioning of the European Union (TFEU).",
"ab56beb4-8ab7-4db0-ab92-0133825cde9a": "(2)Artificial intelligence systems (AI systems) can be easily deployed in\nmultiple sectors of the economy and society, including cross border, and\ncirculate throughout the Union. Certain Member States have already explored\nthe adoption of national rules to ensure that artificial intelligence is safe\nand is developed and used in compliance with fundamental rights obligations.\nDiffering national rules may lead to fragmentation of the internal market and\ndecrease legal certainty for operators that develop or use AI systems. A\nconsistent and high level of protection throughout the Union should therefore\nbe ensured, while divergences hampering the free circulation of AI systems and\nrelated products and services within the internal market should be prevented,\nby laying down uniform obligations for operators and guaranteeing the uniform\nprotection of overriding reasons of public interest and of rights of persons\nthroughout the internal market based on Article 114 of the Treaty on the\nFunctioning of the European Union (TFEU). To the extent that this Regulation\ncontains specific rules on the protection of individuals with regard to the\nprocessing of personal data concerning restrictions of the use of AI systems\nfor \u2018real-time\u2019 remote biometric identification in publicly accessible spaces\nfor the purpose of law enforcement, it is appropriate to base this Regulation,\nin as far as those specific rules are concerned, on Article 16 of the TFEU. In\nlight of those specific rules and the recourse to Article 16 TFEU, it is\nappropriate to consult the European Data Protection Board.\n\n(3)Artificial intelligence is a fast evolving family of technologies that can\ncontribute to a wide array of economic and societal benefits across the entire\nspectrum of industries and social activities. By improving prediction,\noptimising operations and resource allocation, and personalising digital\nsolutions available for individuals and organisations, the use of artificial\nintelligence can provide key competitive advantages to companies and support\nsocially and environmentally beneficial outcomes, for example in healthcare,\nfarming, education and training, infrastructure management, energy, transport\nand logistics, public services, security, justice, resource and energy\nefficiency, and climate change mitigation and adaptation.\n\n(4)At the same time, depending on the circumstances regarding its specific\napplication and use, artificial intelligence may generate risks and cause harm\nto public interests and rights that are protected by Union law. Such harm\nmight be material or immaterial.\n\n(5)A Union legal framework laying down harmonised rules on artificial\nintelligence is therefore needed to foster the development, use and uptake of\nartificial intelligence in the internal market that at the same time meets a\nhigh level of protection of public interests, such as health and safety and\nthe protection of fundamental rights, as recognised and protected by Union\nlaw. To achieve that objective, rules regulating the placing on the market and\nputting into service of certain AI systems should be laid down, thus ensuring\nthe smooth functioning of the internal market and allowing those systems to\nbenefit from the principle of free movement of goods and services. 
By laying\ndown those rules, this Regulation supports the objective of the Union of being\na global leader in the development of secure, trustworthy and ethical\nartificial intelligence, as stated by the European Council 33 , and it ensures\nthe protection of ethical principles, as specifically requested by the\nEuropean Parliament 34 .\n\n(6)The notion of AI system should be clearly defined to ensure legal\ncertainty, while providing the flexibility to accommodate future technological\ndevelopments. The definition should be based on the key functional\ncharacteristics of the software, in particular the ability, for a given set of\nhuman-defined objectives, to generate outputs such as content, predictions,\nrecommendations, or decisions which influence the environment with which the\nsystem interacts, be it in a physical or digital dimension. AI systems can be\ndesigned to operate with varying levels of autonomy and be used on a stand-\nalone basis or as a component of a product, irrespective of whether the system\nis physically integrated into the product (embedded) or serves the\nfunctionality of the product without being integrated therein (non-embedded).\nThe definition of AI system should be complemented by a list of specific\ntechniques and approaches used for its development, which should be kept up-\nto-date in the light of market and technological developments through the\nadoption of delegated acts by the Commission to amend that list.",
"b738438c-edb2-4e8d-af51-1bbb0cf90798": "The definition should be based on the key functional\ncharacteristics of the software, in particular the ability, for a given set of\nhuman-defined objectives, to generate outputs such as content, predictions,\nrecommendations, or decisions which influence the environment with which the\nsystem interacts, be it in a physical or digital dimension. AI systems can be\ndesigned to operate with varying levels of autonomy and be used on a stand-\nalone basis or as a component of a product, irrespective of whether the system\nis physically integrated into the product (embedded) or serve the\nfunctionality of the product without being integrated therein (non-embedded).\nThe definition of AI system should be complemented by a list of specific\ntechniques and approaches used for its development, which should be kept up-\nto\u2013date in the light of market and technological developments through the\nadoption of delegated acts by the Commission to amend that list.\n\n(7)The notion of biometric data used in this Regulation is in line with and\nshould be interpreted consistently with the notion of biometric data as\ndefined in Article 4(14) of Regulation (EU) 2016/679 of the European\nParliament and of the Council 35 , Article 3(18) of Regulation (EU) 2018/1725\nof the European Parliament and of the Council 36 and Article 3(13) of\nDirective (EU) 2016/680 of the European Parliament and of the Council 37 .\n\n(8)The notion of remote biometric identification system as used in this\nRegulation should be defined functionally, as an AI system intended for the\nidentification of natural persons at a distance through the comparison of a\nperson\u2019s biometric data with the biometric data contained in a reference\ndatabase, and without prior knowledge whether the targeted person will be\npresent and can be identified, irrespectively of the particular technology,\nprocesses or types of biometric data used. Considering their different\ncharacteristics and manners in which they are used, as well as the different\nrisks involved, a distinction should be made between \u2018real-time\u2019 and \u2018post\u2019\nremote biometric identification systems. In the case of \u2018real-time\u2019 systems,\nthe capturing of the biometric data, the comparison and the identification\noccur all instantaneously, near-instantaneously or in any event without a\nsignificant delay. In this regard, there should be no scope for circumventing\nthe rules of this Regulation on the \u2018real-time\u2019 use of the AI systems in\nquestion by providing for minor delays. \u2018Real-time\u2019 systems involve the use of\n\u2018live\u2019 or \u2018near-\u2018live\u2019 material, such as video footage, generated by a camera\nor other device with similar functionality. In the case of \u2018post\u2019 systems, in\ncontrast, the biometric data have already been captured and the comparison and\nidentification occur only after a significant delay. This involves material,\nsuch as pictures or video footage generated by closed circuit television\ncameras or private devices, which has been generated before the use of the\nsystem in respect of the natural persons concerned.\n\n(9)For the purposes of this Regulation the notion of publicly accessible space\nshould be understood as referring to any physical place that is accessible to\nthe public, irrespective of whether the place in question is privately or\npublicly owned. 
Therefore, the notion does not cover places that are private\nin nature and normally not freely accessible for third parties, including law\nenforcement authorities, unless those parties have been specifically invited\nor authorised, such as homes, private clubs, offices, warehouses and\nfactories. Online spaces are not covered either, as they are not physical\nspaces. However, the mere fact that certain conditions for accessing a\nparticular space may apply, such as admission tickets or age restrictions,\ndoes not mean that the space is not publicly accessible within the meaning of\nthis Regulation. Consequently, in addition to public spaces such as streets,\nrelevant parts of government buildings and most transport infrastructure,\nspaces such as cinemas, theatres, shops and shopping centres are normally also\npublicly accessible. Whether a given space is accessible to the public should\nhowever be determined on a case-by-case basis, having regard to the\nspecificities of the individual situation at hand.\n\n(10)In order to ensure a level playing field and an effective protection of\nrights and freedoms of individuals across the Union, the rules established by\nthis Regulation should apply to providers of AI systems in a non-\ndiscriminatory manner, irrespective of whether they are established within the\nUnion or in a third country, and to users of AI systems established within the\nUnion.",
"29987751-33be-414b-9307-c7807acfe1b6": "Consequently, in addition to public spaces such as streets,\nrelevant parts of government buildings and most transport infrastructure,\nspaces such as cinemas, theatres, shops and shopping centres are normally also\npublicly accessible. Whether a given space is accessible to the public should\nhowever be determined on a case-by-case basis, having regard to the\nspecificities of the individual situation at hand.\n\n(10)In order to ensure a level playing field and an effective protection of\nrights and freedoms of individuals across the Union, the rules established by\nthis Regulation should apply to providers of AI systems in a non-\ndiscriminatory manner, irrespective of whether they are established within the\nUnion or in a third country, and to users of AI systems established within the\nUnion.\n\n(11)In light of their digital nature, certain AI systems should fall within\nthe scope of this Regulation even when they are neither placed on the market,\nnor put into service, nor used in the Union. This is the case for example of\nan operator established in the Union that contracts certain services to an\noperator established outside the Union in relation to an activity to be\nperformed by an AI system that would qualify as high-risk and whose effects\nimpact natural persons located in the Union. In those circumstances, the AI\nsystem used by the operator outside the Union could process data lawfully\ncollected in and transferred from the Union, and provide to the contracting\noperator in the Union the output of that AI system resulting from that\nprocessing, without that AI system being placed on the market, put into\nservice or used in the Union. To prevent the circumvention of this Regulation\nand to ensure an effective protection of natural persons located in the Union,\nthis Regulation should also apply to providers and users of AI systems that\nare established in a third country, to the extent the output produced by those\nsystems is used in the Union. Nonetheless, to take into account existing\narrangements and special needs for cooperation with foreign partners with whom\ninformation and evidence is exchanged, this Regulation should not apply to\npublic authorities of a third country and international organisations when\nacting in the framework of international agreements concluded at national or\nEuropean level for law enforcement and judicial cooperation with the Union or\nwith its Member States. Such agreements have been concluded bilaterally\nbetween Member States and third countries or between the European Union,\nEuropol and other EU agencies and third countries and international\norganisations.\n\n(12)This Regulation should also apply to Union institutions, offices, bodies\nand agencies when acting as a provider or user of an AI system. AI systems\nexclusively developed or used for military purposes should be excluded from\nthe scope of this Regulation where that use falls under the exclusive remit of\nthe Common Foreign and Security Policy regulated under Title V of the Treaty\non the European Union (TEU). 
This Regulation should be without prejudice to\nthe provisions regarding the liability of intermediary service providers set\nout in Directive 2000/31/EC of the European Parliament and of the Council [as\namended by the Digital Services Act].\n\n(13)In order to ensure a consistent and high level of protection of public\ninterests as regards health, safety and fundamental rights, common normative\nstandards for all high-risk AI systems should be established. Those standards\nshould be consistent with the Charter of fundamental rights of the European\nUnion (the Charter) and should be non-discriminatory and in line with the\nUnion\u2019s international trade commitments.\n\n(14)In order to introduce a proportionate and effective set of binding rules\nfor AI systems, a clearly defined risk-based approach should be followed. That\napproach should tailor the type and content of such rules to the intensity and\nscope of the risks that AI systems can generate. It is therefore necessary to\nprohibit certain artificial intelligence practices, to lay down requirements\nfor high-risk AI systems and obligations for the relevant operators, and to\nlay down transparency obligations for certain AI systems.\n\n(15)Aside from the many beneficial uses of artificial intelligence, that\ntechnology can also be misused and provide novel and powerful tools for\nmanipulative, exploitative and social control practices. Such practices are\nparticularly harmful and should be prohibited because they contradict Union\nvalues of respect for human dignity, freedom, equality, democracy and the rule\nof law and Union fundamental rights, including the right to non-\ndiscrimination, data protection and privacy and the rights of the child.\n\n(16)The placing on the market, putting into service or use of certain AI\nsystems intended to distort human behaviour, whereby physical or psychological\nharms are likely to occur, should be forbidden.",
"38ea2640-90a3-4af7-999a-b69ed2a1ddf8": "It is therefore necessary to\nprohibit certain artificial intelligence practices, to lay down requirements\nfor high-risk AI systems and obligations for the relevant operators, and to\nlay down transparency obligations for certain AI systems.\n\n(15)Aside from the many beneficial uses of artificial intelligence, that\ntechnology can also be misused and provide novel and powerful tools for\nmanipulative, exploitative and social control practices. Such practices are\nparticularly harmful and should be prohibited because they contradict Union\nvalues of respect for human dignity, freedom, equality, democracy and the rule\nof law and Union fundamental rights, including the right to non-\ndiscrimination, data protection and privacy and the rights of the child.\n\n(16)The placing on the market, putting into service or use of certain AI\nsystems intended to distort human behaviour, whereby physical or psychological\nharms are likely to occur, should be forbidden. Such AI systems deploy\nsubliminal components individuals cannot perceive or exploit vulnerabilities\nof children and people due to their age, physical or mental incapacities. They\ndo so with the intention to materially distort the behaviour of a person and\nin a manner that causes or is likely to cause harm to that or another person.\nThe intention may not be presumed if the distortion of human behaviour results\nfrom factors external to the AI system which are outside of the control of the\nprovider or the user. Research for legitimate purposes in relation to such AI\nsystems should not be stifled by the prohibition, if such research does not\namount to use of the AI system in human-machine relations that exposes natural\npersons to harm and such research is carried out in accordance with recognised\nethical standards for scientific research.\n\n(17)AI systems providing social scoring of natural persons for general purpose\nby public authorities or on their behalf may lead to discriminatory outcomes\nand the exclusion of certain groups. They may violate the right to dignity and\nnon-discrimination and the values of equality and justice. Such AI systems\nevaluate or classify the trustworthiness of natural persons based on their\nsocial behaviour in multiple contexts or known or predicted personal or\npersonality characteristics. The social score obtained from such AI systems\nmay lead to the detrimental or unfavourable treatment of natural persons or\nwhole groups thereof in social contexts, which are unrelated to the context in\nwhich the data was originally generated or collected or to a detrimental\ntreatment that is disproportionate or unjustified to the gravity of their\nsocial behaviour. Such AI systems should be therefore prohibited.\n\n(18)The use of AI systems for \u2018real-time\u2019 remote biometric identification of\nnatural persons in publicly accessible spaces for the purpose of law\nenforcement is considered particularly intrusive in the rights and freedoms of\nthe concerned persons, to the extent that it may affect the private life of a\nlarge part of the population, evoke a feeling of constant surveillance and\nindirectly dissuade the exercise of the freedom of assembly and other\nfundamental rights. 
In addition, the immediacy of the impact and the limited\nopportunities for further checks or corrections in relation to the use of such\nsystems operating in \u2018real-time\u2019 carry heightened risks for the rights and\nfreedoms of the persons that are concerned by law enforcement activities.\n\n(19)The use of those systems for the purpose of law enforcement should\ntherefore be prohibited, except in three exhaustively listed and narrowly\ndefined situations, where the use is strictly necessary to achieve a\nsubstantial public interest, the importance of which outweighs the risks.\nThose situations involve the search for potential victims of crime, including\nmissing children; certain threats to the life or physical safety of natural\npersons or of a terrorist attack; and the detection, localisation,\nidentification or prosecution of perpetrators or suspects of the criminal\noffences referred to in Council Framework Decision 2002/584/JHA 38 if those\ncriminal offences are punishable in the Member State concerned by a custodial\nsentence or a detention order for a maximum period of at least three years and\nas they are defined in the law of that Member State. Such a threshold for the\ncustodial sentence or detention order in accordance with national law\ncontributes to ensuring that the offence is serious enough to potentially\njustify the use of \u2018real-time\u2019 remote biometric identification systems.",
"eb05be6d-82cf-402a-ac4d-cecbb979ddb8": "Those situations involve the search for potential victims of crime, including\nmissing children; certain threats to the life or physical safety of natural\npersons or of a terrorist attack; and the detection, localisation,\nidentification or prosecution of perpetrators or suspects of the criminal\noffences referred to in Council Framework Decision 2002/584/JHA 38 if those\ncriminal offences are punishable in the Member State concerned by a custodial\nsentence or a detention order for a maximum period of at least three years and\nas they are defined in the law of that Member State. Such threshold for the\ncustodial sentence or detention order in accordance with national law\ncontributes to ensure that the offence should be serious enough to potentially\njustify the use of \u2018real-time\u2019 remote biometric identification systems.\nMoreover, of the 32 criminal offences listed in the Council Framework Decision\n2002/584/JHA, some are in practice likely to be more relevant than others, in\nthat the recourse to \u2018real-time\u2019 remote biometric identification will\nforeseeably be necessary and proportionate to highly varying degrees for the\npractical pursuit of the detection, localisation, identification or\nprosecution of a perpetrator or suspect of the different criminal offences\nlisted and having regard to the likely differences in the seriousness,\nprobability and scale of the harm or possible negative consequences.\n\n(20)In order to ensure that those systems are used in a responsible and\nproportionate manner, it is also important to establish that, in each of those\nthree exhaustively listed and narrowly defined situations, certain elements\nshould be taken into account, in particular as regards the nature of the\nsituation giving rise to the request and the consequences of the use for the\nrights and freedoms of all persons concerned and the safeguards and conditions\nprovided for with the use. In addition, the use of \u2018real-time\u2019 remote\nbiometric identification systems in publicly accessible spaces for the purpose\nof law enforcement should be subject to appropriate limits in time and space,\nhaving regard in particular to the evidence or indications regarding the\nthreats, the victims or perpetrator. The reference database of persons should\nbe appropriate for each use case in each of the three situations mentioned\nabove.\n\n(21)Each use of a \u2018real-time\u2019 remote biometric identification system in\npublicly accessible spaces for the purpose of law enforcement should be\nsubject to an express and specific authorisation by a judicial authority or by\nan independent administrative authority of a Member State. Such authorisation\nshould in principle be obtained prior to the use, except in duly justified\nsituations of urgency, that is, situations where the need to use the systems\nin question is such as to make it effectively and objectively impossible to\nobtain an authorisation before commencing the use. In such situations of\nurgency, the use should be restricted to the absolute minimum necessary and be\nsubject to appropriate safeguards and conditions, as determined in national\nlaw and specified in the context of each individual urgent use case by the law\nenforcement authority itself. 
In addition, the law enforcement authority\nshould in such situations seek to obtain an authorisation as soon as possible,\nwhilst providing the reasons for not having been able to request it earlier.\n\n(22)Furthermore, it is appropriate to provide, within the exhaustive framework\nset by this Regulation, that such use in the territory of a Member State in\naccordance with this Regulation should only be possible where and in as far as\nthe Member State in question has decided to expressly provide for the\npossibility to authorise such use in its detailed rules of national law.\nConsequently, Member States remain free under this Regulation not to provide\nfor such a possibility at all or to only provide for such a possibility in\nrespect of some of the objectives capable of justifying authorised use\nidentified in this Regulation.\n\n(23)The use of AI systems for \u2018real-time\u2019 remote biometric identification of\nnatural persons in publicly accessible spaces for the purpose of law\nenforcement necessarily involves the processing of biometric data. The rules\nof this Regulation that prohibit, subject to certain exceptions, such use,\nwhich are based on Article 16 TFEU, should apply as lex specialis in respect\nof the rules on the processing of biometric data contained in Article 10 of\nDirective (EU) 2016/680, thus regulating such use and the processing of\nbiometric data involved in an exhaustive manner.",
"b38aa789-7f23-4da4-9da3-4589f0985436": "Consequently, Member States remain free under this Regulation not to provide\nfor such a possibility at all or to only provide for such a possibility in\nrespect of some of the objectives capable of justifying authorised use\nidentified in this Regulation.\n\n(23)The use of AI systems for \u2018real-time\u2019 remote biometric identification of\nnatural persons in publicly accessible spaces for the purpose of law\nenforcement necessarily involves the processing of biometric data. The rules\nof this Regulation that prohibit, subject to certain exceptions, such use,\nwhich are based on Article 16 TFEU, should apply as lex specialis in respect\nof the rules on the processing of biometric data contained in Article 10 of\nDirective (EU) 2016/680, thus regulating such use and the processing of\nbiometric data involved in an exhaustive manner. Therefore, such use and\nprocessing should only be possible in as far as it is compatible with the\nframework set by this Regulation, without there being scope, outside that\nframework, for the competent authorities, where they act for purpose of law\nenforcement, to use such systems and process such data in connection thereto\non the grounds listed in Article 10 of Directive (EU) 2016/680. In this\ncontext, this Regulation is not intended to provide the legal basis for the\nprocessing of personal data under Article 8 of Directive 2016/680. However,\nthe use of \u2018real-time\u2019 remote biometric identification systems in publicly\naccessible spaces for purposes other than law enforcement, including by\ncompetent authorities, should not be covered by the specific framework\nregarding such use for the purpose of law enforcement set by this Regulation.\nSuch use for purposes other than law enforcement should therefore not be\nsubject to the requirement of an authorisation under this Regulation and the\napplicable detailed rules of national law that may give effect to it.\n\n(24)Any processing of biometric data and other personal data involved in the\nuse of AI systems for biometric identification, other than in connection to\nthe use of \u2018real-time\u2019 remote biometric identification systems in publicly\naccessible spaces for the purpose of law enforcement as regulated by this\nRegulation, including where those systems are used by competent authorities in\npublicly accessible spaces for other purposes than law enforcement, should\ncontinue to comply with all requirements resulting from Article 9(1) of\nRegulation (EU) 2016/679, Article 10(1) of Regulation (EU) 2018/1725 and\nArticle 10 of Directive (EU) 2016/680, as applicable.\n\n(25)In accordance with Article 6a of Protocol No 21 on the position of the\nUnited Kingdom and Ireland in respect of the area of freedom, security and\njustice, as annexed to the TEU and to the TFEU, Ireland is not bound by the\nrules laid down in Article 5(1), point (d), (2) and (3) of this Regulation\nadopted on the basis of Article 16 of the TFEU which relate to the processing\nof personal data by the Member States when carrying out activities falling\nwithin the scope of Chapter 4 or Chapter 5 of Title V of Part Three of the\nTFEU, where Ireland is not bound by the rules governing the forms of judicial\ncooperation in criminal matters or police cooperation which require compliance\nwith the provisions laid down on the basis of Article 16 of the TFEU.\n\n(26)In accordance with Articles 2 and 2a of Protocol No 22 on the position of\nDenmark, annexed to the TEU and TFEU, Denmark is 
not bound by rules laid down\nin Article 5(1), point (d), (2) and (3) of this Regulation adopted on the\nbasis of Article 16 of the TFEU, or subject to their application, which relate\nto the processing of personal data by the Member States when carrying out\nactivities falling within the scope of Chapter 4 or Chapter 5 of Title V of\nPart Three of the TFEU.\n\n(27)High-risk AI systems should only be placed on the Union market or put into\nservice if they comply with certain mandatory requirements. Those requirements\nshould ensure that high-risk AI systems available in the Union or whose output\nis otherwise used in the Union do not pose unacceptable risks to important\nUnion public interests as recognised and protected by Union law. AI systems\nidentified as high-risk should be limited to those that have a significant\nharmful impact on the health, safety and fundamental rights of persons in the\nUnion and such limitation minimises any potential restriction to international\ntrade, if any.",
"a3f4c0b6-a7c0-4db7-acae-2b3785bd73dc": "(27)High-risk AI systems should only be placed on the Union market or put into\nservice if they comply with certain mandatory requirements. Those requirements\nshould ensure that high-risk AI systems available in the Union or whose output\nis otherwise used in the Union do not pose unacceptable risks to important\nUnion public interests as recognised and protected by Union law. AI systems\nidentified as high-risk should be limited to those that have a significant\nharmful impact on the health, safety and fundamental rights of persons in the\nUnion and such limitation minimises any potential restriction to international\ntrade, if any.\n\n(28)AI systems could produce adverse outcomes to health and safety of persons,\nin particular when such systems operate as components of products.\nConsistently with the objectives of Union harmonisation legislation to\nfacilitate the free movement of products in the internal market and to ensure\nthat only safe and otherwise compliant products find their way into the\nmarket, it is important that the safety risks that may be generated by a\nproduct as a whole due to its digital components, including AI systems, are\nduly prevented and mitigated. For instance, increasingly autonomous robots,\nwhether in the context of manufacturing or personal assistance and care should\nbe able to safely operate and performs their functions in complex\nenvironments. Similarly, in the health sector where the stakes for life and\nhealth are particularly high, increasingly sophisticated diagnostics systems\nand systems supporting human decisions should be reliable and accurate. The\nextent of the adverse impact caused by the AI system on the fundamental rights\nprotected by the Charter is of particular relevance when classifying an AI\nsystem as high-risk. Those rights include the right to human dignity, respect\nfor private and family life, protection of personal data, freedom of\nexpression and information, freedom of assembly and of association, and non-\ndiscrimination, consumer protection, workers\u2019 rights, rights of persons with\ndisabilities, right to an effective remedy and to a fair trial, right of\ndefence and the presumption of innocence, right to good administration. In\naddition to those rights, it is important to highlight that children have\nspecific rights as enshrined in Article 24 of the EU Charter and in the United\nNations Convention on the Rights of the Child (further elaborated in the UNCRC\nGeneral Comment No. 25 as regards the digital environment), both of which\nrequire consideration of the children\u2019s vulnerabilities and provision of such\nprotection and care as necessary for their well-being. 
The fundamental right\nto a high level of environmental protection enshrined in the Charter and\nimplemented in Union policies should also be considered when assessing the\nseverity of the harm that an AI system can cause, including in relation to the\nhealth and safety of persons.\n\n(29)As regards high-risk AI systems that are safety components of products or\nsystems, or which are themselves products or systems falling within the scope\nof Regulation (EC) No 300/2008 of the European Parliament and of the Council\n39 , Regulation (EU) No 167/2013 of the European Parliament and of the Council\n40 , Regulation (EU) No 168/2013 of the European Parliament and of the Council\n41 , Directive 2014/90/EU of the European Parliament and of the Council 42 ,\nDirective (EU) 2016/797 of the European Parliament and of the Council 43 ,\nRegulation (EU) 2018/858 of the European Parliament and of the Council 44 ,\nRegulation (EU) 2018/1139 of the European Parliament and of the Council 45 ,\nand Regulation (EU) 2019/2144 of the European Parliament and of the Council 46\n, it is appropriate to amend those acts to ensure that the Commission takes\ninto account, on the basis of the technical and regulatory specificities of\neach sector, and without interfering with existing governance, conformity\nassessment and enforcement mechanisms and authorities established therein, the\nmandatory requirements for high-risk AI systems laid down in this Regulation\nwhen adopting any relevant future delegated or implementing acts on the basis\nof those acts.\n\n(30)As regards AI systems that are safety components of products, or which are\nthemselves products, falling within the scope of certain Union harmonisation\nlegislation, it is appropriate to classify them as high-risk under this\nRegulation if the product in question undergoes the conformity assessment\nprocedure with a third-party conformity assessment body pursuant to that\nrelevant Union harmonisation legislation. In particular, such products are\nmachinery, toys, lifts, equipment and protective systems intended for use in\npotentially explosive atmospheres, radio equipment, pressure equipment,\nrecreational craft equipment, cableway installations, appliances burning\ngaseous fuels, medical devices, and in vitro diagnostic medical devices.",
"0a7f300d-22b2-43f4-801c-55d358a0cd95": "(30)As regards AI systems that are safety components of products, or which are\nthemselves products, falling within the scope of certain Union harmonisation\nlegislation, it is appropriate to classify them as high-risk under this\nRegulation if the product in question undergoes the conformity assessment\nprocedure with a third-party conformity assessment body pursuant to that\nrelevant Union harmonisation legislation. In particular, such products are\nmachinery, toys, lifts, equipment and protective systems intended for use in\npotentially explosive atmospheres, radio equipment, pressure equipment,\nrecreational craft equipment, cableway installations, appliances burning\ngaseous fuels, medical devices, and in vitro diagnostic medical devices.\n\n(31)The classification of an AI system as high-risk pursuant to this\nRegulation should not necessarily mean that the product whose safety component\nis the AI system, or the AI system itself as a product, is considered \u2018high-\nrisk\u2019 under the criteria established in the relevant Union harmonisation\nlegislation that applies to the product. This is notably the case for\nRegulation (EU) 2017/745 of the European Parliament and of the Council 47 and\nRegulation (EU) 2017/746 of the European Parliament and of the Council 48 ,\nwhere a third-party conformity assessment is provided for medium-risk and\nhigh-risk products.\n\n(32)As regards stand-alone AI systems, meaning high-risk AI systems other than\nthose that are safety components of products, or which are themselves\nproducts, it is appropriate to classify them as high-risk if, in the light of\ntheir intended purpose, they pose a high risk of harm to the health and safety\nor the fundamental rights of persons, taking into account both the severity of\nthe possible harm and its probability of occurrence and they are used in a\nnumber of specifically pre-defined areas specified in the Regulation. The\nidentification of those systems is based on the same methodology and criteria\nenvisaged also for any future amendments of the list of high-risk AI systems.\n\n(33)Technical inaccuracies of AI systems intended for the remote biometric\nidentification of natural persons can lead to biased results and entail\ndiscriminatory effects. This is particularly relevant when it comes to age,\nethnicity, sex or disabilities. Therefore, \u2018real-time\u2019 and \u2018post\u2019 remote\nbiometric identification systems should be classified as high-risk. 
In view of\nthe risks that they pose, both types of remote biometric identification\nsystems should be subject to specific requirements on logging capabilities and\nhuman oversight.\n\n(34)As regards the management and operation of critical infrastructure, it is\nappropriate to classify as high-risk the AI systems intended to be used as\nsafety components in the management and operation of road traffic and the\nsupply of water, gas, heating and electricity, since their failure or\nmalfunctioning may put at risk the life and health of persons at large scale\nand lead to appreciable disruptions in the ordinary conduct of social and\neconomic activities.\n\n(35)AI systems used in education or vocational training, notably for\ndetermining access or assigning persons to educational and vocational training\ninstitutions or to evaluate persons on tests as part of or as a precondition\nfor their education should be considered high-risk, since they may determine\nthe educational and professional course of a person\u2019s life and therefore\naffect their ability to secure their livelihood. When improperly designed and\nused, such systems may violate the right to education and training as well as\nthe right not to be discriminated against and perpetuate historical patterns\nof discrimination.\n\n(36)AI systems used in employment, workers management and access to self-\nemployment, notably for the recruitment and selection of persons, for making\ndecisions on promotion and termination and for task allocation, monitoring or\nevaluation of persons in work-related contractual relationships, should also\nbe classified as high-risk, since those systems may appreciably impact future\ncareer prospects and livelihoods of these persons. Relevant work-related\ncontractual relationships should involve employees and persons providing\nservices through platforms as referred to in the Commission Work Programme\n2021. Such persons should in principle not be considered users within the\nmeaning of this Regulation. Throughout the recruitment process and in the\nevaluation, promotion, or retention of persons in work-related contractual\nrelationships, such systems may perpetuate historical patterns of\ndiscrimination, for example against women, certain age groups, persons with\ndisabilities, or persons of certain racial or ethnic origins or sexual\norientation. AI systems used to monitor the performance and behaviour of these\npersons may also impact their rights to data protection and privacy.",
"030500a1-15da-49de-ab82-85f033486b65": "Relevant work-related\ncontractual relationships should involve employees and persons providing\nservices through platforms as referred to in the Commission Work Programme\n2021. Such persons should in principle not be considered users within the\nmeaning of this Regulation. Throughout the recruitment process and in the\nevaluation, promotion, or retention of persons in work-related contractual\nrelationships, such systems may perpetuate historical patterns of\ndiscrimination, for example against women, certain age groups, persons with\ndisabilities, or persons of certain racial or ethnic origins or sexual\norientation. AI systems used to monitor the performance and behaviour of these\npersons may also impact their rights to data protection and privacy.\n\n(37)Another area in which the use of AI systems deserves special consideration\nis the access to and enjoyment of certain essential private and public\nservices and benefits necessary for people to fully participate in society or\nto improve one\u2019s standard of living. In particular, AI systems used to\nevaluate the credit score or creditworthiness of natural persons should be\nclassified as high-risk AI systems, since they determine those persons\u2019 access\nto financial resources or essential services such as housing, electricity, and\ntelecommunication services. AI systems used for this purpose may lead to\ndiscrimination of persons or groups and perpetuate historical patterns of\ndiscrimination, for example based on racial or ethnic origins, disabilities,\nage, sexual orientation, or create new forms of discriminatory impacts.\nConsidering the very limited scale of the impact and the available\nalternatives on the market, it is appropriate to exempt AI systems for the\npurpose of creditworthiness assessment and credit scoring when put into\nservice by small-scale providers for their own use. Natural persons applying\nfor or receiving public assistance benefits and services from public\nauthorities are typically dependent on those benefits and services and in a\nvulnerable position in relation to the responsible authorities. If AI systems\nare used for determining whether such benefits and services should be denied,\nreduced, revoked or reclaimed by authorities, they may have a significant\nimpact on persons\u2019 livelihood and may infringe their fundamental rights, such\nas the right to social protection, non-discrimination, human dignity or an\neffective remedy. Those systems should therefore be classified as high-risk.\nNonetheless, this Regulation should not hamper the development and use of\ninnovative approaches in the public administration, which would stand to\nbenefit from a wider use of compliant and safe AI systems, provided that those\nsystems do not entail a high risk to legal and natural persons. 
Finally, AI\nsystems used to dispatch or establish priority in the dispatching of emergency\nfirst response services should also be classified as high-risk since they make\ndecisions in very critical situations for the life and health of persons and\ntheir property.\n\n(38)Actions by law enforcement authorities involving certain uses of AI\nsystems are characterised by a significant degree of power imbalance and may\nlead to surveillance, arrest or deprivation of a natural person\u2019s liberty as\nwell as other adverse impacts on fundamental rights guaranteed in the Charter.\nIn particular, if the AI system is not trained with high quality data, does\nnot meet adequate requirements in terms of its accuracy or robustness, or is\nnot properly designed and tested before being put on the market or otherwise\nput into service, it may single out people in a discriminatory or otherwise\nincorrect or unjust manner. Furthermore, the exercise of important procedural\nfundamental rights, such as the right to an effective remedy and to a fair\ntrial as well as the right of defence and the presumption of innocence, could\nbe hampered, in particular, where such AI systems are not sufficiently\ntransparent, explainable and documented. It is therefore appropriate to\nclassify as high-risk a number of AI systems intended to be used in the law\nenforcement context where accuracy, reliability and transparency are\nparticularly important to avoid adverse impacts, retain public trust and\nensure accountability and effective redress. In view of the nature of the\nactivities in question and the risks relating thereto, those high-risk AI\nsystems should include in particular AI systems intended to be used by law\nenforcement authorities for individual risk assessments, polygraphs and\nsimilar tools or to detect the emotional state of a natural person, to detect\n\u2018deep fakes\u2019, for the evaluation of the reliability of evidence in criminal\nproceedings, for predicting the occurrence or reoccurrence of an actual or\npotential criminal offence based on profiling of natural persons, or assessing\npersonality traits and characteristics or past criminal behaviour of natural\npersons or groups, for profiling in the course of detection, investigation or\nprosecution of criminal offences, as well as for crime analytics regarding\nnatural persons.",
"98ea4c32-2ab5-41d3-9b8d-07f8823e8ead": "In view of the nature of the\nactivities in question and the risks relating thereto, those high-risk AI\nsystems should include in particular AI systems intended to be used by law\nenforcement authorities for individual risk assessments, polygraphs and\nsimilar tools or to detect the emotional state of natural person, to detect\n\u2018deep fakes\u2019, for the evaluation of the reliability of evidence in criminal\nproceedings, for predicting the occurrence or reoccurrence of an actual or\npotential criminal offence based on profiling of natural persons, or assessing\npersonality traits and characteristics or past criminal behaviour of natural\npersons or groups, for profiling in the course of detection, investigation or\nprosecution of criminal offences, as well as for crime analytics regarding\nnatural persons. AI systems specifically intended to be used for\nadministrative proceedings by tax and customs authorities should not be\nconsidered high-risk AI systems used by law enforcement authorities for the\npurposes of prevention, detection, investigation and prosecution of criminal\noffences.\n\n(39)AI systems used in migration, asylum and border control management affect\npeople who are often in particularly vulnerable position and who are dependent\non the outcome of the actions of the competent public authorities. The\naccuracy, non-discriminatory nature and transparency of the AI systems used in\nthose contexts are therefore particularly important to guarantee the respect\nof the fundamental rights of the affected persons, notably their rights to\nfree movement, non-discrimination, protection of private life and personal\ndata, international protection and good administration. It is therefore\nappropriate to classify as high-risk AI systems intended to be used by the\ncompetent public authorities charged with tasks in the fields of migration,\nasylum and border control management as polygraphs and similar tools or to\ndetect the emotional state of a natural person; for assessing certain risks\nposed by natural persons entering the territory of a Member State or applying\nfor visa or asylum; for verifying the authenticity of the relevant documents\nof natural persons; for assisting competent public authorities for the\nexamination of applications for asylum, visa and residence permits and\nassociated complaints with regard to the objective to establish the\neligibility of the natural persons applying for a status. AI systems in the\narea of migration, asylum and border control management covered by this\nRegulation should comply with the relevant procedural requirements set by the\nDirective 2013/32/EU of the European Parliament and of the Council 49 , the\nRegulation (EC) No 810/2009 of the European Parliament and of the Council 50\nand other relevant legislation.\n\n(40)Certain AI systems intended for the administration of justice and\ndemocratic processes should be classified as high-risk, considering their\npotentially significant impact on democracy, rule of law, individual freedoms\nas well as the right to an effective remedy and to a fair trial. In\nparticular, to address the risks of potential biases, errors and opacity, it\nis appropriate to qualify as high-risk AI systems intended to assist judicial\nauthorities in researching and interpreting facts and the law and in applying\nthe law to a concrete set of facts. 
Such qualification should not extend,\nhowever, to AI systems intended for purely ancillary administrative activities\nthat do not affect the actual administration of justice in individual cases,\nsuch as anonymisation or pseudonymisation of judicial decisions, documents or\ndata, communication between personnel, administrative tasks or allocation of\nresources.\n\n(41)The fact that an AI system is classified as high risk under this\nRegulation should not be interpreted as indicating that the use of the system\nis necessarily lawful under other acts of Union law or under national law\ncompatible with Union law, such as on the protection of personal data, on the\nuse of polygraphs and similar tools or other systems to detect the emotional\nstate of natural persons. Any such use should continue to occur solely in\naccordance with the applicable requirements resulting from the Charter and\nfrom the applicable acts of secondary Union law and national law. This\nRegulation should not be understood as providing for the legal ground for\nprocessing of personal data, including special categories of personal data,\nwhere relevant.\n\n(42)To mitigate the risks from high-risk AI systems placed or otherwise put\ninto service on the Union market for users and affected persons, certain\nmandatory requirements should apply, taking into account the intended purpose\nof the use of the system and according to the risk management system to be\nestablished by the provider.\n\n(43)Requirements should apply to high-risk AI systems as regards the quality\nof data sets used, technical documentation and record-keeping, transparency\nand the provision of information to users, human oversight, and robustness,\naccuracy and cybersecurity.",
"24ccfe7d-de4b-4363-8ff8-d55577cb0622": "Any such use should continue to occur solely in\naccordance with the applicable requirements resulting from the Charter and\nfrom the applicable acts of secondary Union law and national law. This\nRegulation should not be understood as providing for the legal ground for\nprocessing of personal data, including special categories of personal data,\nwhere relevant.\n\n(42)To mitigate the risks from high-risk AI systems placed or otherwise put\ninto service on the Union market for users and affected persons, certain\nmandatory requirements should apply, taking into account the intended purpose\nof the use of the system and according to the risk management system to be\nestablished by the provider.\n\n(43)Requirements should apply to high-risk AI systems as regards the quality\nof data sets used, technical documentation and record-keeping, transparency\nand the provision of information to users, human oversight, and robustness,\naccuracy and cybersecurity. Those requirements are necessary to effectively\nmitigate the risks for health, safety and fundamental rights, as applicable in\nthe light of the intended purpose of the system, and no other less trade\nrestrictive measures are reasonably available, thus avoiding unjustified\nrestrictions to trade.\n\n(44)High data quality is essential for the performance of many AI systems,\nespecially when techniques involving the training of models are used, with a\nview to ensure that the high-risk AI system performs as intended and safely\nand it does not become the source of discrimination prohibited by Union law.\nHigh quality training, validation and testing data sets require the\nimplementation of appropriate data governance and management practices.\nTraining, validation and testing data sets should be sufficiently relevant,\nrepresentative and free of errors and complete in view of the intended purpose\nof the system. They should also have the appropriate statistical properties,\nincluding as regards the persons or groups of persons on which the high-risk\nAI system is intended to be used. In particular, training, validation and\ntesting data sets should take into account, to the extent required in the\nlight of their intended purpose, the features, characteristics or elements\nthat are particular to the specific geographical, behavioural or functional\nsetting or context within which the AI system is intended to be used. In order\nto protect the right of others from the discrimination that might result from\nthe bias in AI systems, the providers shouldbe able to process also special\ncategories of personal data, as a matter of substantial public interest, in\norder to ensure the bias monitoring, detection and correction in relation to\nhigh-risk AI systems.\n\n(45)For the development of high-risk AI systems, certain actors, such as\nproviders, notified bodies and other relevant entities, such as digital\ninnovation hubs, testing experimentation facilities and researchers, should be\nable to access and use high quality datasets within their respective fields of\nactivities which are related to this Regulation. European common data spaces\nestablished by the Commission and the facilitation of data sharing between\nbusinesses and with government in the public interest will be instrumental to\nprovide trustful, accountable and non-discriminatory access to high quality\ndata for the training, validation and testing of AI systems. 
For example, in\nhealth, the European health data space will facilitate non-discriminatory\naccess to health data and the training of artificial intelligence algorithms\non those datasets, in a privacy-preserving, secure, timely, transparent and\ntrustworthy manner, and with an appropriate institutional governance. Relevant\ncompetent authorities, including sectoral ones, providing or supporting the\naccess to data may also support the provision of high-quality data for the\ntraining, validation and testing of AI systems.\n\n(46)Having information on how high-risk AI systems have been developed and how\nthey perform throughout their lifecycle is essential to verify compliance with\nthe requirements under this Regulation. This requires keeping records and the\navailability of technical documentation, containing information which is\nnecessary to assess the compliance of the AI system with the relevant\nrequirements. Such information should include the general characteristics,\ncapabilities and limitations of the system, algorithms, data, training,\ntesting and validation processes used as well as documentation on the relevant\nrisk management system. The technical documentation should be kept up to date.\n\n(47)To address the opacity that may make certain AI systems incomprehensible\nto or too complex for natural persons, a certain degree of transparency should\nbe required for high-risk AI systems. Users should be able to interpret the\nsystem output and use it appropriately. High-risk AI systems should therefore\nbe accompanied by relevant documentation and instructions of use and include\nconcise and clear information, including in relation to possible risks to\nfundamental rights and discrimination, where appropriate.\n\n(48)High-risk AI systems should be designed and developed in such a way that\nnatural persons can oversee their functioning.",
"25f75982-c50b-4778-93d0-15c589c5e3e5": "Such information should include the general characteristics,\ncapabilities and limitations of the system, algorithms, data, training,\ntesting and validation processes used as well as documentation on the relevant\nrisk management system. The technical documentation should be kept up to date.\n\n(47)To address the opacity that may make certain AI systems incomprehensible\nto or too complex for natural persons, a certain degree of transparency should\nbe required for high-risk AI systems. Users should be able to interpret the\nsystem output and use it appropriately. High-risk AI systems should therefore\nbe accompanied by relevant documentation and instructions of use and include\nconcise and clear information, including in relation to possible risks to\nfundamental rights and discrimination, where appropriate.\n\n(48)High-risk AI systems should be designed and developed in such a way that\nnatural persons can oversee their functioning. For this purpose, appropriate\nhuman oversight measures should be identified by the provider of the system\nbefore its placing on the market or putting into service. In particular, where\nappropriate, such measures should guarantee that the system is subject to in-\nbuilt operational constraints that cannot be overridden by the system itself\nand is responsive to the human operator, and that the natural persons to whom\nhuman oversight has been assigned have the necessary competence, training and\nauthority to carry out that role.\n\n(49)High-risk AI systems should perform consistently throughout their\nlifecycle and meet an appropriate level of accuracy, robustness and\ncybersecurity in accordance with the generally acknowledged state of the art.\nThe level of accuracy and accuracy metrics should be communicated to the\nusers.\n\n(50)The technical robustness is a key requirement for high-risk AI systems.\nThey should be resilient against risks connected to the limitations of the\nsystem (e.g. errors, faults, inconsistencies, unexpected situations) as well\nas against malicious actions that may compromise the security of the AI system\nand result in harmful or otherwise undesirable behaviour. Failure to protect\nagainst these risks could lead to safety impacts or negatively affect the\nfundamental rights, for example due to erroneous decisions or wrong or biased\noutputs generated by the AI system.\n\n(51)Cybersecurity plays a crucial role in ensuring that AI systems are\nresilient against attempts to alter their use, behaviour, performance or\ncompromise their security properties by malicious third parties exploiting the\nsystem\u2019s vulnerabilities. Cyberattacks against AI systems can leverage AI\nspecific assets, such as training data sets (e.g. data poisoning) or trained\nmodels (e.g. adversarial attacks), or exploit vulnerabilities in the AI\nsystem\u2019s digital assets or the underlying ICT infrastructure. 
To ensure a\nlevel of cybersecurity appropriate to the risks, suitable measures should\ntherefore be taken by the providers of high-risk AI systems, also taking into\naccount as appropriate the underlying ICT infrastructure.\n\n(52)As part of Union harmonisation legislation, rules applicable to the\nplacing on the market, putting into service and use of high-risk AI systems\nshould be laid down consistently with Regulation (EC) No 765/2008 of the\nEuropean Parliament and of the Council 51 setting out the requirements for\naccreditation and the market surveillance of products, Decision No 768/2008/EC\nof the European Parliament and of the Council 52 on a common framework for the\nmarketing of products and Regulation (EU) 2019/1020 of the European Parliament\nand of the Council 53 on market surveillance and compliance of products (\u2018New\nLegislative Framework for the marketing of products\u2019).\n\n(53)It is appropriate that a specific natural or legal person, defined as the\nprovider, takes the responsibility for the placing on the market or putting\ninto service of a high-risk AI system, regardless of whether that natural or\nlegal person is the person who designed or developed the system.\n\n(54)The provider should establish a sound quality management system, ensure\nthe accomplishment of the required conformity assessment procedure, draw up\nthe relevant documentation and establish a robust post-market monitoring\nsystem. Public authorities which put into service high-risk AI systems for\ntheir own use may adopt and implement the rules for the quality management\nsystem as part of the quality management system adopted at a national or\nregional level, as appropriate, taking into account the specificities of the\nsector and the competences and organisation of the public authority in\nquestion.",
"d4cbfa73-28bf-45b4-8bdf-a37cb739655f": "(53)It is appropriate that a specific natural or legal person, defined as the\nprovider, takes the responsibility for the placing on the market or putting\ninto service of a high-risk AI system, regardless of whether that natural or\nlegal person is the person who designed or developed the system.\n\n(54)The provider should establish a sound quality management system, ensure\nthe accomplishment of the required conformity assessment procedure, draw up\nthe relevant documentation and establish a robust post-market monitoring\nsystem. Public authorities which put into service high-risk AI systems for\ntheir own use may adopt and implement the rules for the quality management\nsystem as part of the quality management system adopted at a national or\nregional level, as appropriate, taking into account the specificities of the\nsector and the competences and organisation of the public authority in\nquestion.\n\n(55)Where a high-risk AI system that is a safety component of a product which\nis covered by a relevant New Legislative Framework sectorial legislation is\nnot placed on the market or put into service independently from the product,\nthe manufacturer of the final product as defined under the relevant New\nLegislative Framework legislation should comply with the obligations of the\nprovider established in this Regulation and notably ensure that the AI system\nembedded in the final product complies with the requirements of this\nRegulation.\n\n(56)To enable enforcement of this Regulation and create a level-playing field\nfor operators, and taking into account the different forms of making available\nof digital products, it is important to ensure that, under all circumstances,\na person established in the Union can provide authorities with all the\nnecessary information on the compliance of an AI system. Therefore, prior to\nmaking their AI systems available in the Union, where an importer cannot be\nidentified, providers established outside the Union shall, by written mandate,\nappoint an authorised representative established in the Union.\n\n(57)In line with New Legislative Framework principles, specific obligations\nfor relevant economic operators, such as importers and distributors, should be\nset to ensure legal certainty and facilitate regulatory compliance by those\nrelevant operators.\n\n(58)Given the nature of AI systems and the risks to safety and fundamental\nrights possibly associated with their use, including as regard the need to\nensure proper monitoring of the performance of an AI system in a real-life\nsetting, it is appropriate to set specific responsibilities for users. 
Users\nshould in particular use high-risk AI systems in accordance with the\ninstructions of use and certain other obligations should be provided for with\nregard to monitoring of the functioning of the AI systems and with regard to\nrecord-keeping, as appropriate.\n\n(59)It is appropriate to envisage that the user of the AI system should be the\nnatural or legal person, public authority, agency or other body under whose\nauthority the AI system is operated except where the use is made in the course\nof a personal non-professional activity.\n\n(60)In the light of the complexity of the artificial intelligence value chain,\nrelevant third parties, notably the ones involved in the sale and the supply\nof software, software tools and components, pre-trained models and data, or\nproviders of network services, should cooperate, as appropriate, with\nproviders and users to enable their compliance with the obligations under this\nRegulation and with competent authorities established under this Regulation.\n\n(61)Standardisation should play a key role to provide technical solutions to\nproviders to ensure compliance with this Regulation. Compliance with\nharmonised standards as defined in Regulation (EU) No 1025/2012 of the\nEuropean Parliament and of the Council 54 should be a means for providers to\ndemonstrate conformity with the requirements of this Regulation. However, the\nCommission could adopt common technical specifications in areas where no\nharmonised standards exist or where they are insufficient.\n\n(62)In order to ensure a high level of trustworthiness of high-risk AI\nsystems, those systems should be subject to a conformity assessment prior to\ntheir placing on the market or putting into service.\n\n(63)It is appropriate that, in order to minimise the burden on operators and\navoid any possible duplication, for high-risk AI systems related to products\nwhich are covered by existing Union harmonisation legislation following the\nNew Legislative Framework approach, the compliance of those AI systems with\nthe requirements of this Regulation should be assessed as part of the\nconformity assessment already foreseen under that legislation. The\napplicability of the requirements of this Regulation should thus not affect\nthe specific logic, methodology or general structure of conformity assessment\nunder the relevant specific New Legislative Framework legislation. This\napproach is fully reflected in the interplay between this Regulation and the\n[Machinery Regulation].",
"b0cc4e44-e7d7-4106-847b-133fcade3740": "(62)In order to ensure a high level of trustworthiness of high-risk AI\nsystems, those systems should be subject to a conformity assessment prior to\ntheir placing on the market or putting into service.\n\n(63)It is appropriate that, in order to minimise the burden on operators and\navoid any possible duplication, for high-risk AI systems related to products\nwhich are covered by existing Union harmonisation legislation following the\nNew Legislative Framework approach, the compliance of those AI systems with\nthe requirements of this Regulation should be assessed as part of the\nconformity assessment already foreseen under that legislation. The\napplicability of the requirements of this Regulation should thus not affect\nthe specific logic, methodology or general structure of conformity assessment\nunder the relevant specific New Legislative Framework legislation. This\napproach is fully reflected in the interplay between this Regulation and the\n[Machinery Regulation]. While safety risks of AI systems ensuring safety\nfunctions in machinery are addressed by the requirements of this Regulation,\ncertain specific requirements in the [Machinery Regulation] will ensure the\nsafe integration of the AI system into the overall machinery, so as not to\ncompromise the safety of the machinery as a whole. The [Machinery Regulation]\napplies the same definition of AI system as this Regulation.\n\n(64)Given the more extensive experience of professional pre-market certifiers\nin the field of product safety and the different nature of risks involved, it\nis appropriate to limit, at least in an initial phase of application of this\nRegulation, the scope of application of third-party conformity assessment for\nhigh-risk AI systems other than those related to products. Therefore, the\nconformity assessment of such systems should be carried out as a general rule\nby the provider under its own responsibility, with the only exception of AI\nsystems intended to be used for the remote biometric identification of\npersons, for which the involvement of a notified body in the conformity\nassessment should be foreseen, to the extent they are not prohibited.\n\n(65)In order to carry out third-party conformity assessment for AI systems\nintended to be used for the remote biometric identification of persons,\nnotified bodies should be designated under this Regulation by the national\ncompetent authorities, provided they are compliant with a set of requirements,\nnotably on independence, competence and absence of conflicts of interests.\n\n(66)In line with the commonly established notion of substantial modification\nfor products regulated by Union harmonisation legislation, it is appropriate\nthat an AI system undergoes a new conformity assessment whenever a change\noccurs which may affect the compliance of the system with this Regulation or\nwhen the intended purpose of the system changes. In addition, as regards AI\nsystems which continue to \u2018learn\u2019 after being placed on the market or put into\nservice (i.e. 
they automatically adapt how functions are carried out), it is\nnecessary to provide rules establishing that changes to the algorithm and its\nperformance that have been pre-determined by the provider and assessed at the\nmoment of the conformity assessment should not constitute a substantial\nmodification.\n\n(67)High-risk AI systems should bear the CE marking to indicate their\nconformity with this Regulation so that they can move freely within the\ninternal market. Member States should not create unjustified obstacles to the\nplacing on the market or putting into service of high-risk AI systems that\ncomply with the requirements laid down in this Regulation and bear the CE\nmarking.\n\n(68)Under certain conditions, rapid availability of innovative technologies\nmay be crucial for health and safety of persons and for society as a whole. It\nis thus appropriate that under exceptional reasons of public security or\nprotection of life and health of natural persons and the protection of\nindustrial and commercial property, Member States could authorise the placing\non the market or putting into service of AI systems which have not undergone a\nconformity assessment.\n\n(69)In order to facilitate the work of the Commission and the Member States in\nthe artificial intelligence field as well as to increase the transparency\ntowards the public, providers of high-risk AI systems other than those related\nto products falling within the scope of relevant existing Union harmonisation\nlegislation, should be required to register their high-risk AI system in an EU\ndatabase, to be established and managed by the Commission. The Commission\nshould be the controller of that database, in accordance with Regulation (EU)\n2018/1725 of the European Parliament and of the Council 55. In order to\nensure the full functionality of the database, when deployed, the procedure\nfor setting up the database should include the elaboration of functional\nspecifications by the Commission and an independent audit report.",
"8c748186-5974-4c20-b358-a23e48a0b1c5": "(69)In order to facilitate the work of the Commission and the Member States in\nthe artificial intelligence field as well as to increase the transparency\ntowards the public, providers of high-risk AI systems other than those related\nto products falling within the scope of relevant existing Union harmonisation\nlegislation, should be required to register their high-risk AI system in a EU\ndatabase, to be established and managed by the Commission. The Commission\nshould be the controller of that database, in accordance with Regulation (EU)\n2018/1725 of the European Parliament and of the Council 55 . In order to\nensure the full functionality of the database, when deployed, the procedure\nfor setting the database should include the elaboration of functional\nspecifications by the Commission and an independent audit report.\n\n(70)Certain AI systems intended to interact with natural persons or to\ngenerate content may pose specific risks of impersonation or deception\nirrespective of whether they qualify as high-risk or not. In certain\ncircumstances, the use of these systems should therefore be subject to\nspecific transparency obligations without prejudice to the requirements and\nobligations for high-risk AI systems. In particular, natural persons should be\nnotified that they are interacting with an AI system, unless this is obvious\nfrom the circumstances and the context of use. Moreover, natural persons\nshould be notified when they are exposed to an emotion recognition system or a\nbiometric categorisation system. Such information and notifications should be\nprovided in accessible formats for persons with disabilities. Further, users,\nwho use an AI system to generate or manipulate image, audio or video content\nthat appreciably resembles existing persons, places or events and would\nfalsely appear to a person to be authentic, should disclose that the content\nhas been artificially created or manipulated by labelling the artificial\nintelligence output accordingly and disclosing its artificial origin.\n\n(71)Artificial intelligence is a rapidly developing family of technologies\nthat requires novel forms of regulatory oversight and a safe space for\nexperimentation, while ensuring responsible innovation and integration of\nappropriate safeguards and risk mitigation measures. To ensure a legal\nframework that is innovation-friendly, future-proof and resilient to\ndisruption, national competent authorities from one or more Member States\nshould be encouraged to establish artificial intelligence regulatory sandboxes\nto facilitate the development and testing of innovative AI systems under\nstrict regulatory oversight before these systems are placed on the market or\notherwise put into service.\n\n(72)The objectives of the regulatory sandboxes should be to foster AI\ninnovation by establishing a controlled experimentation and testing\nenvironment in the development and pre-marketing phase with a view to ensuring\ncompliance of the innovative AI systems with this Regulation and other\nrelevant Union and Member States legislation; to enhance legal certainty for\ninnovators and the competent authorities\u2019 oversight and understanding of the\nopportunities, emerging risks and the impacts of AI use, and to accelerate\naccess to markets, including by removing barriers for small and medium\nenterprises (SMEs) and start-ups. 
To ensure uniform implementation across the\nUnion and economies of scale, it is appropriate to establish common rules for\nthe regulatory sandboxes\u2019 implementation and a framework for cooperation\nbetween the relevant authorities involved in the supervision of the sandboxes.\nThis Regulation should provide the legal basis for the use of personal data\ncollected for other purposes for developing certain AI systems in the public\ninterest within the AI regulatory sandbox, in line with Article 6(4) of\nRegulation (EU) 2016/679, and Article 6 of Regulation (EU) 2018/1725, and\nwithout prejudice to Article 4(2) of Directive (EU) 2016/680. Participants in\nthe sandbox should ensure appropriate safeguards and cooperate with the\ncompetent authorities, including by following their guidance and acting\nexpeditiously and in good faith to mitigate any high risks to safety and\nfundamental rights that may arise during the development and experimentation\nin the sandbox. The conduct of the participants in the sandbox should be taken\ninto account when competent authorities decide whether to impose an\nadministrative fine under Article 83(2) of Regulation 2016/679 and Article 57\nof Directive 2016/680.\n\n(73)In order to promote and protect innovation, it is important that the\ninterests of small-scale providers and users of AI systems are taken into\nparticular account. To this objective, Member States should develop\ninitiatives, which are targeted at those operators, including on awareness\nraising and information communication. Moreover, the specific interests and\nneeds of small-scale providers shall be taken into account when Notified\nBodies set conformity assessment fees.",
"00d89864-aae6-42a2-a1e2-a7a44c05a99c": "The conduct of the participants in the sandbox should be taken\ninto account when competent authorities decide whether to impose an\nadministrative fine under Article 83(2) of Regulation 2016/679 and Article 57\nof Directive 2016/680.\n\n(73)In order to promote and protect innovation, it is important that the\ninterests of small-scale providers and users of AI systems are taken into\nparticular account. To this objective, Member States should develop\ninitiatives, which are targeted at those operators, including on awareness\nraising and information communication. Moreover, the specific interests and\nneeds of small-scale providers shall be taken into account when Notified\nBodies set conformity assessment fees. Translation costs related to mandatory\ndocumentation and communication with authorities may constitute a significant\ncost for providers and other operators, notably those of a smaller scale.\nMember States should possibly ensure that one of the languages determined and\naccepted by them for relevant providers\u2019 documentation and for communication\nwith operators is one which is broadly understood by the largest possible\nnumber of cross-border users.\n\n(74)In order to minimise the risks to implementation resulting from lack of\nknowledge and expertise in the market as well as to facilitate compliance of\nproviders and notified bodies with their obligations under this Regulation,\nthe AI-on demand platform, the European Digital Innovation Hubs and the\nTesting and Experimentation Facilities established by the Commission and the\nMember States at national or EU level should possibly contribute to the\nimplementation of this Regulation. Within their respective mission and fields\nof competence, they may provide in particular technical and scientific support\nto providers and notified bodies.\n\n(75)It is appropriate that the Commission facilitates, to the extent possible,\naccess to Testing and Experimentation Facilities to bodies, groups or\nlaboratories established or accredited pursuant to any relevant Union\nharmonisation legislation and which fulfil tasks in the context of conformity\nassessment of products or devices covered by that Union harmonisation\nlegislation. This is notably the case for expert panels, expert laboratories\nand reference laboratories in the field of medical devices pursuant to\nRegulation (EU) 2017/745 and Regulation (EU) 2017/746.\n\n(76)In order to facilitate a smooth, effective and harmonised implementation\nof this Regulation a European Artificial Intelligence Board should be\nestablished. The Board should be responsible for a number of advisory tasks,\nincluding issuing opinions, recommendations, advice or guidance on matters\nrelated to the implementation of this Regulation, including on technical\nspecifications or existing standards regarding the requirements established in\nthis Regulation and providing advice to and assisting the Commission on\nspecific questions related to artificial intelligence.\n\n(77)Member States hold a key role in the application and enforcement of this\nRegulation. In this respect, each Member State should designate one or more\nnational competent authorities for the purpose of supervising the application\nand implementation of this Regulation. 
In order to increase organisation\nefficiency on the side of Member States and to set an official point of\ncontact vis-\u00e0-vis the public and other counterparts at Member State and Union\nlevels, in each Member State one national authority should be designated as\nnational supervisory authority.\n\n(78)In order to ensure that providers of high-risk AI systems can take into\naccount the experience on the use of high-risk AI systems for improving their\nsystems and the design and development process or can take any possible\ncorrective action in a timely manner, all providers should have a post-market\nmonitoring system in place. This system is also key to ensure that the\npossible risks emerging from AI systems which continue to \u2018learn\u2019 after being\nplaced on the market or put into service can be more efficiently and timely\naddressed. In this context, providers should also be required to have a system\nin place to report to the relevant authorities any serious incidents or any\nbreaches of national and Union law protecting fundamental rights resulting\nfrom the use of their AI systems.\n\n(79)In order to ensure an appropriate and effective enforcement of the\nrequirements and obligations set out by this Regulation, which is Union\nharmonisation legislation, the system of market surveillance and compliance of\nproducts established by Regulation (EU) 2019/1020 should apply in its\nentirety. Where necessary for their mandate, national public authorities or\nbodies, which supervise the application of Union law protecting fundamental\nrights, including equality bodies, should also have access to any\ndocumentation created under this Regulation.\n\n(80)Union legislation on financial services includes internal governance and\nrisk management rules and requirements which are applicable to regulated\nfinancial institutions in the course of provision of those services, including\nwhen they make use of AI systems.",
"f262e389-043f-4b99-a9cd-7966f9abcf2d": "(79)In order to ensure an appropriate and effective enforcement of the\nrequirements and obligations set out by this Regulation, which is Union\nharmonisation legislation, the system of market surveillance and compliance of\nproducts established by Regulation (EU) 2019/1020 should apply in its\nentirety. Where necessary for their mandate, national public authorities or\nbodies, which supervise the application of Union law protecting fundamental\nrights, including equality bodies, should also have access to any\ndocumentation created under this Regulation.\n\n(80)Union legislation on financial services includes internal governance and\nrisk management rules and requirements which are applicable to regulated\nfinancial institutions in the course of provision of those services, including\nwhen they make use of AI systems. In order to ensure coherent application and\nenforcement of the obligations under this Regulation and relevant rules and\nrequirements of the Union financial services legislation, the authorities\nresponsible for the supervision and enforcement of the financial services\nlegislation, including where applicable the European Central Bank, should be\ndesignated as competent authorities for the purpose of supervising the\nimplementation of this Regulation, including for market surveillance\nactivities, as regards AI systems provided or used by regulated and supervised\nfinancial institutions. To further enhance the consistency between this\nRegulation and the rules applicable to credit institutions regulated under\nDirective 2013/36/EU of the European Parliament and of the Council 56 , it is\nalso appropriate to integrate the conformity assessment procedure and some of\nthe providers\u2019 procedural obligations in relation to risk management, post\nmarketing monitoring and documentation into the existing obligations and\nprocedures under Directive 2013/36/EU. In order to avoid overlaps, limited\nderogations should also be envisaged in relation to the quality management\nsystem of providers and the monitoring obligation placed on users of high-risk\nAI systems to the extent that these apply to credit institutions regulated by\nDirective 2013/36/EU.\n\n(81)The development of AI systems other than high-risk AI systems in\naccordance with the requirements of this Regulation may lead to a larger\nuptake of trustworthy artificial intelligence in the Union. Providers of non-\nhigh-risk AI systems should be encouraged to create codes of conduct intended\nto foster the voluntary application of the mandatory requirements applicable\nto high-risk AI systems. Providers should also be encouraged to apply on a\nvoluntary basis additional requirements related, for example, to environmental\nsustainability, accessibility to persons with disability, stakeholders\u2019\nparticipation in the design and development of AI systems, and diversity of\nthe development teams. The Commission may develop initiatives, including of a\nsectorial nature, to facilitate the lowering of technical barriers hindering\ncross-border exchange of data for AI development, including on data access\ninfrastructure, semantic and technical interoperability of different types of\ndata.\n\n(82)It is important that AI systems related to products that are not high-risk\nin accordance with this Regulation and thus are not required to comply with\nthe requirements set out herein are nevertheless safe when placed on the\nmarket or put into service. 
To contribute to this objective, the Directive\n2001/95/EC of the European Parliament and of the Council 57 would apply as a\nsafety net.\n\n(83)In order to ensure trustful and constructive cooperation of competent\nauthorities on Union and national level, all parties involved in the\napplication of this Regulation should respect the confidentiality of\ninformation and data obtained in carrying out their tasks.\n\n(84)Member States should take all necessary measures to ensure that the\nprovisions of this Regulation are implemented, including by laying down\neffective, proportionate and dissuasive penalties for their infringement. For\ncertain specific infringements, Member States should take into account the\nmargins and criteria set out in this Regulation. The European Data Protection\nSupervisor should have the power to impose fines on Union institutions,\nagencies and bodies falling within the scope of this Regulation.\n\n(85)In order to ensure that the regulatory framework can be adapted where\nnecessary, the power to adopt acts in accordance with Article 290 TFEU should\nbe delegated to the Commission to amend the techniques and approaches referred\nto in Annex I to define AI systems, the Union harmonisation legislation listed\nin Annex II, the high-risk AI systems listed in Annex III, the provisions\nregarding technical documentation listed in Annex IV, the content of the EU\ndeclaration of conformity in Annex V, the provisions regarding the conformity\nassessment procedures in Annex VI and VII and the provisions establishing the\nhigh-risk AI systems to which the conformity assessment procedure based on\nassessment of the quality management system and assessment of the technical\ndocumentation should apply.",
"a010eb89-971a-4228-9bfb-d0868982e676": "The European Data Protection\nSupervisor should have the power to impose fines on Union institutions,\nagencies and bodies falling within the scope of this Regulation.\n\n(85)In order to ensure that the regulatory framework can be adapted where\nnecessary, the power to adopt acts in accordance with Article 290 TFEU should\nbe delegated to the Commission to amend the techniques and approaches referred\nto in Annex I to define AI systems, the Union harmonisation legislation listed\nin Annex II, the high-risk AI systems listed in Annex III, the provisions\nregarding technical documentation listed in Annex IV, the content of the EU\ndeclaration of conformity in Annex V, the provisions regarding the conformity\nassessment procedures in Annex VI and VII and the provisions establishing the\nhigh-risk AI systems to which the conformity assessment procedure based on\nassessment of the quality management system and assessment of the technical\ndocumentation should apply. It is of particular importance that the Commission\ncarry out appropriate consultations during its preparatory work, including at\nexpert level, and that those consultations be conducted in accordance with the\nprinciples laid down in the Interinstitutional Agreement of 13 April 2016 on\nBetter Law-Making 58 . In particular, to ensure equal participation in the\npreparation of delegated acts, the European Parliament and the Council receive\nall documents at the same time as Member States\u2019 experts, and their experts\nsystematically have access to meetings of Commission expert groups dealing\nwith the preparation of delegated acts.\n\n(86)In order to ensure uniform conditions for the implementation of this\nRegulation, implementing powers should be conferred on the Commission. Those\npowers should be exercised in accordance with Regulation (EU) No 182/2011 of\nthe European Parliament and of the Council 59 .\n\n(87)Since the objective of this Regulation cannot be sufficiently achieved by\nthe Member States and can rather, by reason of the scale or effects of the\naction, be better achieved at Union level, the Union may adopt measures in\naccordance with the principle of subsidiarity as set out in Article 5 TEU. In\naccordance with the principle of proportionality as set out in that Article,\nthis Regulation does not go beyond what is necessary in order to achieve that\nobjective.\n\n(88)This Regulation should apply from \u2026 [OP \u2013 please insert the date\nestablished in Art. 85]. However, the infrastructure related to the governance\nand the conformity assessment system should be operational before that date,\ntherefore the provisions on notified bodies and governance structure should\napply from \u2026 [OP \u2013 please insert the date \u2013 three months following the entry\ninto force of this Regulation]. In addition, Member States should lay down and\nnotify to the Commission the rules on penalties, including administrative\nfines, and ensure that they are properly and effectively implemented by the\ndate of application of this Regulation. 
Therefore the provisions on penalties\nshould apply from [OP \u2013 please insert the date \u2013 twelve months following the\nentry into force of this Regulation].\n\n(89)The European Data Protection Supervisor and the European Data Protection\nBoard were consulted in accordance with Article 42(2) of Regulation (EU)\n2018/1725 and delivered an opinion on [\u2026]\u201d.\n\nHAVE ADOPTED THIS REGULATION:\n\nTITLE I\n\nGENERAL PROVISIONS\n\nArticle 1 \nSubject matter\n\nThis Regulation lays down:\n\n(a)harmonised rules for the placing on the market, the putting into service\nand the use of artificial intelligence systems (\u2018AI systems\u2019) in the Union;\n\n(b)prohibitions of certain artificial intelligence practices;\n\n(c)specific requirements for high-risk AI systems and obligations for\noperators of such systems;\n\n(d)harmonised transparency rules for AI systems intended to interact with\nnatural persons, emotion recognition systems and biometric categorisation\nsystems, and AI systems used to generate or manipulate image, audio or video\ncontent;\n\n(e)rules on market monitoring and surveillance.",
"b7abaa44-260a-4208-8e9c-9f98ab236247": "(89)The European Data Protection Supervisor and the European Data Protection\nBoard were consulted in accordance with Article 42(2) of Regulation (EU)\n2018/1725 and delivered an opinion on [\u2026]\u201d.\n\nHAVE ADOPTED THIS REGULATION:\n\nTITLE I\n\nGENERAL PROVISIONS\n\nArticle 1 \nSubject matter\n\nThis Regulation lays down:\n\n(a)harmonised rules for the placing on the market, the putting into service\nand the use of artificial intelligence systems (\u2018AI systems\u2019) in the Union;\n\n(a)prohibitions of certain artificial intelligence practices;\n\n(b)specific requirements for high-risk AI systems and obligations for\noperators of such systems;\n\n(c)harmonised transparency rules for AI systems intended to interact with\nnatural persons, emotion recognition systems and biometric categorisation\nsystems, and AI systems used to generate or manipulate image, audio or video\ncontent;\n\n(d)rules on market monitoring and surveillance.\n\nArticle 2 \nScope\n\n1.This Regulation applies to:\n\n(a)providers placing on the market or putting into service AI systems in the\nUnion, irrespective of whether those providers are established within the\nUnion or in a third country;\n\n(b)users of AI systems located within the Union;\n\n(c)providers and users of AI systems that are located in a third country,\nwhere the output produced by the system is used in the Union;\n\n2.For high-risk AI systems that are safety components of products or systems,\nor which are themselves products or systems, falling within the scope of the\nfollowing acts, only Article 84 of this Regulation shall apply:\n\n(a)Regulation (EC) 300/2008;\n\n(b)Regulation (EU) No 167/2013;\n\n(c)Regulation (EU) No 168/2013;\n\n(d)Directive 2014/90/EU;\n\n(e)Directive (EU) 2016/797;\n\n(f)Regulation (EU) 2018/858;\n\n(g)Regulation (EU) 2018/1139;\n\n(h)Regulation (EU) 2019/2144.\n\n3.This Regulation shall not apply to AI systems developed or used exclusively\nfor military purposes.\n\n4.This Regulation shall not apply to public authorities in a third country nor\nto international organisations falling within the scope of this Regulation\npursuant to paragraph 1, where those authorities or organisations use AI\nsystems in the framework of international agreements for law enforcement and\njudicial cooperation with the Union or with one or more Member States.\n\n5.This Regulation shall not affect the application of the provisions on the\nliability of intermediary service providers set out in Chapter II, Section IV\nof Directive 2000/31/EC of the European Parliament and of the Council 60 [as\nto be replaced by the corresponding provisions of the Digital Services Act].\n\nArticle 3 \nDefinitions\n\nFor the purpose of this Regulation, the following definitions apply:\n\n(1)\u2018artificial intelligence system\u2019 (AI system) means software that is\ndeveloped with one or more of the techniques and approaches listed in Annex I\nand can, for a given set of human-defined objectives, generate outputs such as\ncontent, predictions, recommendations, or decisions influencing the\nenvironments they interact with;\n\n(1)\u2018provider\u2019 means a natural or legal person, public authority, agency or\nother body that develops an AI system or that has an AI system developed with\na view to placing it on the market or putting it into service under its own\nname or trademark, whether for payment or free of charge;\n\n(3)\u2018small-scale provider\u2019 means a provider that is a micro or small 
enterprise\nwithin the meaning of Commission Recommendation 2003/361/EC 61;\n\n(4)\u2018user\u2019 means any natural or legal person, public authority, agency or other\nbody using an AI system under its authority, except where the AI system is\nused in the course of a personal non-professional activity;\n\n(5)\u2018authorised representative\u2019 means any natural or legal person established\nin the Union who has received a written mandate from a provider of an AI\nsystem to, respectively, perform and carry out on its behalf the obligations\nand procedures established by this Regulation;\n\n(6)\u2018importer\u2019 means any natural or legal person established in the Union that\nplaces on the market or puts into service an AI system that bears the name or\ntrademark of a natural or legal person established outside the Union;\n\n(7)\u2018distributor\u2019 means any natural or legal person in the supply chain, other\nthan the provider or the importer, that makes an AI system available on the\nUnion market without affecting its properties;\n\n(8)\u2018operator\u2019 means the provider, the user, the authorised representative, the\nimporter and the distributor;",
"72e09799-04b4-49e8-b82c-29d3990967b2": "except where the AI system is\nused in the course of a personal non-professional activity;\n\n(5)\u2018authorised representative\u2019 means any natural or legal person established\nin the Union who has received a written mandate from a provider of an AI\nsystem to, respectively, perform and carry out on its behalf the obligations\nand procedures established by this Regulation;\n\n(6)\u2018importer\u2019 means any natural or legal person established in the Union that\nplaces on the market or puts into service an AI system that bears the name or\ntrademark of a natural or legal person established outside the Union;\n\n(7)\u2018distributor\u2019 means any natural or legal person in the supply chain, other\nthan the provider or the importer, that makes an AI system available on the\nUnion market without affecting its properties;\n\n(8)\u2018operator\u2019 means the provider, the user, the authorised representative, the\nimporter and the distributor;\n\n(9)\u2018placing on the market\u2019 means the first making available of an AI system on\nthe Union market;\n\n(10)\u2018making available on the market\u2019 means any supply of an AI system for\ndistribution or use on the Union market in the course of a commercial\nactivity, whether in return for payment or free of charge;\n\n(11)\u2018putting into service\u2019 means the supply of an AI system for first use\ndirectly to the user or for own use on the Union market for its intended\npurpose;\n\n(12)\u2018intended purpose\u2019 means the use for which an AI system is intended by the\nprovider, including the specific context and conditions of use, as specified\nin the information supplied by the provider in the instructions for use,\npromotional or sales materials and statements, as well as in the technical\ndocumentation;\n\n(13)\u2018reasonably foreseeable misuse\u2019 means the use of an AI system in a way\nthat is not in accordance with its intended purpose, but which may result from\nreasonably foreseeable human behaviour or interaction with other systems;\n\n(14)\u2018safety component of a product or system\u2019 means a component of a product\nor of a system which fulfils a safety function for that product or system or\nthe failure or malfunctioning of which endangers the health and safety of\npersons or property;\n\n(15)\u2018instructions for use\u2019 means the information provided by the provider to\ninform the user of in particular an AI system\u2019s intended purpose and proper\nuse, inclusive of the specific geographical, behavioural or functional setting\nwithin which the high-risk AI system is intended to be used;\n\n(16)\u2018recall of an AI system\u2019 means any measure aimed at achieving the return\nto the provider of an AI system made available to users;\n\n(17)\u2018withdrawal of an AI system\u2019 means any measure aimed at preventing the\ndistribution, display and offer of an AI system;\n\n(18)\u2018performance of an AI system\u2019 means the ability of an AI system to achieve\nits intended purpose;\n\n(19)\u2018notifying authority\u2019 means the national authority responsible for setting\nup and carrying out the necessary procedures for the assessment, designation\nand notification of conformity assessment bodies and for their monitoring;\n\n(20)\u2018conformity assessment\u2019 means the process of verifying whether the\nrequirements set out in Title III, Chapter 2 of this Regulation relating to an\nAI system have been fulfilled;\n\n(21)\u2018conformity assessment body\u2019 means 
a body that performs third-party\nconformity assessment activities, including testing, certification and\ninspection;\n\n(22)\u2018notified body\u2019 means a conformity assessment body designated in\naccordance with this Regulation and other relevant Union harmonisation\nlegislation;\n\n(23)\u2018substantial modification\u2019 means a change to the AI system following its\nplacing on the market or putting into service which affects the compliance of\nthe AI system with the requirements set out in Title III, Chapter 2 of this\nRegulation or results in a modification to the intended purpose for which the\nAI system has been assessed;\n\n(24)\u2018CE marking of conformity\u2019 (CE marking) means a marking by which a\nprovider indicates that an AI system is in conformity with the requirements\nset out in Title III, Chapter 2 of this Regulation and other applicable Union\nlegislation harmonising the conditions for the marketing of products (\u2018Union\nharmonisation legislation\u2019) providing for its affixing;\n\n(25)\u2018post-market monitoring\u2019 means all activities carried out by providers of\nAI systems to proactively collect and review experience gained from the use of\nAI systems they place on the market or put into service for the purpose of\nidentifying any need to immediately apply any necessary corrective or\npreventive actions;",
"467de78f-78b5-4ecd-b8d5-273a5532856f": "Chapter 2 of this\nRegulation or results in a modification to the intended purpose for which the\nAI system has been assessed;\n\n(24)\u2018CE marking of conformity\u2019 (CE marking) means a marking by which a\nprovider indicates that an AI system is in conformity with the requirements\nset out in Title III, Chapter 2 of this Regulation and other applicable Union\nlegislation harmonising the conditions for the marketing of products (\u2018Union\nharmonisation legislation\u2019) providing for its affixing;\n\n(25)\u2018post-market monitoring\u2019 means all activities carried out by providers of\nAI systems to proactively collect and review experience gained from the use of\nAI systems they place on the market or put into service for the purpose of\nidentifying any need to immediately apply any necessary corrective or\npreventive actions;\n\n(26)\u2018market surveillance authority\u2019 means the national authority carrying out\nthe activities and taking the measures pursuant to Regulation (EU) 2019/1020;\n\n(27)\u2018harmonised standard\u2019 means a European standard as defined in Article\n2(1)(c) of Regulation (EU) No 1025/2012;\n\n(28)\u2018common specifications\u2019 means a document, other than a standard,\ncontaining technical solutions providing a means to, comply with certain\nrequirements and obligations established under this Regulation;\n\n(29)\u2018training data\u2019 means data used for training an AI system through fitting\nits learnable parameters, including the weights of a neural network;\n\n(30)\u2018validation data\u2019 means data used for providing an evaluation of the\ntrained AI system and for tuning its non-learnable parameters and its learning\nprocess, among other things, in order to prevent overfitting; whereas the\nvalidation dataset can be a separate dataset or part of the training dataset,\neither as a fixed or variable split;\n\n(31)\u2018testing data\u2019 means data used for providing an independent evaluation of\nthe trained and validated AI system in order to confirm the expected\nperformance of that system before its placing on the market or putting into\nservice;\n\n(32)\u2018input data\u2019 means data provided to or directly acquired by an AI system\non the basis of which the system produces an output;\n\n(33)\u2018biometric data\u2019 means personal data resulting from specific technical\nprocessing relating to the physical, physiological or behavioural\ncharacteristics of a natural person, which allow or confirm the unique\nidentification of that natural person, such as facial images or dactyloscopic\ndata;\n\n(34)\u2018emotion recognition system\u2019 means an AI system for the purpose of\nidentifying or inferring emotions or intentions of natural persons on the\nbasis of their biometric data;\n\n(35)\u2018biometric categorisation system\u2019 means an AI system for the purpose of\nassigning natural persons to specific categories, such as sex, age, hair\ncolour, eye colour, tattoos, ethnic origin or sexual or political orientation,\non the basis of their biometric data;\n\n(36)\u2018remote biometric identification system\u2019 means an AI system for the\npurpose of identifying natural persons at a distance through the comparison of\na person\u2019s biometric data with the biometric data contained in a reference\ndatabase, and without prior knowledge of the user of the AI system whether the\nperson will be present and can be identified ;\n\n(37)\u2018\u2018real-time\u2019 remote biometric identification 
system\u2019 means a remote\nbiometric identification system whereby the capturing of biometric data, the\ncomparison and the identification all occur without a significant delay. This\ncomprises not only instant identification, but also limited short delays in\norder to avoid circumvention.",
"8ad6d948-ea50-4cbd-93a5-92860de7e2c8": "(35)\u2018biometric categorisation system\u2019 means an AI system for the purpose of\nassigning natural persons to specific categories, such as sex, age, hair\ncolour, eye colour, tattoos, ethnic origin or sexual or political orientation,\non the basis of their biometric data;\n\n(36)\u2018remote biometric identification system\u2019 means an AI system for the\npurpose of identifying natural persons at a distance through the comparison of\na person\u2019s biometric data with the biometric data contained in a reference\ndatabase, and without prior knowledge of the user of the AI system whether the\nperson will be present and can be identified ;\n\n(37)\u2018\u2018real-time\u2019 remote biometric identification system\u2019 means a remote\nbiometric identification system whereby the capturing of biometric data, the\ncomparison and the identification all occur without a significant delay. This\ncomprises not only instant identification, but also limited short delays in\norder to avoid circumvention.\n\n(38)\u2018\u2018post\u2019 remote biometric identification system\u2019 means a remote biometric\nidentification system other than a \u2018real-time\u2019 remote biometric identification\nsystem;\n\n(39)\u2018publicly accessible space\u2019 means any physical place accessible to the\npublic, regardless of whether certain conditions for access may apply;\n\n(40)\u2018law enforcement authority\u2019 means:\n\n(a)any public authority competent for the prevention, investigation, detection\nor prosecution of criminal offences or the execution of criminal penalties,\nincluding the safeguarding against and the prevention of threats to public\nsecurity; or\n\n(b)any other body or entity entrusted by Member State law to exercise public\nauthority and public powers for the purposes of the prevention, investigation,\ndetection or prosecution of criminal offences or the execution of criminal\npenalties, including the safeguarding against and the prevention of threats to\npublic security;\n\n(41)\u2018law enforcement\u2019 means activities carried out by law enforcement\nauthorities for the prevention, investigation, detection or prosecution of\ncriminal offences or the execution of criminal penalties, including the\nsafeguarding against and the prevention of threats to public security;\n\n(42)\u2018national supervisory authority\u2019 means the authority to which a Member\nState assigns the responsibility for the implementation and application of\nthis Regulation, for coordinating the activities entrusted to that Member\nState, for acting as the single contact point for the Commission, and for\nrepresenting the Member State at the European Artificial Intelligence Board;\n\n(43)\u2018national competent authority\u2019 means the national supervisory authority,\nthe notifying authority and the market surveillance authority;\n\n(44)\u2018serious incident\u2019 means any incident that directly or indirectly leads,\nmight have led or might lead to any of the following:\n\n(a)the death of a person or serious damage to a person\u2019s health, to property\nor the environment,\n\n(b)a serious and irreversible disruption of the management and operation of\ncritical infrastructure.\n\nArticle 4 \nAmendments to Annex I\n\nThe Commission is empowered to adopt delegated acts in accordance with Article\n73 to amend the list of techniques and approaches listed in Annex I, in order\nto update that list to market and technological developments on the basis of\ncharacteristics that 
are similar to the techniques and approaches listed\ntherein.",
"95c1e8a9-c746-4879-aa55-a1c0d24ea275": "Article 4 \nAmendments to Annex I\n\nThe Commission is empowered to adopt delegated acts in accordance with Article\n73 to amend the list of techniques and approaches listed in Annex I, in order\nto update that list to market and technological developments on the basis of\ncharacteristics that are similar to the techniques and approaches listed\ntherein.\n\nTITLE II\n\nPROHIBITED ARTIFICIAL INTELLIGENCE PRACTICES\n\nArticle 5\n\n1.The following artificial intelligence practices shall be prohibited:\n\n(a)the placing on the market, putting into service or use of an AI system that\ndeploys subliminal techniques beyond a person\u2019s consciousness in order to\nmaterially distort a person\u2019s behaviour in a manner that causes or is likely\nto cause that person or another person physical or psychological harm;\n\n(b)the placing on the market, putting into service or use of an AI system that\nexploits any of the vulnerabilities of a specific group of persons due to\ntheir age, physical or mental disability, in order to materially distort the\nbehaviour of a person pertaining to that group in a manner that causes or is\nlikely to cause that person or another person physical or psychological harm;\n\n(c)the placing on the market, putting into service or use of AI systems by\npublic authorities or on their behalf for the evaluation or classification of\nthe trustworthiness of natural persons over a certain period of time based on\ntheir social behaviour or known or predicted personal or personality\ncharacteristics, with the social score leading to either or both of the\nfollowing:\n\n(i)detrimental or unfavourable treatment of certain natural persons or whole\ngroups thereof in social contexts which are unrelated to the contexts in which\nthe data was originally generated or collected;\n\n(ii)detrimental or unfavourable treatment of certain natural persons or whole\ngroups thereof that is unjustified or disproportionate to their social\nbehaviour or its gravity;\n\n(d)the use of \u2018real-time\u2019 remote biometric identification systems in publicly\naccessible spaces for the purpose of law enforcement, unless and in as far as\nsuch use is strictly necessary for one of the following objectives:\n\n(i)the targeted search for specific potential victims of crime, including\nmissing children;\n\n(ii)the prevention of a specific, substantial and imminent threat to the life\nor physical safety of natural persons or of a terrorist attack;\n\n(iii)the detection, localisation, identification or prosecution of a\nperpetrator or suspect of a criminal offence referred to in Article 2(2) of\nCouncil Framework Decision 2002/584/JHA 62 and punishable in the Member State\nconcerned by a custodial sentence or a detention order for a maximum period of\nat least three years, as determined by the law of that Member State.\n\n2.The use of \u2018real-time\u2019 remote biometric identification systems in publicly\naccessible spaces for the purpose of law enforcement for any of the objectives\nreferred to in paragraph 1 point d) shall take into account the following\nelements:\n\n(a)the nature of the situation giving rise to the possible use, in particular\nthe seriousness, probability and scale of the harm caused in the absence of\nthe use of the system;\n\n(b)the consequences of the use of the system for the rights and freedoms of\nall persons concerned, in particular the seriousness, probability and scale of\nthose consequences.\n\nIn addition, the use of 
\u2018real-time\u2019 remote biometric identification systems in\npublicly accessible spaces for the purpose of law enforcement for any of the\nobjectives referred to in paragraph 1, point (d) shall comply with necessary and\nproportionate safeguards and conditions in relation to the use, in particular\nas regards the temporal, geographic and personal limitations.\n\n3.As regards paragraphs 1, point (d) and 2, each individual use for the\npurpose of law enforcement of a \u2018real-time\u2019 remote biometric identification\nsystem in publicly accessible spaces shall be subject to a prior authorisation\ngranted by a judicial authority or by an independent administrative authority\nof the Member State in which the use is to take place, issued upon a reasoned\nrequest and in accordance with the detailed rules of national law referred to\nin paragraph 4. However, in a duly justified situation of urgency, the use of\nthe system may be commenced without an authorisation and the authorisation may\nbe requested only during or after the use.\n\nThe competent judicial or administrative authority shall only grant the\nauthorisation where it is satisfied, based on objective evidence or clear\nindications presented to it, that the use of the \u2018real-time\u2019 remote biometric\nidentification system at issue is necessary for and proportionate to achieving\none of the objectives specified in paragraph 1, point (d), as identified in\nthe request.",
"31741773-414a-4d6d-935c-9ccb81899c28": "However, in a duly justified situation of urgency, the use of\nthe system may be commenced without an authorisation and the authorisation may\nbe requested only during or after the use.\n\nThe competent judicial or administrative authority shall only grant the\nauthorisation where it is satisfied, based on objective evidence or clear\nindications presented to it, that the use of the \u2018real-time\u2019 remote biometric\nidentification system at issue is necessary for and proportionate to achieving\none of the objectives specified in paragraph 1, point (d), as identified in\nthe request. In deciding on the request, the competent judicial or\nadministrative authority shall take into account the elements referred to in\nparagraph 2.\n\n4.A Member State may decide to provide for the possibility to fully or\npartially authorise the use of \u2018real-time\u2019 remote biometric identification\nsystems in publicly accessible spaces for the purpose of law enforcement\nwithin the limits and under the conditions listed in paragraphs 1, point (d),\n2 and 3. That Member State shall lay down in its national law the necessary\ndetailed rules for the request, issuance and exercise of, as well as\nsupervision relating to, the authorisations referred to in paragraph 3. Those\nrules shall also specify in respect of which of the objectives listed in\nparagraph 1, point (d), including which of the criminal offences referred to\nin point (iii) thereof, the competent authorities may be authorised to use\nthose systems for the purpose of law enforcement.\n\nTITLE III\n\nHIGH-RISK AI SYSTEMS\n\nChapter 1\n\nCLASSIFICATION OF AI SYSTEMS AS HIGH-RISK\n\nArticle 6 \nClassification rules for high-risk AI systems\n\n1.Irrespective of whether an AI system is placed on the market or put into\nservice independently from the products referred to in points (a) and (b),\nthat AI system shall be considered high-risk where both of the following\nconditions are fulfilled:\n\n(a)the AI system is intended to be used as a safety component of a product, or\nis itself a product, covered by the Union harmonisation legislation listed in\nAnnex II;\n\n(b)the product whose safety component is the AI system, or the AI system\nitself as a product, is required to undergo a third-party conformity\nassessment with a view to the placing on the market or putting into service of\nthat product pursuant to the Union harmonisation legislation listed in Annex\nII.\n\n2.In addition to the high-risk AI systems referred to in paragraph 1, AI\nsystems referred to in Annex III shall also be considered high-risk.\n\nArticle 7 \nAmendments to Annex III\n\n1.The Commission is empowered to adopt delegated acts in accordance with\nArticle 73 to update the list in Annex III by adding high-risk AI systems\nwhere both of the following conditions are fulfilled:\n\n(a)the AI systems are intended to be used in any of the areas listed in points\n1 to 8 of Annex III;\n\n(b)the AI systems pose a risk of harm to the health and safety, or a risk of\nadverse impact on fundamental rights, that is, in respect of its severity and\nprobability of occurrence, equivalent to or greater than the risk of harm or\nof adverse impact posed by the high-risk AI systems already referred to in\nAnnex III.",
"65a44e8d-81fd-49ad-a762-0f03d26f54f3": "2.In addition to the high-risk AI systems referred to in paragraph 1, AI\nsystems referred to in Annex III shall also be considered high-risk.\n\nArticle 7 \nAmendments to Annex III\n\n1.The Commission is empowered to adopt delegated acts in accordance with\nArticle 73 to update the list in Annex III by adding high-risk AI systems\nwhere both of the following conditions are fulfilled:\n\n(a)the AI systems are intended to be used in any of the areas listed in points\n1 to 8 of Annex III;\n\n(b)the AI systems pose a risk of harm to the health and safety, or a risk of\nadverse impact on fundamental rights, that is, in respect of its severity and\nprobability of occurrence, equivalent to or greater than the risk of harm or\nof adverse impact posed by the high-risk AI systems already referred to in\nAnnex III.\n\n2.When assessing for the purposes of paragraph 1 whether an AI system poses a\nrisk of harm to the health and safety or a risk of adverse impact on\nfundamental rights that is equivalent to or greater than the risk of harm\nposed by the high-risk AI systems already referred to in Annex III, the\nCommission shall take into account the following criteria:\n\n(a)the intended purpose of the AI system;\n\n(b)the extent to which an AI system has been used or is likely to be used;\n\n(c)the extent to which the use of an AI system has already caused harm to the\nhealth and safety or adverse impact on the fundamental rights or has given\nrise to significant concerns in relation to the materialisation of such harm\nor adverse impact, as demonstrated by reports or documented allegations\nsubmitted to national competent authorities;\n\n(d)the potential extent of such harm or such adverse impact, in particular in\nterms of its intensity and its ability to affect a plurality of persons;\n\n(e)the extent to which potentially harmed or adversely impacted persons are\ndependent on the outcome produced with an AI system, in particular because for\npractical or legal reasons it is not reasonably possible to opt-out from that\noutcome;\n\n(f)the extent to which potentially harmed or adversely impacted persons are in\na vulnerable position in relation to the user of an AI system, in particular\ndue to an imbalance of power, knowledge, economic or social circumstances, or\nage;\n\n(g)the extent to which the outcome produced with an AI system is easily\nreversible, whereby outcomes having an impact on the health or safety of\npersons shall not be considered as easily reversible;\n\n(h)the extent to which existing Union legislation provides for:\n\n(i)effective measures of redress in relation to the risks posed by an AI\nsystem, with the exclusion of claims for damages;\n\n(ii)effective measures to prevent or substantially minimise those risks.\n\nChapter 2\n\nrequirements for high-risk Ai systems\n\nArticle 8 \nCompliance with the requirements\n\n1.High-risk AI systems shall comply with the requirements established in this\nChapter.\n\n2.The intended purpose of the high-risk AI system and the risk management\nsystem referred to in Article 9 shall be taken into account when ensuring\ncompliance with those requirements.\n\nArticle 9 \nRisk management system\n\n1.A risk management system shall be established, implemented, documented and\nmaintained in relation to high-risk AI systems.\n\n2.The risk management system shall consist of a continuous iterative process\nrun throughout the entire lifecycle of a high-risk AI system, requiring\nregular systematic updating. 
It shall comprise the following steps:\n\n(a)identification and analysis of the known and foreseeable risks associated\nwith each high-risk AI system;\n\n(b)estimation and evaluation of the risks that may emerge when the high-risk\nAI system is used in accordance with its intended purpose and under conditions\nof reasonably foreseeable misuse;\n\n(c)evaluation of other possibly arising risks based on the analysis of data\ngathered from the post-market monitoring system referred to in Article 61;\n\n(d)adoption of suitable risk management measures in accordance with the\nprovisions of the following paragraphs.\n\n3.The risk management measures referred to in paragraph 2, point (d) shall\ngive due consideration to the effects and possible interactions resulting from\nthe combined application of the requirements set out in this Chapter 2. They\nshall take into account the generally acknowledged state of the art, including\nas reflected in relevant harmonised standards or common specifications.\n\n4.The risk management measures referred to in paragraph 2, point (d) shall be\nsuch that any residual risk associated with each hazard as well as the overall\nresidual risk of the high-risk AI systems is judged acceptable, provided that\nthe high-risk AI system is used in accordance with its intended purpose or\nunder conditions of reasonably foreseeable misuse. Those residual risks shall\nbe communicated to the user.",
"0341e3ab-9d7c-4a28-8419-010a95c63b7f": "3.The risk management measures referred to in paragraph 2, point (d) shall\ngive due consideration to the effects and possible interactions resulting from\nthe combined application of the requirements set out in this Chapter 2. They\nshall take into account the generally acknowledged state of the art, including\nas reflected in relevant harmonised standards or common specifications.\n\n4.The risk management measures referred to in paragraph 2, point (d) shall be\nsuch that any residual risk associated with each hazard as well as the overall\nresidual risk of the high-risk AI systems is judged acceptable, provided that\nthe high-risk AI system is used in accordance with its intended purpose or\nunder conditions of reasonably foreseeable misuse. Those residual risks shall\nbe communicated to the user.\n\nIn identifying the most appropriate risk management measures, the following\nshall be ensured:\n\n(a)elimination or reduction of risks as far as possible through adequate\ndesign and development;\n\n(b)where appropriate, implementation of adequate mitigation and control\nmeasures in relation to risks that cannot be eliminated;\n\n(c)provision of adequate information pursuant to Article 13, in particular as\nregards the risks referred to in paragraph 2, point (b) of this Article, and,\nwhere appropriate, training to users.\n\nIn eliminating or reducing risks related to the use of the high-risk AI\nsystem, due consideration shall be given to the technical knowledge,\nexperience, education, training to be expected by the user and the environment\nin which the system is intended to be used.\n\n5.High-risk AI systems shall be tested for the purposes of identifying the\nmost appropriate risk management measures. Testing shall ensure that high-risk\nAI systems perform consistently for their intended purpose and they are in\ncompliance with the requirements set out in this Chapter.\n\n6.Testing procedures shall be suitable to achieve the intended purpose of the\nAI system and do not need to go beyond what is necessary to achieve that\npurpose.\n\n7.The testing of the high-risk AI systems shall be performed, as appropriate,\nat any point in time throughout the development process, and, in any event,\nprior to the placing on the market or the putting into service. Testing shall\nbe made against preliminarily defined metrics and probabilistic thresholds\nthat are appropriate to the intended purpose of the high-risk AI system.\n\n8.When implementing the risk management system described in paragraphs 1 to 7,\nspecific consideration shall be given to whether the high-risk AI system is\nlikely to be accessed by or have an impact on children.\n\n9.For credit institutions regulated by Directive 2013/36/EU, the aspects\ndescribed in paragraphs 1 to 8 shall be part of the risk management procedures\nestablished by those institutions pursuant to Article 74 of that Directive.\n\nArticle 10 \nData and data governance\n\n1.High-risk AI systems which make use of techniques involving the training of\nmodels with data shall be developed on the basis of training, validation and\ntesting data sets that meet the quality criteria referred to in paragraphs 2\nto 5.\n\n2.Training, validation and testing data sets shall be subject to appropriate\ndata governance and management practices. 
Those practices shall concern in\nparticular,\n\n(a)the relevant design choices;\n\n(b)data collection;\n\n(c)relevant data preparation processing operations, such as annotation,\nlabelling, cleaning, enrichment and aggregation;\n\n(d)the formulation of relevant assumptions, notably with respect to the\ninformation that the data are supposed to measure and represent;\n\n(e)a prior assessment of the availability, quantity and suitability of the\ndata sets that are needed;\n\n(f)examination in view of possible biases;\n\n(g)the identification of any possible data gaps or shortcomings, and how those\ngaps and shortcomings can be addressed.\n\n3.Training, validation and testing data sets shall be relevant,\nrepresentative, free of errors and complete. They shall have the appropriate\nstatistical properties, including, where applicable, as regards the persons or\ngroups of persons on which the high-risk AI system is intended to be used.\nThese characteristics of the data sets may be met at the level of individual\ndata sets or a combination thereof.\n\n4.Training, validation and testing data sets shall take into account, to the\nextent required by the intended purpose, the characteristics or elements that\nare particular to the specific geographical, behavioural or functional setting\nwithin which the high-risk AI system is intended to be used.",
"efbdacf3-d4ea-4e20-a609-04835d23da7b": "3.Training, validation and testing data sets shall be relevant,\nrepresentative, free of errors and complete. They shall have the appropriate\nstatistical properties, including, where applicable, as regards the persons or\ngroups of persons on which the high-risk AI system is intended to be used.\nThese characteristics of the data sets may be met at the level of individual\ndata sets or a combination thereof.\n\n4.Training, validation and testing data sets shall take into account, to the\nextent required by the intended purpose, the characteristics or elements that\nare particular to the specific geographical, behavioural or functional setting\nwithin which the high-risk AI system is intended to be used.\n\n5.To the extent that it is strictly necessary for the purposes of ensuring\nbias monitoring, detection and correction in relation to the high-risk AI\nsystems, the providers of such systems may process special categories of\npersonal data referred to in Article 9(1) of Regulation (EU) 2016/679, Article\n10 of Directive (EU) 2016/680 and Article 10(1) of Regulation (EU) 2018/1725,\nsubject to appropriate safeguards for the fundamental rights and freedoms of\nnatural persons, including technical limitations on the re-use and use of\nstate-of-the-art security and privacy-preserving measures, such as\npseudonymisation, or encryption where anonymisation may significantly affect\nthe purpose pursued.\n\n6.Appropriate data governance and management practices shall apply for the\ndevelopment of high-risk AI systems other than those which make use of\ntechniques involving the training of models in order to ensure that those\nhigh-risk AI systems comply with paragraph 2.\n\nArticle 11 \nTechnical documentation\n\n1.The technical documentation of a high-risk AI system shall be drawn up\nbefore that system is placed on the market or put into service and shall be\nkept up-to date.\n\nThe technical documentation shall be drawn up in such a way to demonstrate\nthat the high-risk AI system complies with the requirements set out in this\nChapter and provide national competent authorities and notified bodies with\nall the necessary information to assess the compliance of the AI system with\nthose requirements. It shall contain, at a minimum, the elements set out in\nAnnex IV.\n\n2.Where a high-risk AI system related to a product, to which the legal acts\nlisted in Annex II, section A apply, is placed on the market or put into\nservice one single technical documentation shall be drawn up containing all\nthe information set out in Annex IV as well as the information required under\nthose legal acts.\n\n3.The Commission is empowered to adopt delegated acts in accordance with\nArticle 73 to amend Annex IV where necessary to ensure that, in the light of\ntechnical progress, the technical documentation provides all the necessary\ninformation to assess the compliance of the system with the requirements set\nout in this Chapter.\n\nArticle 12 \nRecord-keeping\n\n1.High-risk AI systems shall be designed and developed with capabilities\nenabling the automatic recording of events (\u2018logs\u2019) while the high-risk AI\nsystems is operating. 
Those logging capabilities shall conform to recognised\nstandards or common specifications.\n\n2.The logging capabilities shall ensure a level of traceability of the AI\nsystem\u2019s functioning throughout its lifecycle that is appropriate to the\nintended purpose of the system.\n\n3.In particular, logging capabilities shall enable the monitoring of the\noperation of the high-risk AI system with respect to the occurrence of\nsituations that may result in the AI system presenting a risk within the\nmeaning of Article 65(1) or lead to a substantial modification, and facilitate\nthe post-market monitoring referred to in Article 61.\n\n4.For high-risk AI systems referred to in paragraph 1, point (a) of Annex III,\nthe logging capabilities shall provide, at a minimum:\n\n(a)recording of the period of each use of the system (start date and time and\nend date and time of each use);\n\n(b)the reference database against which input data has been checked by the\nsystem;\n\n(c)the input data for which the search has led to a match;\n\n(d)the identification of the natural persons involved in the verification of\nthe results, as referred to in Article 14(5).\n\nArticle 13 \nTransparency and provision of information to users\n\n1.High-risk AI systems shall be designed and developed in such a way to ensure\nthat their operation is sufficiently transparent to enable users to interpret\nthe system\u2019s output and use it appropriately. An appropriate type and degree\nof transparency shall be ensured, with a view to achieving compliance with the\nrelevant obligations of the user and of the provider set out in Chapter 3 of\nthis Title.",
"998606e2-6710-4b67-97e0-3ff5068b4981": "Article 13 \nTransparency and provision of information to users\n\n1.High-risk AI systems shall be designed and developed in such a way to ensure\nthat their operation is sufficiently transparent to enable users to interpret\nthe system\u2019s output and use it appropriately. An appropriate type and degree\nof transparency shall be ensured, with a view to achieving compliance with the\nrelevant obligations of the user and of the provider set out in Chapter 3 of\nthis Title.\n\n2.High-risk AI systems shall be accompanied by instructions for use in an\nappropriate digital format or otherwise that include concise, complete,\ncorrect and clear information that is relevant, accessible and comprehensible\nto users.\n\n3.The information referred to in paragraph 2 shall specify:\n\n(a)the identity and the contact details of the provider and, where applicable,\nof its authorised representative;\n\n(b)the characteristics, capabilities and limitations of performance of the\nhigh-risk AI system, including:\n\n(i)its intended purpose;\n\n(ii)the level of accuracy, robustness and cybersecurity referred to in Article\n15 against which the high-risk AI system has been tested and validated and\nwhich can be expected, and any known and foreseeable circumstances that may\nhave an impact on that expected level of accuracy, robustness and\ncybersecurity;\n\n(iii)any known or foreseeable circumstance, related to the use of the high-\nrisk AI system in accordance with its intended purpose or under conditions of\nreasonably foreseeable misuse, which may lead to risks to the health and\nsafety or fundamental rights;\n\n(iv)its performance as regards the persons or groups of persons on which the\nsystem is intended to be used;\n\n(v)when appropriate, specifications for the input data, or any other relevant\ninformation in terms of the training, validation and testing data sets used,\ntaking into account the intended purpose of the AI system.\n\n(c)the changes to the high-risk AI system and its performance which have been\npre-determined by the provider at the moment of the initial conformity\nassessment, if any;\n\n(d)the human oversight measures referred to in Article 14, including the\ntechnical measures put in place to facilitate the interpretation of the\noutputs of AI systems by the users;\n\n(e)the expected lifetime of the high-risk AI system and any necessary\nmaintenance and care measures to ensure the proper functioning of that AI\nsystem, including as regards software updates.\n\nArticle 14 \nHuman oversight\n\n1.High-risk AI systems shall be designed and developed in such a way,\nincluding with appropriate human-machine interface tools, that they can be\neffectively overseen by natural persons during the period in which the AI\nsystem is in use.\n\n2.Human oversight shall aim at preventing or minimising the risks to health,\nsafety or fundamental rights that may emerge when a high-risk AI system is\nused in accordance with its intended purpose or under conditions of reasonably\nforeseeable misuse, in particular when such risks persist notwithstanding the\napplication of other requirements set out in this Chapter.\n\n3.Human oversight shall be ensured through either one or all of the following\nmeasures:\n\n(a)identified and built, when technically feasible, into the high-risk AI\nsystem by the provider before it is placed on the market or put into service;\n\n(b)identified by the provider before placing the high-risk AI system on the\nmarket or putting it into 
service and that are appropriate to be implemented\nby the user.\n\n4.The measures referred to in paragraph 3 shall enable the individuals to whom\nhuman oversight is assigned to do the following, as appropriate to the\ncircumstances:\n\n(a)fully understand the capacities and limitations of the high-risk AI system\nand be able to duly monitor its operation, so that signs of anomalies,\ndysfunctions and unexpected performance can be detected and addressed as soon\nas possible;\n\n(b)remain aware of the possible tendency of automatically relying or over-\nrelying on the output produced by a high-risk AI system (\u2018automation bias\u2019),\nin particular for high-risk AI systems used to provide information or\nrecommendations for decisions to be taken by natural persons;\n\n(c)be able to correctly interpret the high-risk AI system\u2019s output, taking\ninto account in particular the characteristics of the system and the\ninterpretation tools and methods available;\n\n(d)be able to decide, in any particular situation, not to use the high-risk AI\nsystem or otherwise disregard, override or reverse the output of the high-risk\nAI system;\n\n(e)be able to intervene on the operation of the high-risk AI system or\ninterrupt the system through a \u201cstop\u201d button or a similar procedure.",
"9cc2f66a-1157-4c0e-8d32-681202f99e42": "5.For high-risk AI systems referred to in point 1(a) of Annex III, the\nmeasures referred to in paragraph 3 shall be such as to ensure that, in\naddition, no action or decision is taken by the user on the basis of the\nidentification resulting from the system unless this has been verified and\nconfirmed by at least two natural persons.\n\nArticle 15 \nAccuracy, robustness and cybersecurity\n\n1.High-risk AI systems shall be designed and developed in such a way that they\nachieve, in the light of their intended purpose, an appropriate level of\naccuracy, robustness and cybersecurity, and perform consistently in those\nrespects throughout their lifecycle.\n\n2.The levels of accuracy and the relevant accuracy metrics of high-risk AI\nsystems shall be declared in the accompanying instructions of use.\n\n3.High-risk AI systems shall be resilient as regards errors, faults or\ninconsistencies that may occur within the system or the environment in which\nthe system operates, in particular due to their interaction with natural\npersons or other systems.\n\nThe robustness of high-risk AI systems may be achieved through technical\nredundancy solutions, which may include backup or fail-safe plans.\n\nHigh-risk AI systems that continue to learn after being placed on the market\nor put into service shall be developed in such a way to ensure that possibly\nbiased outputs due to outputs used as an input for future operations\n(\u2018feedback loops\u2019) are duly addressed with appropriate mitigation measures.\n\n4.High-risk AI systems shall be resilient as regards attempts by unauthorised\nthird parties to alter their use or performance by exploiting the system\nvulnerabilities.\n\nThe technical solutions aimed at ensuring the cybersecurity of high-risk AI\nsystems shall be appropriate to the relevant circumstances and the risks.\n\nThe technical solutions to address AI specific vulnerabilities shall include,\nwhere appropriate, measures to prevent and control for attacks trying to\nmanipulate the training dataset (\u2018data poisoning\u2019), inputs designed to cause\nthe model to make a mistake (\u2018adversarial examples\u2019), or model flaws.\n\nChapter 3\n\nOBLIGATIONS OF PROVIDERS AND USERS OF HIGH-RISK AI SYSTEMS and other parties\n\nArticle 16 \nObligations of providers of high-risk AI systems\n\nProviders of high-risk AI systems shall:\n\n(a)ensure that their high-risk AI systems are compliant with the requirements\nset out in Chapter 2 of this Title;\n\n(b)have a quality management system in place which complies with Article 17;\n\n(c)draw-up the technical documentation of the high-risk AI system;\n\n(d)when under their control, keep the logs automatically generated by their\nhigh-risk AI systems;\n\n(e)ensure that the high-risk AI system undergoes the relevant conformity\nassessment procedure, prior to its placing on the market or putting into\nservice;\n\n(f)comply with the registration obligations referred to in Article 51;\n\n(g)take the necessary corrective actions, if the high-risk AI system is not in\nconformity with the requirements set out in Chapter 2 of this Title;\n\n(h)inform the national competent authorities of the Member States in which\nthey made the AI system available or put it into service and, where\napplicable, the notified body of the non-compliance and of any corrective\nactions taken;\n\n(i)to affix the CE marking to their high-risk AI systems to indicate the\nconformity with this Regulation in accordance with Article 
49;\n\n(j)upon request of a national competent authority, demonstrate the conformity\nof the high-risk AI system with the requirements set out in Chapter 2 of this\nTitle.\n\nArticle 17 \nQuality management system\n\n1.Providers of high-risk AI systems shall put a quality management system in\nplace that ensures compliance with this Regulation.",
"9f274be8-4aa1-4386-985d-7e58d339f184": "Article 17 \nQuality management system\n\n1.Providers of high-risk AI systems shall put a quality management system in\nplace that ensures compliance with this Regulation. That system shall be\ndocumented in a systematic and orderly manner in the form of written policies,\nprocedures and instructions, and shall include at least the following aspects:\n\n(a)a strategy for regulatory compliance, including compliance with conformity\nassessment procedures and procedures for the management of modifications to\nthe high-risk AI system;\n\n(b)techniques, procedures and systematic actions to be used for the design,\ndesign control and design verification of the high-risk AI system;\n\n(c)techniques, procedures and systematic actions to be used for the\ndevelopment, quality control and quality assurance of the high-risk AI system;\n\n(d)examination, test and validation procedures to be carried out before,\nduring and after the development of the high-risk AI system, and the frequency\nwith which they have to be carried out;\n\n(e)technical specifications, including standards, to be applied and, where the\nrelevant harmonised standards are not applied in full, the means to be used to\nensure that the high-risk AI system complies with the requirements set out in\nChapter 2 of this Title;\n\n(f)systems and procedures for data management, including data collection, data\nanalysis, data labelling, data storage, data filtration, data mining, data\naggregation, data retention and any other operation regarding the data that is\nperformed before and for the purposes of the placing on the market or putting\ninto service of high-risk AI systems;\n\n(g)the risk management system referred to in Article 9;\n\n(h)the setting-up, implementation and maintenance of a post-market monitoring\nsystem, in accordance with Article 61;\n\n(i)procedures related to the reporting of serious incidents and of\nmalfunctioning in accordance with Article 62;\n\n(j)the handling of communication with national competent authorities,\ncompetent authorities, including sectoral ones, providing or supporting the\naccess to data, notified bodies, other operators, customers or other\ninterested parties;\n\n(k)systems and procedures for record keeping of all relevant documentation and\ninformation;\n\n(l)resource management, including security of supply related measures;\n\n(m)an accountability framework setting out the responsibilities of the\nmanagement and other staff with regard to all aspects listed in this\nparagraph.\n\n2.The implementation of aspects referred to in paragraph 1 shall be\nproportionate to the size of the provider\u2019s organisation.\n\n3.For providers that are credit institutions regulated by Directive 2013/36/\nEU, the obligation to put a quality management system in place shall be deemed\nto be fulfilled by complying with the rules on internal governance\narrangements, processes and mechanisms pursuant to Article 74 of that\nDirective. 
In that context, any harmonised standards referred to in Article 40\nof this Regulation shall be taken into account.\n\nArticle 18 \nObligation to draw up technical documentation\n\n1.Providers of high-risk AI systems shall draw up the technical documentation\nreferred to in Article 11 in accordance with Annex IV.\n\n2.Providers that are credit institutions regulated by Directive 2013/36/EU\nshall maintain the technical documentation as part of the documentation\nconcerning internal governance, arrangements, processes and mechanisms\npursuant to Article 74 of that Directive.\n\nArticle 19 \nConformity assessment\n\n1.Providers of high-risk AI systems shall ensure that their systems undergo\nthe relevant conformity assessment procedure in accordance with Article 43,\nprior to their placing on the market or putting into service. Where the\ncompliance of the AI systems with the requirements set out in Chapter 2 of\nthis Title has been demonstrated following that conformity assessment, the\nproviders shall draw up an EU declaration of conformity in accordance with\nArticle 48 and affix the CE marking of conformity in accordance with Article\n49.\n\n2.For high-risk AI systems referred to in point 5(b) of Annex III that are\nplaced on the market or put into service by providers that are credit\ninstitutions regulated by Directive 2013/36/EU, the conformity assessment\nshall be carried out as part of the procedure referred to in Articles 97 to 101\nof that Directive.\n\nArticle 20 \nAutomatically generated logs\n\n1.Providers of high-risk AI systems shall keep the logs automatically\ngenerated by their high-risk AI systems, to the extent such logs are under\ntheir control by virtue of a contractual arrangement with the user or\notherwise by law. The logs shall be kept for a period that is appropriate in\nthe light of the intended purpose of the high-risk AI system and applicable legal\nobligations under Union or national law.",
"4024673d-b875-4fee-8312-b1beb9c375f9": "2.For high-risk AI systems referred to in point 5(b) of Annex III that are\nplaced on the market or put into service by providers that are credit\ninstitutions regulated by Directive 2013/36/EU, the conformity assessment\nshall be carried out as part of the procedure referred to in Articles 97 to101\nof that Directive.\n\nArticle 20 \nAutomatically generated logs\n\n1.Providers of high-risk AI systems shall keep the logs automatically\ngenerated by their high-risk AI systems, to the extent such logs are under\ntheir control by virtue of a contractual arrangement with the user or\notherwise by law. The logs shall be kept for a period that is appropriate in\nthe light of the intended purpose of high-risk AI system and applicable legal\nobligations under Union or national law.\n\n2.Providers that are credit institutions regulated by Directive 2013/36/EU\nshall maintain the logs automatically generated by their high-risk AI systems\nas part of the documentation under Articles 74 of that Directive.\n\nArticle 21 \nCorrective actions\n\nProviders of high-risk AI systems which consider or have reason to consider\nthat a high-risk AI system which they have placed on the market or put into\nservice is not in conformity with this Regulation shall immediately take the\nnecessary corrective actions to bring that system into conformity, to withdraw\nit or to recall it, as appropriate. They shall inform the distributors of the\nhigh-risk AI system in question and, where applicable, the authorised\nrepresentative and importers accordingly.\n\nArticle 22 \nDuty of information\n\nWhere the high-risk AI system presents a risk within the meaning of Article\n65(1) and that risk is known to the provider of the system, that provider\nshall immediately inform the national competent authorities of the Member\nStates in which it made the system available and, where applicable, the\nnotified body that issued a certificate for the high-risk AI system, in\nparticular of the non-compliance and of any corrective actions taken.\n\nArticle 23 \nCooperation with competent authorities\n\nProviders of high-risk AI systems shall, upon request by a national competent\nauthority, provide that authority with all the information and documentation\nnecessary to demonstrate the conformity of the high-risk AI system with the\nrequirements set out in Chapter 2 of this Title, in an official Union language\ndetermined by the Member State concerned. 
Upon a reasoned request from a\nnational competent authority, providers shall also give that authority access\nto the logs automatically generated by the high-risk AI system, to the extent\nsuch logs are under their control by virtue of a contractual arrangement with\nthe user or otherwise by law.\n\nArticle 24 \nObligations of product manufacturers\n\nWhere a high-risk AI system related to products to which the legal acts listed\nin Annex II, section A, apply, is placed on the market or put into service\ntogether with the product manufactured in accordance with those legal acts and\nunder the name of the product manufacturer, the manufacturer of the product\nshall take the responsibility of the compliance of the AI system with this\nRegulation and, as far as the AI system is concerned, have the same\nobligations imposed by the present Regulation on the provider.\n\nArticle 25 \nAuthorised representatives\n\n1.Prior to making their systems available on the Union market, where an\nimporter cannot be identified, providers established outside the Union shall,\nby written mandate, appoint an authorised representative which is established\nin the Union.\n\n2.The authorised representative shall perform the tasks specified in the\nmandate received from the provider. The mandate shall empower the authorised\nrepresentative to carry out the following tasks:\n\n(a)keep a copy of the EU declaration of conformity and the technical\ndocumentation at the disposal of the national competent authorities and\nnational authorities referred to in Article 63(7);\n\n(b)provide a national competent authority, upon a reasoned request, with all\nthe information and documentation necessary to demonstrate the conformity of a\nhigh-risk AI system with the requirements set out in Chapter 2 of this Title,\nincluding access to the logs automatically generated by the high-risk AI\nsystem to the extent such logs are under the control of the provider by virtue\nof a contractual arrangement with the user or otherwise by law;\n\n(c)cooperate with competent national authorities, upon a reasoned request, on\nany action the latter takes in relation to the high-risk AI system.",
"dbdda4ce-1bbe-4ede-aa99-3de01bd47851": "2.The authorised representative shall perform the tasks specified in the\nmandate received from the provider. The mandate shall empower the authorised\nrepresentative to carry out the following tasks:\n\n(a)keep a copy of the EU declaration of conformity and the technical\ndocumentation at the disposal of the national competent authorities and\nnational authorities referred to in Article 63(7);\n\n(b)provide a national competent authority, upon a reasoned request, with all\nthe information and documentation necessary to demonstrate the conformity of a\nhigh-risk AI system with the requirements set out in Chapter 2 of this Title,\nincluding access to the logs automatically generated by the high-risk AI\nsystem to the extent such logs are under the control of the provider by virtue\nof a contractual arrangement with the user or otherwise by law;\n\n(c)cooperate with competent national authorities, upon a reasoned request, on\nany action the latter takes in relation to the high-risk AI system.\n\nArticle 26 \nObligations of importers\n\n1.Before placing a high-risk AI system on the market, importers of such system\nshall ensure that:\n\n(a)the appropriate conformity assessment procedure has been carried out by the\nprovider of that AI system\n\n(b)the provider has drawn up the technical documentation in accordance with\nAnnex IV;\n\n(c)the system bears the required conformity marking and is accompanied by the\nrequired documentation and instructions of use.\n\n2.Where an importer considers or has reason to consider that a high-risk AI\nsystem is not in conformity with this Regulation, it shall not place that\nsystem on the market until that AI system has been brought into conformity.\nWhere the high-risk AI system presents a risk within the meaning of Article\n65(1), the importer shall inform the provider of the AI system and the market\nsurveillance authorities to that effect.\n\n3.Importers shall indicate their name, registered trade name or registered\ntrade mark, and the address at which they can be contacted on the high-risk AI\nsystem or, where that is not possible, on its packaging or its accompanying\ndocumentation, as applicable.\n\n4.Importers shall ensure that, while a high-risk AI system is under their\nresponsibility, where applicable, storage or transport conditions do not\njeopardise its compliance with the requirements set out in Chapter 2 of this\nTitle.\n\n5.Importers shall provide national competent authorities, upon a reasoned\nrequest, with all necessary information and documentation to demonstrate the\nconformity of a high-risk AI system with the requirements set out in Chapter 2\nof this Title in a language which can be easily understood by that national\ncompetent authority, including access to the logs automatically generated by\nthe high-risk AI system to the extent such logs are under the control of the\nprovider by virtue of a contractual arrangement with the user or otherwise by\nlaw. 
They shall also cooperate with those authorities on any action national\ncompetent authority takes in relation to that system.\n\nArticle 27 \nObligations of distributors\n\n1.Before making a high-risk AI system available on the market, distributors\nshall verify that the high-risk AI system bears the required CE conformity\nmarking, that it is accompanied by the required documentation and instruction\nof use, and that the provider and the importer of the system, as applicable,\nhave complied with the obligations set out in this Regulation.\n\n2.Where a distributor considers or has reason to consider that a high-risk AI\nsystem is not in conformity with the requirements set out in Chapter 2 of this\nTitle, it shall not make the high-risk AI system available on the market until\nthat system has been brought into conformity with those requirements.\nFurthermore, where the system presents a risk within the meaning of Article\n65(1), the distributor shall inform the provider or the importer of the\nsystem, as applicable, to that effect.\n\n3.Distributors shall ensure that, while a high-risk AI system is under their\nresponsibility, where applicable, storage or transport conditions do not\njeopardise the compliance of the system with the requirements set out in\nChapter 2 of this Title.\n\n4.A distributor that considers or has reason to consider that a high-risk AI\nsystem which it has made available on the market is not in conformity with the\nrequirements set out in Chapter 2 of this Title shall take the corrective\nactions necessary to bring that system into conformity with those\nrequirements, to withdraw it or recall it or shall ensure that the provider,\nthe importer or any relevant operator, as appropriate, takes those corrective\nactions. Where the high-risk AI system presents a risk within the meaning of\nArticle 65(1), the distributor shall immediately inform the national competent\nauthorities of the Member States in which it has made the product available to\nthat effect, giving details, in particular, of the non-compliance and of any\ncorrective actions taken.",
"17da604c-40a3-4f34-8308-8acb2427ceae": "4.A distributor that considers or has reason to consider that a high-risk AI\nsystem which it has made available on the market is not in conformity with the\nrequirements set out in Chapter 2 of this Title shall take the corrective\nactions necessary to bring that system into conformity with those\nrequirements, to withdraw it or recall it or shall ensure that the provider,\nthe importer or any relevant operator, as appropriate, takes those corrective\nactions. Where the high-risk AI system presents a risk within the meaning of\nArticle 65(1), the distributor shall immediately inform the national competent\nauthorities of the Member States in which it has made the product available to\nthat effect, giving details, in particular, of the non-compliance and of any\ncorrective actions taken.\n\n5.Upon a reasoned request from a national competent authority, distributors of\nhigh-risk AI systems shall provide that authority with all the information and\ndocumentation necessary to demonstrate the conformity of a high-risk system\nwith the requirements set out in Chapter 2 of this Title. Distributors shall\nalso cooperate with that national competent authority on any action taken by\nthat authority.\n\nArticle 28 \nObligations of distributors, importers, users or any other third-party\n\n1.Any distributor, importer, user or other third-party shall be considered a\nprovider for the purposes of this Regulation and shall be subject to the\nobligations of the provider under Article 16, in any of the following\ncircumstances:\n\n(a)they place on the market or put into service a high-risk AI system under\ntheir name or trademark;\n\n(b)they modify the intended purpose of a high-risk AI system already placed on\nthe market or put into service;\n\n(c)they make a substantial modification to the high-risk AI system.\n\n2.Where the circumstances referred to in paragraph 1, point (b) or (c), occur,\nthe provider that initially placed the high-risk AI system on the market or\nput it into service shall no longer be considered a provider for the purposes\nof this Regulation.\n\nArticle 29 \nObligations of users of high-risk AI systems\n\n1.Users of high-risk AI systems shall use such systems in accordance with the\ninstructions of use accompanying the systems, pursuant to paragraphs 2 and 5.\n\n2.The obligations in paragraph 1 are without prejudice to other user\nobligations under Union or national law and to the user\u2019s discretion in\norganising its own resources and activities for the purpose of implementing\nthe human oversight measures indicated by the provider.\n\n3.Without prejudice to paragraph 1, to the extent the user exercises control\nover the input data, that user shall ensure that input data is relevant in\nview of the intended purpose of the high-risk AI system.\n\n4.Users shall monitor the operation of the high-risk AI system on the basis of\nthe instructions of use. When they have reasons to consider that the use in\naccordance with the instructions of use may result in the AI system presenting\na risk within the meaning of Article 65(1) they shall inform the provider or\ndistributor and suspend the use of the system. They shall also inform the\nprovider or distributor when they have identified any serious incident or any\nmalfunctioning within the meaning of Article 62 and interrupt the use of the\nAI system. 
In case the user is not able to reach the provider, Article 62\nshall apply mutatis mutandis.\n\nFor users that are credit institutions regulated by Directive 2013/36/EU, the\nmonitoring obligation set out in the first subparagraph shall be deemed to be\nfulfilled by complying with the rules on internal governance arrangements,\nprocesses and mechanisms pursuant to Article 74 of that Directive.\n\n5.Users of high-risk AI systems shall keep the logs automatically generated by\nthat high-risk AI system, to the extent such logs are under their control. The\nlogs shall be kept for a period that is appropriate in the light of the\nintended purpose of the high-risk AI system and applicable legal obligations\nunder Union or national law.\n\nUsers that are credit institutions regulated by Directive 2013/36/EU shall\nmaintain the logs as part of the documentation concerning internal governance\narrangements, processes and mechanisms pursuant to Article 74 of that\nDirective.\n\n6.Users of high-risk AI systems shall use the information provided under\nArticle 13 to comply with their obligation to carry out a data protection\nimpact assessment under Article 35 of Regulation (EU) 2016/679 or Article 27\nof Directive (EU) 2016/680, where applicable.",
"bf244202-3dcc-41ff-b2c4-3212dcb17646": "5.Users of high-risk AI systems shall keep the logs automatically generated by\nthat high-risk AI system, to the extent such logs are under their control. The\nlogs shall be kept for a period that is appropriate in the light of the\nintended purpose of the high-risk AI system and applicable legal obligations\nunder Union or national law.\n\nUsers that are credit institutions regulated by Directive 2013/36/EU shall\nmaintain the logs as part of the documentation concerning internal governance\narrangements, processes and mechanisms pursuant to Article 74 of that\nDirective.\n\n6.Users of high-risk AI systems shall use the information provided under\nArticle 13 to comply with their obligation to carry out a data protection\nimpact assessment under Article 35 of Regulation (EU) 2016/679 or Article 27\nof Directive (EU) 2016/680, where applicable.\n\nChapter 4\n\nNOTIFIYING AUTHORITIES AND NOTIFIED BODIES\n\nArticle 30 \nNotifying authorities\n\n1.Each Member State shall designate or establish a notifying authority\nresponsible for setting up and carrying out the necessary procedures for the\nassessment, designation and notification of conformity assessment bodies and\nfor their monitoring.\n\n2.Member States may designate a national accreditation body referred to in\nRegulation (EC) No 765/2008 as a notifying authority.\n\n3.Notifying authorities shall be established, organised and operated in such a\nway that no conflict of interest arises with conformity assessment bodies and\nthe objectivity and impartiality of their activities are safeguarded.\n\n4.Notifying authorities shall be organised in such a way that decisions\nrelating to the notification of conformity assessment bodies are taken by\ncompetent persons different from those who carried out the assessment of those\nbodies.\n\n5.Notifying authorities shall not offer or provide any activities that\nconformity assessment bodies perform or any consultancy services on a\ncommercial or competitive basis.\n\n6.Notifying authorities shall safeguard the confidentiality of the information\nthey obtain.\n\n7.Notifying authorities shall have a sufficient number of competent personnel\nat their disposal for the proper performance of their tasks.\n\n8.Notifying authorities shall make sure that conformity assessments are\ncarried out in a proportionate manner, avoiding unnecessary burdens for\nproviders and that notified bodies perform their activities taking due account\nof the size of an undertaking, the sector in which it operates, its structure\nand the degree of complexity of the AI system in question.\n\nArticle 31 \nApplication of a conformity assessment body for notification\n\n1.Conformity assessment bodies shall submit an application for notification to\nthe notifying authority of the Member State in which they are established.\n\n2.The application for notification shall be accompanied by a description of\nthe conformity assessment activities, the conformity assessment module or\nmodules and the artificial intelligence technologies for which the conformity\nassessment body claims to be competent, as well as by an accreditation\ncertificate, where one exists, issued by a national accreditation body\nattesting that the conformity assessment body fulfils the requirements laid\ndown in Article 33. 
Any valid document related to existing designations of the\napplicant notified body under any other Union harmonisation legislation shall\nbe added.\n\n3.Where the conformity assessment body concerned cannot provide an\naccreditation certificate, it shall provide the notifying authority with the\ndocumentary evidence necessary for the verification, recognition and regular\nmonitoring of its compliance with the requirements laid down in Article 33.\nFor notified bodies which are designated under any other Union harmonisation\nlegislation, all documents and certificates linked to those designations may\nbe used to support their designation procedure under this Regulation, as\nappropriate.\n\nArticle 32 \nNotification procedure\n\n1.Notifying authorities may notify only conformity assessment bodies which\nhave satisfied the requirements laid down in Article 33.\n\n2.Notifying authorities shall notify the Commission and the other Member\nStates using the electronic notification tool developed and managed by the\nCommission.\n\n3.The notification shall include full details of the conformity assessment\nactivities, the conformity assessment module or modules and the artificial\nintelligence technologies concerned.\n\n4.The conformity assessment body concerned may perform the activities of a\nnotified body only where no objections are raised by the Commission or the\nother Member States within one month of a notification.\n\n5.Notifying authorities shall notify the Commission and the other Member\nStates of any subsequent relevant changes to the notification.\n\nArticle 33 \nNotified bodies\n\n1.Notified bodies shall verify the conformity of high-risk AI system in\naccordance with the conformity assessment procedures referred to in Article\n43.\n\n2.Notified bodies shall satisfy the organisational, quality management,\nresources and process requirements that are necessary to fulfil their tasks.\n\n3.The organisational structure, allocation of responsibilities, reporting\nlines and operation of notified bodies shall be such as to ensure that there\nis confidence in the performance by and in the results of the conformity\nassessment activities that the notified bodies conduct.",
"a081285b-90a6-4ec8-af54-757bc889574c": "4.The conformity assessment body concerned may perform the activities of a\nnotified body only where no objections are raised by the Commission or the\nother Member States within one month of a notification.\n\n5.Notifying authorities shall notify the Commission and the other Member\nStates of any subsequent relevant changes to the notification.\n\nArticle 33 \nNotified bodies\n\n1.Notified bodies shall verify the conformity of high-risk AI system in\naccordance with the conformity assessment procedures referred to in Article\n43.\n\n2.Notified bodies shall satisfy the organisational, quality management,\nresources and process requirements that are necessary to fulfil their tasks.\n\n3.The organisational structure, allocation of responsibilities, reporting\nlines and operation of notified bodies shall be such as to ensure that there\nis confidence in the performance by and in the results of the conformity\nassessment activities that the notified bodies conduct.\n\n4.Notified bodies shall be independent of the provider of a high-risk AI\nsystem in relation to which it performs conformity assessment activities.\nNotified bodies shall also be independent of any other operator having an\neconomic interest in the high-risk AI system that is assessed, as well as of\nany competitors of the provider.\n\n5.Notified bodies shall be organised and operated so as to safeguard the\nindependence, objectivity and impartiality of their activities. Notified\nbodies shall document and implement a structure and procedures to safeguard\nimpartiality and to promote and apply the principles of impartiality\nthroughout their organisation, personnel and assessment activities.\n\n6.Notified bodies shall have documented procedures in place ensuring that\ntheir personnel, committees, subsidiaries, subcontractors and any associated\nbody or personnel of external bodies respect the confidentiality of the\ninformation which comes into their possession during the performance of\nconformity assessment activities, except when disclosure is required by law.\nThe staff of notified bodies shall be bound to observe professional secrecy\nwith regard to all information obtained in carrying out their tasks under this\nRegulation, except in relation to the notifying authorities of the Member\nState in which their activities are carried out.\n\n7.Notified bodies shall have procedures for the performance of activities\nwhich take due account of the size of an undertaking, the sector in which it\noperates, its structure, the degree of complexity of the AI system in\nquestion.\n\n8.Notified bodies shall take out appropriate liability insurance for their\nconformity assessment activities, unless liability is assumed by the Member\nState concerned in accordance with national law or that Member State is\ndirectly responsible for the conformity assessment.\n\n9.Notified bodies shall be capable of carrying out all the tasks falling to\nthem under this Regulation with the highest degree of professional integrity\nand the requisite competence in the specific field, whether those tasks are\ncarried out by notified bodies themselves or on their behalf and under their\nresponsibility.\n\n10.Notified bodies shall have sufficient internal competences to be able to\neffectively evaluate the tasks conducted by external parties on their behalf.\nTo that end, at all times and for each conformity assessment procedure and\neach type of high-risk AI system in relation to which they have been\ndesignated, the 
notified body shall have permanent availability of sufficient\nadministrative, technical and scientific personnel who possess experience and\nknowledge relating to the relevant artificial intelligence technologies, data\nand data computing and to the requirements set out in Chapter 2 of this Title.\n\n11.Notified bodies shall participate in coordination activities as referred to\nin Article 38. They shall also take part directly or be represented in\nEuropean standardisation organisations, or ensure that they are aware and up\nto date in respect of relevant standards.\n\n12.Notified bodies shall make available and submit upon request all relevant\ndocumentation, including the providers\u2019 documentation, to the notifying\nauthority referred to in Article 30 to allow it to conduct its assessment,\ndesignation, notification, monitoring and surveillance activities and to\nfacilitate the assessment outlined in this Chapter.\n\nArticle 34 \nSubsidiaries of and subcontracting by notified bodies\n\n1.Where a notified body subcontracts specific tasks connected with the\nconformity assessment or has recourse to a subsidiary, it shall ensure that\nthe subcontractor or the subsidiary meets the requirements laid down in\nArticle 33 and shall inform the notifying authority accordingly.\n\n2.Notified bodies shall take full responsibility for the tasks performed by\nsubcontractors or subsidiaries wherever these are established.\n\n3.Activities may be subcontracted or carried out by a subsidiary only with the\nagreement of the provider.\n\n4.Notified bodies shall keep at the disposal of the notifying authority the\nrelevant documents concerning the assessment of the qualifications of the\nsubcontractor or the subsidiary and the work carried out by them under this\nRegulation.\n\nArticle 35 \nIdentification numbers and lists of notified bodies designated under this\nRegulation\n\n1.The Commission shall assign an identification number to notified bodies.",
"1101bd69-cec5-4620-887c-9c1eb642cb18": "Article 34 \nSubsidiaries of and subcontracting by notified bodies\n\n1.Where a notified body subcontracts specific tasks connected with the\nconformity assessment or has recourse to a subsidiary, it shall ensure that\nthe subcontractor or the subsidiary meets the requirements laid down in\nArticle 33 and shall inform the notifying authority accordingly.\n\n2.Notified bodies shall take full responsibility for the tasks performed by\nsubcontractors or subsidiaries wherever these are established.\n\n3.Activities may be subcontracted or carried out by a subsidiary only with the\nagreement of the provider.\n\n4.Notified bodies shall keep at the disposal of the notifying authority the\nrelevant documents concerning the assessment of the qualifications of the\nsubcontractor or the subsidiary and the work carried out by them under this\nRegulation.\n\nArticle 35 \nIdentification numbers and lists of notified bodies designated under this\nRegulation\n\n1.The Commission shall assign an identification number to notified bodies. It\nshall assign a single number, even where a body is notified under several\nUnion acts.\n\n2.The Commission shall make publicly available the list of the bodies notified\nunder this Regulation, including the identification numbers that have been\nassigned to them and the activities for which they have been notified. The\nCommission shall ensure that the list is kept up to date.\n\nArticle 36 \nChanges to notifications\n\n1.Where a notifying authority has suspicions or has been informed that a\nnotified body no longer meets the requirements laid down in Article 33, or\nthat it is failing to fulfil its obligations, that authority shall without\ndelay investigate the matter with the utmost diligence. In that context, it\nshall inform the notified body concerned about the objections raised and give\nit the possibility to make its views known. If the notifying authority comes\nto the conclusion that the notified body investigation no longer meets the\nrequirements laid down in Article 33 or that it is failing to fulfil its\nobligations, it shall restrict, suspend or withdraw the notification as\nappropriate, depending on the seriousness of the failure. 
It shall also\nimmediately inform the Commission and the other Member States accordingly.\n\n2.In the event of restriction, suspension or withdrawal of notification, or\nwhere the notified body has ceased its activity, the notifying authority shall\ntake appropriate steps to ensure that the files of that notified body are\neither taken over by another notified body or kept available for the\nresponsible notifying authorities at their request.\n\nArticle 37 \nChallenge to the competence of notified bodies\n\n1.The Commission shall, where necessary, investigate all cases where there are\nreasons to doubt whether a notified body complies with the requirements laid\ndown in Article 33.\n\n2.The notifying authority shall provide the Commission, on request, with all\nrelevant information relating to the notification of the notified body\nconcerned.\n\n3.The Commission shall ensure that all confidential information obtained in\nthe course of its investigations pursuant to this Article is treated\nconfidentially.\n\n4.Where the Commission ascertains that a notified body does not meet or no\nlonger meets the requirements laid down in Article 33, it shall adopt a\nreasoned decision requesting the notifying Member State to take the necessary\ncorrective measures, including withdrawal of notification if necessary. That\nimplementing act shall be adopted in accordance with the examination procedure\nreferred to in Article 74(2).\n\nArticle 38 \nCoordination of notified bodies\n\n1.The Commission shall ensure that, with regard to the areas covered by this\nRegulation, appropriate coordination and cooperation between notified bodies\nactive in the conformity assessment procedures of AI systems pursuant to this\nRegulation are put in place and properly operated in the form of a sectoral\ngroup of notified bodies.\n\n2.Member States shall ensure that the bodies notified by them participate in\nthe work of that group, directly or by means of designated representatives.\n\nArticle 39 \nConformity assessment bodies of third countries\n\nConformity assessment bodies established under the law of a third country with\nwhich the Union has concluded an agreement may be authorised to carry out the\nactivities of notified bodies under this Regulation.\n\nChapter 5\n\nSTANDARDS, CONFORMITY ASSESSMENT, CERTIFICATES, REGISTRATION\n\nArticle 40 \nHarmonised standards\n\nHigh-risk AI systems which are in conformity with harmonised standards or\nparts thereof the references of which have been published in the Official\nJournal of the European Union shall be presumed to be in conformity with the\nrequirements set out in Chapter 2 of this Title, to the extent those standards\ncover those requirements.",
"6baa1ddc-34a1-4064-8efb-f7752d6abea9": "2.Member States shall ensure that the bodies notified by them participate in\nthe work of that group, directly or by means of designated representatives.\n\nArticle 39 \nConformity assessment bodies of third countries\n\nConformity assessment bodies established under the law of a third country with\nwhich the Union has concluded an agreement may be authorised to carry out the\nactivities of notified Bodies under this Regulation.\n\nChapter 5\n\nSTANDARDS, CONFORMITY ASSESSMENT, CERTIFICATES, REGISTRATION\n\nArticle 40 \nHarmonised standards\n\nHigh-risk AI systems which are in conformity with harmonised standards or\nparts thereof the references of which have been published in the Official\nJournal of the European Union shall be presumed to be in conformity with the\nrequirements set out in Chapter 2 of this Title, to the extent those standards\ncover those requirements.\n\nArticle 41 \nCommon specifications\n\n1.Where harmonised standards referred to in Article 40 do not exist or where\nthe Commission considers that the relevant harmonised standards are\ninsufficient or that there is a need to address specific safety or fundamental\nright concerns, the Commission may, by means of implementing acts, adopt\ncommon specifications in respect of the requirements set out in Chapter 2 of\nthis Title. Those implementing acts shall be adopted in accordance with the\nexamination procedure referred to in Article 74(2).\n\n2.The Commission, when preparing the common specifications referred to in\nparagraph 1, shall gather the views of relevant bodies or expert groups\nestablished under relevant sectorial Union law.\n\n3.High-risk AI systems which are in conformity with the common specifications\nreferred to in paragraph 1 shall be presumed to be in conformity with the\nrequirements set out in Chapter 2 of this Title, to the extent those common\nspecifications cover those requirements.\n\n4.Where providers do not comply with the common specifications referred to in\nparagraph 1, they shall duly justify that they have adopted technical\nsolutions that are at least equivalent thereto.\n\nArticle 42 \nPresumption of conformity with certain requirements\n\n1.Taking into account their intended purpose, high-risk AI systems that have\nbeen trained and tested on data concerning the specific geographical,\nbehavioural and functional setting within which they are intended to be used\nshall be presumed to be in compliance with the requirement set out in Article\n10(4).\n\n2.High-risk AI systems that have been certified or for which a statement of\nconformity has been issued under a cybersecurity scheme pursuant to Regulation\n(EU) 2019/881 of the European Parliament and of the Council 63 and the\nreferences of which have been published in the Official Journal of the\nEuropean Union shall be presumed to be in compliance with the cybersecurity\nrequirements set out in Article 15 of this Regulation in so far as the\ncybersecurity certificate or statement of conformity or parts thereof cover\nthose requirements.\n\nArticle 43 \nConformity assessment\n\n1.For high-risk AI systems listed in point 1 of Annex III, where, in\ndemonstrating the compliance of a high-risk AI system with the requirements\nset out in Chapter 2 of this Title, the provider has applied harmonised\nstandards referred to in Article 40, or, where applicable, common\nspecifications referred to in Article 41, the provider shall follow one of the\nfollowing procedures:\n\n(a)the conformity assessment procedure 
based on internal control referred to\nin Annex VI;\n\n(b)the conformity assessment procedure based on assessment of the quality\nmanagement system and assessment of the technical documentation, with the\ninvolvement of a notified body, referred to in Annex VII.\n\nWhere, in demonstrating the compliance of a high-risk AI system with the\nrequirements set out in Chapter 2 of this Title, the provider has not applied\nor has applied only in part harmonised standards referred to in Article 40, or\nwhere such harmonised standards do not exist and common specifications\nreferred to in Article 41 are not available, the provider shall follow the\nconformity assessment procedure set out in Annex VII.\n\nFor the purpose of the conformity assessment procedure referred to in Annex\nVII, the provider may choose any of the notified bodies. However, when the\nsystem is intended to be put into service by law enforcement, immigration or\nasylum authorities as well as EU institutions, bodies or agencies, the market\nsurveillance authority referred to in Article 63(5) or (6), as applicable,\nshall act as a notified body.\n\n2.For high-risk AI systems referred to in points 2 to 8 of Annex III,\nproviders shall follow the conformity assessment procedure based on internal\ncontrol as referred to in Annex VI, which does not provide for the involvement\nof a notified body.",
"e4ed2eb8-1551-4cf3-8341-78dc99b0c3f6": "For the purpose of the conformity assessment procedure referred to in Annex\nVII, the provider may choose any of the notified bodies. However, when the\nsystem is intended to be put into service by law enforcement, immigration or\nasylum authorities as well as EU institutions, bodies or agencies, the market\nsurveillance authority referred to in Article 63(5) or (6), as applicable,\nshall act as a notified body.\n\n2.For high-risk AI systems referred to in points 2 to 8 of Annex III,\nproviders shall follow the conformity assessment procedure based on internal\ncontrol as referred to in Annex VI, which does not provide for the involvement\nof a notified body. For high-risk AI systems referred to in point 5(b) of\nAnnex III, placed on the market or put into service by credit institutions\nregulated by Directive 2013/36/EU, the conformity assessment shall be carried\nout as part of the procedure referred to in Articles 97 to101 of that\nDirective.\n\n3.For high-risk AI systems, to which legal acts listed in Annex II, section A,\napply, the provider shall follow the relevant conformity assessment as\nrequired under those legal acts. The requirements set out in Chapter 2 of this\nTitle shall apply to those high-risk AI systems and shall be part of that\nassessment. Points 4.3., 4.4., 4.5. and the fifth paragraph of point 4.6 of\nAnnex VII shall also apply.\n\nFor the purpose of that assessment, notified bodies which have been notified\nunder those legal acts shall be entitled to control the conformity of the\nhigh-risk AI systems with the requirements set out in Chapter 2 of this Title,\nprovided that the compliance of those notified bodies with requirements laid\ndown in Article 33(4), (9) and (10) has been assessed in the context of the\nnotification procedure under those legal acts.\n\nWhere the legal acts listed in Annex II, section A, enable the manufacturer of\nthe product to opt out from a third-party conformity assessment, provided that\nthat manufacturer has applied all harmonised standards covering all the\nrelevant requirements, that manufacturer may make use of that option only if\nhe has also applied harmonised standards or, where applicable, common\nspecifications referred to in Article 41, covering the requirements set out in\nChapter 2 of this Title.\n\n4.High-risk AI systems shall undergo a new conformity assessment procedure\nwhenever they are substantially modified, regardless of whether the modified\nsystem is intended to be further distributed or continues to be used by the\ncurrent user.\n\nFor high-risk AI systems that continue to learn after being placed on the\nmarket or put into service, changes to the high-risk AI system and its\nperformance that have been pre-determined by the provider at the moment of the\ninitial conformity assessment and are part of the information contained in the\ntechnical documentation referred to in point 2(f) of Annex IV, shall not\nconstitute a substantial modification.\n\n5.The Commission is empowered to adopt delegated acts in accordance with\nArticle 73 for the purpose of updating Annexes VI and Annex VII in order to\nintroduce elements of the conformity assessment procedures that become\nnecessary in light of technical progress.\n\n6.The Commission is empowered to adopt delegated acts to amend paragraphs 1\nand 2 in order to subject high-risk AI systems referred to in points 2 to 8 of\nAnnex III to the conformity assessment procedure referred to in Annex VII or\nparts thereof. 
The Commission shall adopt such delegated acts taking into\naccount the effectiveness of the conformity assessment procedure based on\ninternal control referred to in Annex VI in preventing or minimising the risks\nto health and safety and protection of fundamental rights posed by such\nsystems as well as the availability of adequate capacities and resources among\nnotified bodies.\n\nArticle 44 \nCertificates\n\n1.Certificates issued by notified bodies in accordance with Annex VII shall be\ndrawn up in an official Union language determined by the Member State in which\nthe notified body is established or in an official Union language otherwise\nacceptable to the notified body.\n\n2.Certificates shall be valid for the period they indicate, which shall not\nexceed five years. On application by the provider, the validity of a\ncertificate may be extended for further periods, each not exceeding five\nyears, based on a re-assessment in accordance with the applicable conformity\nassessment procedures.\n\n3.Where a notified body finds that an AI system no longer meets the\nrequirements set out in Chapter 2 of this Title, it shall, taking account of\nthe principle of proportionality, suspend or withdraw the certificate issued\nor impose any restrictions on it, unless compliance with those requirements is\nensured by appropriate corrective action taken by the provider of the system\nwithin an appropriate deadline set by the notified body.",
"ad01a621-97c0-4698-be0b-4338399f22bc": "2.Certificates shall be valid for the period they indicate, which shall not\nexceed five years. On application by the provider, the validity of a\ncertificate may be extended for further periods, each not exceeding five\nyears, based on a re-assessment in accordance with the applicable conformity\nassessment procedures.\n\n3.Where a notified body finds that an AI system no longer meets the\nrequirements set out in Chapter 2 of this Title, it shall, taking account of\nthe principle of proportionality, suspend or withdraw the certificate issued\nor impose any restrictions on it, unless compliance with those requirements is\nensured by appropriate corrective action taken by the provider of the system\nwithin an appropriate deadline set by the notified body. The notified body\nshall give reasons for its decision.\n\nArticle 45 \nAppeal against decisions of notified bodies\n\nMember States shall ensure that an appeal procedure against decisions of the\nnotified bodies is available to parties having a legitimate interest in that\ndecision.\n\nArticle 46 \nInformation obligations of notified bodies\n\n1.Notified bodies shall inform the notifying authority of the following:\n\n(a)any Union technical documentation assessment certificates, any supplements\nto those certificates, quality management system approvals issued in\naccordance with the requirements of Annex VII;\n\n(b)any refusal, restriction, suspension or withdrawal of a Union technical\ndocumentation assessment certificate or a quality management system approval\nissued in accordance with the requirements of Annex VII;\n\n(c)any circumstances affecting the scope of or conditions for notification;\n\n(d)any request for information which they have received from market\nsurveillance authorities regarding conformity assessment activities;\n\n(e)on request, conformity assessment activities performed within the scope of\ntheir notification and any other activity performed, including cross-border\nactivities and subcontracting.\n\n2.Each notified body shall inform the other notified bodies of:\n\n(a)quality management system approvals which it has refused, suspended or\nwithdrawn, and, upon request, of quality system approvals which it has issued;\n\n(b)EU technical documentation assessment certificates or any supplements\nthereto which it has refused, withdrawn, suspended or otherwise restricted,\nand, upon request, of the certificates and/or supplements thereto which it has\nissued.\n\n3.Each notified body shall provide the other notified bodies carrying out\nsimilar conformity assessment activities covering the same artificial\nintelligence technologies with relevant information on issues relating to\nnegative and, on request, positive conformity assessment results.\n\nArticle 47 \nDerogation from conformity assessment procedure\n\n1.By way of derogation from Article 43, any market surveillance authority may\nauthorise the placing on the market or putting into service of specific high-\nrisk AI systems within the territory of the Member State concerned, for\nexceptional reasons of public security or the protection of life and health of\npersons, environmental protection and the protection of key industrial and\ninfrastructural assets. That authorisation shall be for a limited period of\ntime, while the necessary conformity assessment procedures are being carried\nout, and shall terminate once those procedures have been completed. 
The\ncompletion of those procedures shall be undertaken without undue delay.\n\n2.The authorisation referred to in paragraph 1 shall be issued only if the\nmarket surveillance authority concludes that the high-risk AI system complies\nwith the requirements of Chapter 2 of this Title. The market surveillance\nauthority shall inform the Commission and the other Member States of any\nauthorisation issued pursuant to paragraph 1.\n\n3.Where, within 15 calendar days of receipt of the information referred to in\nparagraph 2, no objection has been raised by either a Member State or the\nCommission in respect of an authorisation issued by a market surveillance\nauthority of a Member State in accordance with paragraph 1, that authorisation\nshall be deemed justified.\n\n4.Where, within 15 calendar days of receipt of the notification referred to in\nparagraph 2, objections are raised by a Member State against an authorisation\nissued by a market surveillance authority of another Member State, or where\nthe Commission considers the authorisation to be contrary to Union law or the\nconclusion of the Member States regarding the compliance of the system as\nreferred to in paragraph 2 to be unfounded, the Commission shall without delay\nenter into consultation with the relevant Member State; the operator(s)\nconcerned shall be consulted and have the possibility to present their views.\nIn view thereof, the Commission shall decide whether the authorisation is\njustified or not. The Commission shall address its decision to the Member\nState concerned and the relevant operator or operators.\n\n5.If the authorisation is considered unjustified, this shall be withdrawn by\nthe market surveillance authority of the Member State concerned.",
"3df81ab3-1d02-4e96-8b73-3e14bcf90079": "4.Where, within 15 calendar days of receipt of the notification referred to in\nparagraph 2, objections are raised by a Member State against an authorisation\nissued by a market surveillance authority of another Member State, or where\nthe Commission considers the authorisation to be contrary to Union law or the\nconclusion of the Member States regarding the compliance of the system as\nreferred to in paragraph 2 to be unfounded, the Commission shall without delay\nenter into consultation with the relevant Member State; the operator(s)\nconcerned shall be consulted and have the possibility to present their views.\nIn view thereof, the Commission shall decide whether the authorisation is\njustified or not. The Commission shall address its decision to the Member\nState concerned and the relevant operator or operators.\n\n5.If the authorisation is considered unjustified, this shall be withdrawn by\nthe market surveillance authority of the Member State concerned.\n\n6.By way of derogation from paragraphs 1 to 5, for high-risk AI systems\nintended to be used as safety components of devices, or which are themselves\ndevices, covered by Regulation (EU) 2017/745 and Regulation (EU) 2017/746,\nArticle 59 of Regulation (EU) 2017/745 and Article 54 of Regulation (EU)\n2017/746 shall apply also with regard to the derogation from the conformity\nassessment of the compliance with the requirements set out in Chapter 2 of\nthis Title.\n\nArticle 48 \nEU declaration of conformity\n\n1.The provider shall draw up a written EU declaration of conformity for each\nAI system and keep it at the disposal of the national competent authorities\nfor 10 years after the AI system has been placed on the market or put into\nservice. The EU declaration of conformity shall identify the AI system for\nwhich it has been drawn up. A copy of the EU declaration of conformity shall\nbe given to the relevant national competent authorities upon request.\n\n2.The EU declaration of conformity shall state that the high-risk AI system in\nquestion meets the requirements set out in Chapter 2 of this Title. The EU\ndeclaration of conformity shall contain the information set out in Annex V and\nshall be translated into an official Union language or languages required by\nthe Member State(s) in which the high-risk AI system is made available.\n\n3.Where high-risk AI systems are subject to other Union harmonisation\nlegislation which also requires an EU declaration of conformity, a single EU\ndeclaration of conformity shall be drawn up in respect of all Union\nlegislations applicable to the high-risk AI system. The declaration shall\ncontain all the information required for identification of the Union\nharmonisation legislation to which the declaration relates.\n\n4.By drawing up the EU declaration of conformity, the provider shall assume\nresponsibility for compliance with the requirements set out in Chapter 2 of\nthis Title. The provider shall keep the EU declaration of conformity up-to-\ndate as appropriate.\n\n5.The Commission shall be empowered to adopt delegated acts in accordance with\nArticle 73 for the purpose of updating the content of the EU declaration of\nconformity set out in Annex V in order to introduce elements that become\nnecessary in light of technical progress.\n\nArticle 49 \nCE marking of conformity\n\n1.The CE marking shall be affixed visibly, legibly and indelibly for high-risk\nAI systems. 
Where that is not possible or not warranted on account of the\nnature of the high-risk AI system, it shall be affixed to the packaging or to\nthe accompanying documentation, as appropriate.\n\n2.The CE marking referred to in paragraph 1 of this Article shall be subject\nto the general principles set out in Article 30 of Regulation (EC) No\n765/2008.\n\n3.Where applicable, the CE marking shall be followed by the identification\nnumber of the notified body responsible for the conformity assessment\nprocedures set out in Article 43. The identification number shall also be\nindicated in any promotional material which mentions that the high-risk AI\nsystem fulfils the requirements for CE marking.\n\nArticle 50 \nDocument retention\n\nThe provider shall, for a period ending 10 years after the AI system has been\nplaced on the market or put into service, keep at the disposal of the national\ncompetent authorities:\n\n(a)the technical documentation referred to in Article 11;\n\n(b)the documentation concerning the quality management system referred to in\nArticle 17;\n\n(c)the documentation concerning the changes approved by notified bodies where\napplicable;\n\n(d)the decisions and other documents issued by the notified bodies where\napplicable;\n\n(e)the EU declaration of conformity referred to in Article 48.",
"6dc28c03-3dd4-473c-8c03-19daed9ce7be": "3.Where applicable, the CE marking shall be followed by the identification\nnumber of the notified body responsible for the conformity assessment\nprocedures set out in Article 43. The identification number shall also be\nindicated in any promotional material which mentions that the high-risk AI\nsystem fulfils the requirements for CE marking.\n\nArticle 50 \nDocument retention\n\nThe provider shall, for a period ending 10 years after the AI system has been\nplaced on the market or put into service, keep at the disposal of the national\ncompetent authorities:\n\n(a)the technical documentation referred to in Article 11;\n\n(b)the documentation concerning the quality management system referred to\nArticle 17;\n\n(c)the documentation concerning the changes approved by notified bodies where\napplicable;\n\n(d)the decisions and other documents issued by the notified bodies where\napplicable;\n\n(e)the EU declaration of conformity referred to in Article 48.\n\nArticle 51 \nRegistration\n\nBefore placing on the market or putting into service a high-risk AI system\nreferred to in Article 6(2), the provider or, where applicable, the authorised\nrepresentative shall register that system in the EU database referred to in\nArticle 60.\n\nTITLE IV\n\nTRANSPARENCY OBLIGATIONS FOR CERTAIN AI SYSTEMS\n\nArticle 52 \nTransparency obligations for certain AI systems\n\n1.Providers shall ensure that AI systems intended to interact with natural\npersons are designed and developed in such a way that natural persons are\ninformed that they are interacting with an AI system, unless this is obvious\nfrom the circumstances and the context of use. This obligation shall not apply\nto AI systems authorised by law to detect, prevent, investigate and prosecute\ncriminal offences, unless those systems are available for the public to report\na criminal offence.\n\n2.Users of an emotion recognition system or a biometric categorisation system\nshall inform of the operation of the system the natural persons exposed\nthereto. 
This obligation shall not apply to AI systems used for biometric\ncategorisation which are permitted by law to detect, prevent and investigate\ncriminal offences.\n\n3.Users of an AI system that generates or manipulates image, audio or video\ncontent that appreciably resembles existing persons, objects, places or other\nentities or events and would falsely appear to a person to be authentic or\ntruthful (\u2018deep fake\u2019), shall disclose that the content has been artificially\ngenerated or manipulated.\n\nHowever, the first subparagraph shall not apply where the use is authorised by\nlaw to detect, prevent, investigate and prosecute criminal offences or it is\nnecessary for the exercise of the right to freedom of expression and the right\nto freedom of the arts and sciences guaranteed in the Charter of Fundamental\nRights of the EU, and subject to appropriate safeguards for the rights and\nfreedoms of third parties.\n\n4.Paragraphs 1, 2 and 3 shall not affect the requirements and obligations set\nout in Title III of this Regulation.\n\nTITLE V\n\nMEASURES IN SUPPORT OF INNOVATION\n\nArticle 53 \nAI regulatory sandboxes\n\n1.AI regulatory sandboxes established by one or more Member States\u2019 competent\nauthorities or the European Data Protection Supervisor shall provide a\ncontrolled environment that facilitates the development, testing and\nvalidation of innovative AI systems for a limited time before their placement\non the market or putting into service pursuant to a specific plan. This shall\ntake place under the direct supervision and guidance by the competent\nauthorities with a view to ensuring compliance with the requirements of this\nRegulation and, where relevant, other Union and Member States legislation\nsupervised within the sandbox.\n\n2.Member States shall ensure that to the extent the innovative AI systems\ninvolve the processing of personal data or otherwise fall under the\nsupervisory remit of other national authorities or competent authorities\nproviding or supporting access to data, the national data protection\nauthorities and those other national authorities are associated to the\noperation of the AI regulatory sandbox.\n\n3.The AI regulatory sandboxes shall not affect the supervisory and corrective\npowers of the competent authorities. Any significant risks to health and\nsafety and fundamental rights identified during the development and testing of\nsuch systems shall result in immediate mitigation and, failing that, in the\nsuspension of the development and testing process until such mitigation takes\nplace.\n\n4.Participants in the AI regulatory sandbox shall remain liable under\napplicable Union and Member States liability legislation for any harm\ninflicted on third parties as a result of the experimentation taking place\nin the sandbox.\n\n5.Member States\u2019 competent authorities that have established AI regulatory\nsandboxes shall coordinate their activities and cooperate within the framework\nof the European Artificial Intelligence Board.",
"b1f8a5c7-5d43-4e3d-9c41-114aee6eb9e0": "3.The AI regulatory sandboxes shall not affect the supervisory and corrective\npowers of the competent authorities. Any significant risks to health and\nsafety and fundamental rights identified during the development and testing of\nsuch systems shall result in immediate mitigation and, failing that, in the\nsuspension of the development and testing process until such mitigation takes\nplace.\n\n4.Participants in the AI regulatory sandbox shall remain liable under\napplicable Union and Member States liability legislation for any harm\ninflicted on third parties as a result from the experimentation taking place\nin the sandbox.\n\n5.Member States\u2019 competent authorities that have established AI regulatory\nsandboxes shall coordinate their activities and cooperate within the framework\nof the European Artificial Intelligence Board. They shall submit annual\nreports to the Board and the Commission on the results from the implementation\nof those scheme, including good practices, lessons learnt and recommendations\non their setup and, where relevant, on the application of this Regulation and\nother Union legislation supervised within the sandbox.\n\n6.The modalities and the conditions of the operation of the AI regulatory\nsandboxes, including the eligibility criteria and the procedure for the\napplication, selection, participation and exiting from the sandbox, and the\nrights and obligations of the participants shall be set out in implementing\nacts. Those implementing acts shall be adopted in accordance with the\nexamination procedure referred to in Article 74(2).\n\nArticle 54 \nFurther processing of personal data for developing certain AI systems in the\npublic interest in the AI regulatory sandbox\n\n1.In the AI regulatory sandbox personal data lawfully collected for other\npurposes shall be processed for the purposes of developing and testing certain\ninnovative AI systems in the sandbox under the following conditions:\n\n(a)the innovative AI systems shall be developed for safeguarding substantial\npublic interest in one or more of the following areas:\n\n(i)the prevention, investigation, detection or prosecution of criminal\noffences or the execution of criminal penalties, including the safeguarding\nagainst and the prevention of threats to public security, under the control\nand responsibility of the competent authorities. 
The processing shall be based\non Member State or Union law;\n\n(ii)public safety and public health, including disease prevention, control and\ntreatment;\n\n(iii)a high level of protection and improvement of the quality of the\nenvironment;\n\n(b)the data processed are necessary for complying with one or more of the\nrequirements referred to in Title III, Chapter 2 where those requirements\ncannot be effectively fulfilled by processing anonymised, synthetic or other\nnon-personal data;\n\n(c)there are effective monitoring mechanisms to identify if any high risks to\nthe fundamental rights of the data subjects may arise during the sandbox\nexperimentation as well as a response mechanism to promptly mitigate those risks\nand, where necessary, stop the processing;\n\n(d)any personal data to be processed in the context of the sandbox are in a\nfunctionally separate, isolated and protected data processing environment\nunder the control of the participants and only authorised persons have access\nto that data;\n\n(e)any personal data processed are not transmitted, transferred or\notherwise accessed by other parties;\n\n(f)any processing of personal data in the context of the sandbox does not lead\nto measures or decisions affecting the data subjects;\n\n(g)any personal data processed in the context of the sandbox are deleted once\nthe participation in the sandbox has terminated or the personal data has\nreached the end of its retention period;\n\n(h)the logs of the processing of personal data in the context of the sandbox\nare kept for the duration of the participation in the sandbox and 1 year after\nits termination, solely for the purpose of and only as long as necessary for\nfulfilling accountability and documentation obligations under this Article or\nother applicable Union or Member States legislation;\n\n(i)a complete and detailed description of the process and rationale behind the\ntraining, testing and validation of the AI system is kept together with the\ntesting results as part of the technical documentation in Annex IV;\n\n(j)a short summary of the AI project developed in the sandbox, its objectives\nand expected results is published on the website of the competent authorities.\n\n2.Paragraph 1 is without prejudice to Union or Member States legislation\nexcluding processing for other purposes than those explicitly mentioned in\nthat legislation.\n\nArticle 55 \nMeasures for small-scale providers and users\n\n1.Member States shall undertake the following actions:\n\n(a)provide small-scale providers and start-ups with priority access to the AI\nregulatory sandboxes to the extent that they fulfil the eligibility\nconditions;\n\n(b)organise specific awareness raising activities about the application of\nthis Regulation tailored to the needs of the small-scale providers and users;\n\n(c)where appropriate, establish a dedicated channel for communication with\nsmall-scale providers and users and other innovators to provide guidance and\nrespond to queries about the implementation of this Regulation.",
"eed57da1-7fe8-44b9-b8c6-3b6245fb32ad": "2.Paragraph 1 is without prejudice to Union or Member States legislation\nexcluding processing for other purposes than those explicitly mentioned in\nthat legislation.\n\nArticle 55 \nMeasures for small-scale providers and users\n\n1.Member States shall undertake the following actions:\n\n(a)provide small-scale providers and start-ups with priority access to the AI\nregulatory sandboxes to the extent that they fulfil the eligibility\nconditions;\n\n(b)organise specific awareness raising activities about the application of\nthis Regulation tailored to the needs of the small-scale providers and users;\n\n(c)where appropriate, establish a dedicated channel for communication with\nsmall-scale providers and user and other innovators to provide guidance and\nrespond to queries about the implementation of this Regulation.\n\n2.The specific interests and needs of the small-scale providers shall be taken\ninto account when setting the fees for conformity assessment under Article 43,\nreducing those fees proportionately to their size and market size.\n\nTITLE VI\n\nGOVERNANCE\n\nChapter 1\n\nEuropean Artificial Intelligence Board\n\nArticle 56 \nEstablishment of the European Artificial Intelligence Board\n\n1.A \u2018European Artificial Intelligence Board\u2019 (the \u2018Board\u2019) is established.\n\n2.The Board shall provide advice and assistance to the Commission in order to:\n\n(a)contribute to the effective cooperation of the national supervisory\nauthorities and the Commission with regard to matters covered by this\nRegulation;\n\n(b)coordinate and contribute to guidance and analysis by the Commission and\nthe national supervisory authorities and other competent authorities on\nemerging issues across the internal market with regard to matters covered by\nthis Regulation;\n\n(c)assist the national supervisory authorities and the Commission in ensuring\nthe consistent application of this Regulation.\n\nArticle 57 \nStructure of the Board\n\n1.The Board shall be composed of the national supervisory authorities, who\nshall be represented by the head or equivalent high-level official of that\nauthority, and the European Data Protection Supervisor. Other national\nauthorities may be invited to the meetings, where the issues discussed are of\nrelevance for them.\n\n2.The Board shall adopt its rules of procedure by a simple majority of its\nmembers, following the consent of the Commission. The rules of procedure shall\nalso contain the operational aspects related to the execution of the Board\u2019s\ntasks as listed in Article 58. The Board may establish sub-groups as\nappropriate for the purpose of examining specific questions.\n\n3.The Board shall be chaired by the Commission. The Commission shall convene\nthe meetings and prepare the agenda in accordance with the tasks of the Board\npursuant to this Regulation and with its rules of procedure. The Commission\nshall provide administrative and analytical support for the activities of the\nBoard pursuant to this Regulation.\n\n4.The Board may invite external experts and observers to attend its meetings\nand may hold exchanges with interested third parties to inform its activities\nto an appropriate extent. 
To that end the Commission may facilitate exchanges\nbetween the Board and other Union bodies, offices, agencies and advisory\ngroups.\n\nArticle 58 \nTasks of the Board\n\nWhen providing advice and assistance to the Commission in the context of\nArticle 56(2), the Board shall in particular:\n\n(a)collect and share expertise and best practices among Member States;\n\n(b)contribute to uniform administrative practices in the Member States,\nincluding for the functioning of regulatory sandboxes referred to in Article\n53;\n\n(c)issue opinions, recommendations or written contributions on matters related\nto the implementation of this Regulation, in particular:\n\n(i)on technical specifications or existing standards regarding the\nrequirements set out in Title III, Chapter 2,\n\n(ii)on the use of harmonised standards or common specifications referred to in\nArticles 40 and 41,\n\n(iii)on the preparation of guidance documents, including the guidelines\nconcerning the setting of administrative fines referred to in Article 71.\n\nChapter 2\n\nNational competent authorities\n\nArticle 59 \nDesignation of national competent authorities\n\n1.National competent authorities shall be established or designated by each\nMember State for the purpose of ensuring the application and implementation of\nthis Regulation. National competent authorities shall be organised so as to\nsafeguard the objectivity and impartiality of their activities and tasks.\n\n2.Each Member State shall designate a national supervisory authority among the\nnational competent authorities. The national supervisory authority shall act\nas notifying authority and market surveillance authority unless a Member State\nhas organisational and administrative reasons to designate more than one\nauthority.\n\n3.Member States shall inform the Commission of their designation or\ndesignations and, where applicable, the reasons for designating more than one\nauthority.\n\n4.Member States shall ensure that national competent authorities are provided\nwith adequate financial and human resources to fulfil their tasks under this\nRegulation.",
"454a95f2-f6d7-4db9-8738-79175a3e4adc": "CHAPTER 2\n\nnational competent authorities\n\nArticle 59 \nDesignation of national competent authorities\n\n1.National competent authorities shall be established or designated by each\nMember State for the purpose of ensuring the application and implementation of\nthis Regulation. National competent authorities shall be organised so as to\nsafeguard the objectivity and impartiality of their activities and tasks.\n\n2.Each Member State shall designate a national supervisory authority among the\nnational competent authorities. The national supervisory authority shall act\nas notifying authority and market surveillance authority unless a Member State\nhas organisational and administrative reasons to designate more than one\nauthority.\n\n3.Member States shall inform the Commission of their designation or\ndesignations and, where applicable, the reasons for designating more than one\nauthority.\n\n4.Member States shall ensure that national competent authorities are provided\nwith adequate financial and human resources to fulfil their tasks under this\nRegulation. In particular, national competent authorities shall have a\nsufficient number of personnel permanently available whose competences and\nexpertise shall include an in-depth understanding of artificial intelligence\ntechnologies, data and data computing, fundamental rights, health and safety\nrisks and knowledge of existing standards and legal requirements.\n\n5.Member States shall report to the Commission on an annual basis on the\nstatus of the financial and human resources of the national competent\nauthorities with an assessment of their adequacy. The Commission shall\ntransmit that information to the Board for discussion and possible\nrecommendations.\n\n6.The Commission shall facilitate the exchange of experience between national\ncompetent authorities.\n\n7.National competent authorities may provide guidance and advice on the\nimplementation of this Regulation, including to small-scale providers.\nWhenever national competent authorities intend to provide guidance and advice\nwith regard to an AI system in areas covered by other Union legislation, the\ncompetent national authorities under that Union legislation shall be\nconsulted, as appropriate. Member States may also establish one central\ncontact point for communication with operators.\n\n8.When Union institutions, agencies and bodies fall within the scope of this\nRegulation, the European Data Protection Supervisor shall act as the competent\nauthority for their supervision.\n\ntitle vii\n\nEU DATABASE FOR STAND-ALONE HIGH-RISK AI SYSTEMS\n\nArticle 60 \nEU database for stand-alone high-risk AI systems\n\n1.The Commission shall, in collaboration with the Member States, set up and\nmaintain a EU database containing information referred to in paragraph 2\nconcerning high-risk AI systems referred to in Article 6(2) which are\nregistered in accordance with Article 51.\n\n2.The data listed in Annex VIII shall be entered into the EU database by the\nproviders. The Commission shall provide them with technical and administrative\nsupport.\n\n3.Information contained in the EU database shall be accessible to the public.\n\n4.The EU database shall contain personal data only insofar as necessary for\ncollecting and processing information in accordance with this Regulation. 
That\ninformation shall include the names and contact details of natural persons who\nare responsible for registering the system and have the legal authority to\nrepresent the provider.\n\n5.The Commission shall be the controller of the EU database. It shall also\nensure that providers receive adequate technical and administrative support.\n\nTITLE VIII\n\nPOST-MARKET MONITORING, INFORMATION SHARING, MARKET SURVEILLANCE\n\nChapter 1\n\nPost-market monitoring\n\nArticle 61 \nPost-market monitoring by providers and post-market monitoring plan for high-\nrisk AI systems\n\n1.Providers shall establish and document a post-market monitoring system in a\nmanner that is proportionate to the nature of the artificial intelligence\ntechnologies and the risks of the high-risk AI system.\n\n2.The post-market monitoring system shall actively and systematically collect,\ndocument and analyse relevant data provided by users or collected through\nother sources on the performance of high-risk AI systems throughout their\nlifetime, and allow the provider to evaluate the continuous compliance of AI\nsystems with the requirements set out in Title III, Chapter 2.\n\n3.The post-market monitoring system shall be based on a post-market monitoring\nplan. The post-market monitoring plan shall be part of the technical\ndocumentation referred to in Annex IV. The Commission shall adopt an\nimplementing act laying down detailed provisions establishing a template for\nthe post-market monitoring plan and the list of elements to be included in the\nplan.\n\n4.For high-risk AI systems covered by the legal acts referred to in Annex II,\nwhere a post-market monitoring system and plan is already established under\nthat legislation, the elements described in paragraphs 1, 2 and 3 shall be\nintegrated into that system and plan as appropriate.\n\nThe first subparagraph shall also apply to high-risk AI systems referred to in\npoint 5(b) of Annex III placed on the market or put into service by credit\ninstitutions regulated by Directive 2013/36/EU.",
"adcde755-920b-4ab4-a38e-189536c6e0de": "3.The post-market monitoring system shall be based on a post-market monitoring\nplan. The post-market monitoring plan shall be part of the technical\ndocumentation referred to in Annex IV. The Commission shall adopt an\nimplementing act laying down detailed provisions establishing a template for\nthe post-market monitoring plan and the list of elements to be included in the\nplan.\n\n4.For high-risk AI systems covered by the legal acts referred to in Annex II,\nwhere a post-market monitoring system and plan is already established under\nthat legislation, the elements described in paragraphs 1, 2 and 3 shall be\nintegrated into that system and plan as appropriate.\n\nThe first subparagraph shall also apply to high-risk AI systems referred to in\npoint 5(b) of Annex III placed on the market or put into service by credit\ninstitutions regulated by Directive 2013/36/EU.\n\nChapter 2\n\nSharing of information on incidents and malfunctioning\n\nArticle 62 \nReporting of serious incidents and of malfunctioning\n\n1.Providers of high-risk AI systems placed on the Union market shall report\nany serious incident or any malfunctioning of those systems which constitutes\na breach of obligations under Union law intended to protect fundamental rights\nto the market surveillance authorities of the Member States where that\nincident or breach occurred.\n\nSuch notification shall be made immediately after the provider has established\na causal link between the AI system and the incident or malfunctioning or the\nreasonable likelihood of such a link, and, in any event, not later than 15\ndays after the providers becomes aware of the serious incident or of the\nmalfunctioning.\n\n2.Upon receiving a notification related to a breach of obligations under Union\nlaw intended to protect fundamental rights, the market surveillance authority\nshall inform the national public authorities or bodies referred to in Article\n64(3). The Commission shall develop dedicated guidance to facilitate\ncompliance with the obligations set out in paragraph 1. That guidance shall be\nissued 12 months after the entry into force of this Regulation, at the latest.\n\n3.For high-risk AI systems referred to in point 5(b) of Annex III which are\nplaced on the market or put into service by providers that are credit\ninstitutions regulated by Directive 2013/36/EU and for high-risk AI systems\nwhich are safety components of devices, or are themselves devices, covered by\nRegulation (EU) 2017/745 and Regulation (EU) 2017/746, the notification of\nserious incidents or malfunctioning shall be limited to those that that\nconstitute a breach of obligations under Union law intended to protect\nfundamental rights.\n\nChapter 3\n\nEnforcement\n\nArticle 63 \nMarket surveillance and control of AI systems in the Union market\n\n1.Regulation (EU) 2019/1020 shall apply to AI systems covered by this\nRegulation. However, for the purpose of the effective enforcement of this\nRegulation:\n\n(a)any reference to an economic operator under Regulation (EU) 2019/1020 shall\nbe understood as including all operators identified in Title III, Chapter 3 of\nthis Regulation;\n\n(b)any reference to a product under Regulation (EU) 2019/1020 shall be\nunderstood as including all AI systems falling within the scope of this\nRegulation.\n\n2.The national supervisory authority shall report to the Commission on a\nregular basis the outcomes of relevant market surveillance activities. 
The\nnational supervisory authority shall report, without delay, to the Commission\nand relevant national competition authorities any information identified in\nthe course of market surveillance activities that may be of potential interest\nfor the application of Union law on competition rules.\n\n3.For high-risk AI systems related to products to which legal acts listed in\nAnnex II, section A apply, the market surveillance authority for the purposes\nof this Regulation shall be the authority responsible for market surveillance\nactivities designated under those legal acts.\n\n4.For AI systems placed on the market, put into service or used by financial\ninstitutions regulated by Union legislation on financial services, the market\nsurveillance authority for the purposes of this Regulation shall be the\nrelevant authority responsible for the financial supervision of those\ninstitutions under that legislation.\n\n5.For AI systems listed in point 1(a), in so far as the systems are used for\nlaw enforcement purposes, and in points 6 and 7 of Annex III, Member States\nshall designate as market surveillance authorities for the purposes of this\nRegulation either the competent data protection supervisory authorities under\nDirective (EU) 2016/680 or Regulation (EU) 2016/679, or the national competent\nauthorities supervising the activities of the law enforcement, immigration or\nasylum authorities putting into service or using those systems.",
"53f8b176-7759-4362-9c5d-443c0beee9d6": "4.For AI systems placed on the market, put into service or used by financial\ninstitutions regulated by Union legislation on financial services, the market\nsurveillance authority for the purposes of this Regulation shall be the\nrelevant authority responsible for the financial supervision of those\ninstitutions under that legislation.\n\n5.For AI systems listed in point 1(a) in so far as the systems are used for\nlaw enforcement purposes, points 6 and 7 of Annex III, Member States shall\ndesignate as market surveillance authorities for the purposes of this\nRegulation either the competent data protection supervisory authorities under\nDirective (EU) 2016/680, or Regulation 2016/679 or the national competent\nauthorities supervising the activities of the law enforcement, immigration or\nasylum authorities putting into service or using those systems.\n\n6.Where Union institutions, agencies and bodies fall within the scope of this\nRegulation, the European Data Protection Supervisor shall act as their market\nsurveillance authority.\n\n7.Member States shall facilitate the coordination between market surveillance\nauthorities designated under this Regulation and other relevant national\nauthorities or bodies which supervise the application of Union harmonisation\nlegislation listed in Annex II or other Union legislation that might be\nrelevant for the high-risk AI systems referred to in Annex III.\n\nArticle 64 \nAccess to data and documentation\n\n1.Access to data and documentation in the context of their activities, the\nmarket surveillance authorities shall be granted full access to the training,\nvalidation and testing datasets used by the provider, including through\napplication programming interfaces (\u2018API\u2019) or other appropriate technical\nmeans and tools enabling remote access.\n\n2.Where necessary to assess the conformity of the high-risk AI system with the\nrequirements set out in Title III, Chapter 2 and upon a reasoned request, the\nmarket surveillance authorities shall be granted access to the source code of\nthe AI system.\n\n3.National public authorities or bodies which supervise or enforce the respect\nof obligations under Union law protecting fundamental rights in relation to\nthe use of high-risk AI systems referred to in Annex III shall have the power\nto request and access any documentation created or maintained under this\nRegulation when access to that documentation is necessary for the fulfilment\nof the competences under their mandate within the limits of their\njurisdiction. The relevant public authority or body shall inform the market\nsurveillance authority of the Member State concerned of any such request.\n\n4.By 3 months after the entering into force of this Regulation, each Member\nState shall identify the public authorities or bodies referred to in paragraph\n3 and make a list publicly available on the website of the national\nsupervisory authority. Member States shall notify the list to the Commission\nand all other Member States and keep the list up to date.\n\n5.Where the documentation referred to in paragraph 3 is insufficient to\nascertain whether a breach of obligations under Union law intended to protect\nfundamental rights has occurred, the public authority or body referred to\nparagraph 3 may make a reasoned request to the market surveillance authority\nto organise testing of the high-risk AI system through technical means. 
The\nmarket surveillance authority shall organise the testing with the close\ninvolvement of the requesting public authority or body within a reasonable time\nfollowing the request.\n\n6.Any information and documentation obtained by the national public\nauthorities or bodies referred to in paragraph 3 pursuant to the provisions of\nthis Article shall be treated in compliance with the confidentiality\nobligations set out in Article 70.\n\nArticle 65 \nProcedure for dealing with AI systems presenting a risk at national level\n\n1.AI systems presenting a risk shall be understood as products presenting a\nrisk as defined in Article 3, point 19 of Regulation (EU) 2019/1020 insofar as\nrisks to the health or safety or to the protection of fundamental rights of\npersons are concerned.\n\n2.Where the market surveillance authority of a Member State has sufficient\nreasons to consider that an AI system presents a risk as referred to in\nparagraph 1, it shall carry out an evaluation of the AI system concerned in\nrespect of its compliance with all the requirements and obligations laid down\nin this Regulation. When risks to the protection of fundamental rights are\npresent, the market surveillance authority shall also inform the relevant\nnational public authorities or bodies referred to in Article 64(3). The\nrelevant operators shall cooperate as necessary with the market surveillance\nauthorities and the other national public authorities or bodies referred to in\nArticle 64(3).",
"869396a8-ad52-4320-834c-433e7219d751": "2.Where the market surveillance authority of a Member State has sufficient\nreasons to consider that an AI system presents a risk as referred to in\nparagraph 1, they shall carry out an evaluation of the AI system concerned in\nrespect of its compliance with all the requirements and obligations laid down\nin this Regulation. When risks to the protection of fundamental rights are\npresent, the market surveillance authority shall also inform the relevant\nnational public authorities or bodies referred to in Article 64(3). The\nrelevant operators shall cooperate as necessary with the market surveillance\nauthorities and the other national public authorities or bodies referred to in\nArticle 64(3).\n\nWhere, in the course of that evaluation, the market surveillance authority\nfinds that the AI system does not comply with the requirements and obligations\nlaid down in this Regulation, it shall without delay require the relevant\noperator to take all appropriate corrective actions to bring the AI system\ninto compliance, to withdraw the AI system from the market, or to recall it\nwithin a reasonable period, commensurate with the nature of the risk, as it\nmay prescribe.\n\nThe market surveillance authority shall inform the relevant notified body\naccordingly. Article 18 of Regulation (EU) 2019/1020 shall apply to the\nmeasures referred to in the second subparagraph.\n\n3.Where the market surveillance authority considers that non-compliance is not\nrestricted to its national territory, it shall inform the Commission and the\nother Member States of the results of the evaluation and of the actions which\nit has required the operator to take.\n\n4.The operator shall ensure that all appropriate corrective action is taken in\nrespect of all the AI systems concerned that it has made available on the\nmarket throughout the Union.\n\n5.Where the operator of an AI system does not take adequate corrective action\nwithin the period referred to in paragraph 2, the market surveillance\nauthority shall take all appropriate provisional measures to prohibit or\nrestrict the AI system's being made available on its national market, to\nwithdraw the product from that market or to recall it. That authority shall\ninform the Commission and the other Member States, without delay, of those\nmeasures.\n\n6.The information referred to in paragraph 5 shall include all available\ndetails, in particular the data necessary for the identification of the non-\ncompliant AI system, the origin of the AI system, the nature of the non-\ncompliance alleged and the risk involved, the nature and duration of the\nnational measures taken and the arguments put forward by the relevant\noperator. 
In particular, the market surveillance authorities shall indicate\nwhether the non-compliance is due to one or more of the following:\n\n(a)a failure of the AI system to meet requirements set out in Title III,\nChapter 2;\n\n(b)shortcomings in the harmonised standards or common specifications referred\nto in Articles 40 and 41 conferring a presumption of conformity.\n\n7.The market surveillance authorities of the Member States other than the\nmarket surveillance authority of the Member State initiating the procedure\nshall without delay inform the Commission and the other Member States of any\nmeasures adopted and of any additional information at their disposal relating\nto the non-compliance of the AI system concerned, and, in the event of\ndisagreement with the notified national measure, of their objections.\n\n8.Where, within three months of receipt of the information referred to in\nparagraph 5, no objection has been raised by either a Member State or the\nCommission in respect of a provisional measure taken by a Member State, that\nmeasure shall be deemed justified. This is without prejudice to the procedural\nrights of the concerned operator in accordance with Article 18 of Regulation\n(EU) 2019/1020.\n\n9.The market surveillance authorities of all Member States shall ensure that\nappropriate restrictive measures are taken in respect of the product\nconcerned, such as withdrawal of the product from their market, without delay.\n\nArticle 66 \nUnion safeguard procedure\n\n1.Where, within three months of receipt of the notification referred to in\nArticle 65(5), objections are raised by a Member State against a measure taken\nby another Member State, or where the Commission considers the measure to be\ncontrary to Union law, the Commission shall without delay enter into\nconsultation with the relevant Member State and operator or operators and\nshall evaluate the national measure. On the basis of the results of that\nevaluation, the Commission shall decide whether the national measure is\njustified or not within 9 months from the notification referred to in Article\n65(5) and notify such decision to the Member State concerned.\n\n2.If the national measure is considered justified, all Member States shall\ntake the measures necessary to ensure that the non-compliant AI system is\nwithdrawn from their market, and shall inform the Commission accordingly.",
"53c6bf36-8083-4c04-8634-e3826f160445": "Article 66 \nUnion safeguard procedure\n\n1.Where, within three months of receipt of the notification referred to in\nArticle 65(5), objections are raised by a Member State against a measure taken\nby another Member State, or where the Commission considers the measure to be\ncontrary to Union law, the Commission shall without delay enter into\nconsultation with the relevant Member State and operator or operators and\nshall evaluate the national measure. On the basis of the results of that\nevaluation, the Commission shall decide whether the national measure is\njustified or not within 9 months from the notification referred to in Article\n65(5) and notify such decision to the Member State concerned.\n\n2.If the national measure is considered justified, all Member States shall\ntake the measures necessary to ensure that the non-compliant AI system is\nwithdrawn from their market, and shall inform the Commission accordingly. If\nthe national measure is considered unjustified, the Member State concerned\nshall withdraw the measure.\n\n3.Where the national measure is considered justified and the non-compliance of\nthe AI system is attributed to shortcomings in the harmonised standards or\ncommon specifications referred to in Articles 40 and 41 of this Regulation,\nthe Commission shall apply the procedure provided for in Article 11 of\nRegulation (EU) No 1025/2012.\n\nArticle 67 \nCompliant AI systems which present a risk\n\n1.Where, having performed an evaluation under Article 65, the market\nsurveillance authority of a Member State finds that although an AI system is\nin compliance with this Regulation, it presents a risk to the health or safety\nof persons, to the compliance with obligations under Union or national law\nintended to protect fundamental rights or to other aspects of public interest\nprotection, it shall require the relevant operator to take all appropriate\nmeasures to ensure that the AI system concerned, when placed on the market or\nput into service, no longer presents that risk, to withdraw the AI system from\nthe market or to recall it within a reasonable period, commensurate with the\nnature of the risk, as it may prescribe.\n\n2.The provider or other relevant operators shall ensure that corrective action\nis taken in respect of all the AI systems concerned that they have made\navailable on the market throughout the Union within the timeline prescribed by\nthe market surveillance authority of the Member State referred to in paragraph\n1.\n\n3.The Member State shall immediately inform the Commission and the other\nMember States. That information shall include all available details, in\nparticular the data necessary for the identification of the AI system\nconcerned, the origin and the supply chain of the AI system, the nature of the\nrisk involved and the nature and duration of the national measures taken.\n\n4.The Commission shall without delay enter into consultation with the Member\nStates and the relevant operator and shall evaluate the national measures\ntaken. 
On the basis of the results of that evaluation, the Commission shall\ndecide whether the measure is justified or not and, where necessary, propose\nappropriate measures.\n\n5.The Commission shall address its decision to the Member States.\n\nArticle 68 \nFormal non-compliance\n\n1.Where the market surveillance authority of a Member State makes one of the\nfollowing findings, it shall require the relevant provider to put an end to\nthe non-compliance concerned:\n\n(a)the conformity marking has been affixed in violation of Article 49;\n\n(b)the conformity marking has not been affixed;\n\n(c)the EU declaration of conformity has not been drawn up;\n\n(d)the EU declaration of conformity has not been drawn up correctly;\n\n(e)the identification number of the notified body, which is involved in the\nconformity assessment procedure, where applicable, has not been affixed;\n\n2.Where the non-compliance referred to in paragraph 1 persists, the Member\nState concerned shall take all appropriate measures to restrict or prohibit\nthe high-risk AI system being made available on the market or ensure that it\nis recalled or withdrawn from the market.\n\nTITLE IX\n\nCODES OF CONDUCT\n\nArticle 69 \nCodes of conduct\n\n1.The Commission and the Member States shall encourage and facilitate the\ndrawing up of codes of conduct intended to foster the voluntary application to\nAI systems other than high-risk AI systems of the requirements set out in\nTitle III, Chapter 2 on the basis of technical specifications and solutions\nthat are appropriate means of ensuring compliance with such requirements in\nlight of the intended purpose of the systems.\n\n2.The Commission and the Board shall encourage and facilitate the drawing up\nof codes of conduct intended to foster the voluntary application to AI systems\nof requirements related for example to environmental sustainability,\naccessibility for persons with a disability, stakeholders participation in the\ndesign and development of the AI systems and diversity of development teams on\nthe basis of clear objectives and key performance indicators to measure the\nachievement of those objectives.",
"953f012b-a3e9-485f-8006-13c9307eccf6": "TITLE IX\n\nCODES OF CONDUCT\n\nArticle 69 \nCodes of conduct\n\n1.The Commission and the Member States shall encourage and facilitate the\ndrawing up of codes of conduct intended to foster the voluntary application to\nAI systems other than high-risk AI systems of the requirements set out in\nTitle III, Chapter 2 on the basis of technical specifications and solutions\nthat are appropriate means of ensuring compliance with such requirements in\nlight of the intended purpose of the systems.\n\n2.The Commission and the Board shall encourage and facilitate the drawing up\nof codes of conduct intended to foster the voluntary application to AI systems\nof requirements related for example to environmental sustainability,\naccessibility for persons with a disability, stakeholders participation in the\ndesign and development of the AI systems and diversity of development teams on\nthe basis of clear objectives and key performance indicators to measure the\nachievement of those objectives.\n\n3.Codes of conduct may be drawn up by individual providers of AI systems or by\norganisations representing them or by both, including with the involvement of\nusers and any interested stakeholders and their representative organisations.\nCodes of conduct may cover one or more AI systems taking into account the\nsimilarity of the intended purpose of the relevant systems.\n\n4.The Commission and the Board shall take into account the specific interests\nand needs of the small-scale providers and start-ups when encouraging and\nfacilitating the drawing up of codes of conduct.\n\nTITLE X\n\nCONFIDENTIALITY AND PENALTIES\n\nArticle 70 \nConfidentiality\n\n1.National competent authorities and notified bodies involved in the\napplication of this Regulation shall respect the confidentiality of\ninformation and data obtained in carrying out their tasks and activities in\nsuch a manner as to protect, in particular:\n\n(a)intellectual property rights, and confidential business information or\ntrade secrets of a natural or legal person, including source code, except the\ncases referred to in Article 5 of Directive 2016/943 on the protection of\nundisclosed know-how and business information (trade secrets) against their\nunlawful acquisition, use and disclosure apply.\n\n(b)the effective implementation of this Regulation, in particular for the\npurpose of inspections, investigations or audits;(c) public and national\nsecurity interests;\n\n(c)integrity of criminal or administrative proceedings.\n\n2.Without prejudice to paragraph 1, information exchanged on a confidential\nbasis between the national competent authorities and between national\ncompetent authorities and the Commission shall not be disclosed without the\nprior consultation of the originating national competent authority and the\nuser when high-risk AI systems referred to in points 1, 6 and 7 of Annex III\nare used by law enforcement, immigration or asylum authorities, when such\ndisclosure would jeopardise public and national security interests.\n\nWhen the law enforcement, immigration or asylum authorities are providers of\nhigh-risk AI systems referred to in points 1, 6 and 7 of Annex III, the\ntechnical documentation referred to in Annex IV shall remain within the\npremises of those authorities. Those authorities shall ensure that the market\nsurveillance authorities referred to in Article 63(5) and (6), as applicable,\ncan, upon request, immediately access the documentation or obtain a copy\nthereof. 
Only staff of the market surveillance authority holding the\nappropriate level of security clearance shall be allowed to access that\ndocumentation or any copy thereof.\n\n3.Paragraphs 1 and 2 shall not affect the rights and obligations of the\nCommission, Member States and notified bodies with regard to the exchange of\ninformation and the dissemination of warnings, nor the obligations of the\nparties concerned to provide information under criminal law of the Member\nStates.\n\n4.The Commission and Member States may exchange, where necessary, confidential\ninformation with regulatory authorities of third countries with which they\nhave concluded bilateral or multilateral confidentiality arrangements\nguaranteeing an adequate level of confidentiality.\n\nArticle 71 \nPenalties\n\n1.In compliance with the terms and conditions laid down in this Regulation,\nMember States shall lay down the rules on penalties, including administrative\nfines, applicable to infringements of this Regulation and shall take all\nmeasures necessary to ensure that they are properly and effectively\nimplemented. The penalties provided for shall be effective, proportionate, and\ndissuasive. They shall take into particular account the interests of small-\nscale providers and start-ups and their economic viability.\n\n2.The Member States shall notify the Commission of those rules and of those\nmeasures and shall notify it, without delay, of any subsequent amendment\naffecting them.",
"f86d03df-3463-480c-a3a9-1d054159204e": "4.The Commission and Member States may exchange, where necessary, confidential\ninformation with regulatory authorities of third countries with which they\nhave concluded bilateral or multilateral confidentiality arrangements\nguaranteeing an adequate level of confidentiality.\n\nArticle 71 \nPenalties\n\n1.In compliance with the terms and conditions laid down in this Regulation,\nMember States shall lay down the rules on penalties, including administrative\nfines, applicable to infringements of this Regulation and shall take all\nmeasures necessary to ensure that they are properly and effectively\nimplemented. The penalties provided for shall be effective, proportionate, and\ndissuasive. They shall take into particular account the interests of small-\nscale providers and start-up and their economic viability.\n\n2.The Member States shall notify the Commission of those rules and of those\nmeasures and shall notify it, without delay, of any subsequent amendment\naffecting them.\n\n3.The following infringements shall be subject to administrative fines of up\nto 30 000 000 EUR or, if the offender is company, up to 6 % of its total\nworldwide annual turnover for the preceding financial year, whichever is\nhigher:\n\n(a)non-compliance with the prohibition of the artificial intelligence\npractices referred to in Article 5;\n\n(b)non-compliance of the AI system with the requirements laid down in Article\n10.\n\n4.The non-compliance of the AI system with any requirements or obligations\nunder this Regulation, other than those laid down in Articles 5 and 10, shall\nbe subject to administrative fines of up to 20 000 000 EUR or, if the offender\nis a company, up to 4 % of its total worldwide annual turnover for the\npreceding financial year, whichever is higher.\n\n5.The supply of incorrect, incomplete or misleading information to notified\nbodies and national competent authorities in reply to a request shall be\nsubject to administrative fines of up to 10 000 000 EUR or, if the offender is\na company, up to 2 % of its total worldwide annual turnover for the preceding\nfinancial year, whichever is higher.\n\n6.When deciding on the amount of the administrative fine in each individual\ncase, all relevant circumstances of the specific situation shall be taken into\naccount and due regard shall be given to the following:\n\n(a)the nature, gravity and duration of the infringement and of its\nconsequences;\n\n(b)whether administrative fines have been already applied by other market\nsurveillance authorities to the same operator for the same infringement.\n\n(c)the size and market share of the operator committing the infringement;\n\n7.Each Member State shall lay down rules on whether and to what extent\nadministrative fines may be imposed on public authorities and bodies\nestablished in that Member State.\n\n8.Depending on the legal system of the Member States, the rules on\nadministrative fines may be applied in such a manner that the fines are\nimposed by competent national courts of other bodies as applicable in those\nMember States. The application of such rules in those Member States shall have\nan equivalent effect.\n\nArticle 72 \nAdministrative fines on Union institutions, agencies and bodies\n\n1.The European Data Protection Supervisor may impose administrative fines on\nUnion institutions, agencies and bodies falling within the scope of this\nRegulation. 
When deciding whether to impose an administrative fine and\ndeciding on the amount of the administrative fine in each individual case, all\nrelevant circumstances of the specific situation shall be taken into account\nand due regard shall be given to the following:\n\n(a)the nature, gravity and duration of the infringement and of its\nconsequences;\n\n(b)the cooperation with the European Data Protection Supervisor in order to\nremedy the infringement and mitigate the possible adverse effects of the\ninfringement, including compliance with any of the measures previously ordered\nby the European Data Protection Supervisor against the Union institution or\nagency or body concerned with regard to the same subject matter;\n\n(c)any similar previous infringements by the Union institution, agency or\nbody;\n\n2.The following infringements shall be subject to administrative fines of up\nto 500 000 EUR:\n\n(a)non-compliance with the prohibition of the artificial intelligence\npractices referred to in Article 5;\n\n(b)non-compliance of the AI system with the requirements laid down in Article\n10.\n\n3.The non-compliance of the AI system with any requirements or obligations\nunder this Regulation, other than those laid down in Articles 5 and 10, shall\nbe subject to administrative fines of up to 250 000 EUR.\n\n4.Before taking decisions pursuant to this Article, the European Data\nProtection Supervisor shall give the Union institution, agency or body which\nis the subject of the proceedings conducted by the European Data Protection\nSupervisor the opportunity of being heard on the matter regarding the possible\ninfringement.",
"fa5e0764-204d-413a-afc0-2d0bbbc0580e": "3.The non-compliance of the AI system with any requirements or obligations\nunder this Regulation, other than those laid down in Articles 5 and 10, shall\nbe subject to administrative fines of up to 250 000 EUR.\n\n4.Before taking decisions pursuant to this Article, the European Data\nProtection Supervisor shall give the Union institution, agency or body which\nis the subject of the proceedings conducted by the European Data Protection\nSupervisor the opportunity of being heard on the matter regarding the possible\ninfringement. The European Data Protection Supervisor shall base his or her\ndecisions only on elements and circumstances on which the parties concerned\nhave been able to comment. Complainants, if any, shall be associated closely\nwith the proceedings.\n\n5.The rights of defense of the parties concerned shall be fully respected in\nthe proceedings. They shall be entitled to have access to the European Data\nProtection Supervisor\u2019s file, subject to the legitimate interest of\nindividuals or undertakings in the protection of their personal data or\nbusiness secrets.\n\n6.Funds collected by imposition of fines in this Article shall be the income\nof the general budget of the Union.\n\nTITLE XI\n\nDELEGATION OF POWER AND COMMITTEE PROCEDURE\n\nArticle 73 \nExercise of the delegation\n\n1.The power to adopt delegated acts is conferred on the Commission subject to\nthe conditions laid down in this Article.\n\n2.The delegation of power referred to in Article 4, Article 7(1), Article\n11(3), Article 43(5) and (6) and Article 48(5) shall be conferred on the\nCommission for an indeterminate period of time from [entering into force of\nthe Regulation].\n\n3.The delegation of power referred to in Article 4, Article 7(1), Article\n11(3), Article 43(5) and (6) and Article 48(5) may be revoked at any time by\nthe European Parliament or by the Council. A decision of revocation shall put\nan end to the delegation of power specified in that decision. It shall take\neffect the day following that of its publication in the Official Journal of\nthe European Union or at a later date specified therein. It shall not affect\nthe validity of any delegated acts already in force.\n\n4.As soon as it adopts a delegated act, the Commission shall notify it\nsimultaneously to the European Parliament and to the Council.\n\n5.Any delegated act adopted pursuant to Article 4, Article 7(1), Article\n11(3), Article 43(5) and (6) and Article 48(5) shall enter into force only if\nno objection has been expressed by either the European Parliament or the\nCouncil within a period of three months of notification of that act to the\nEuropean Parliament and the Council or if, before the expiry of that period,\nthe European Parliament and the Council have both informed the Commission that\nthey will not object. That period shall be extended by three months at the\ninitiative of the European Parliament or of the Council.\n\nArticle 74 \nCommittee procedure\n\n1.The Commission shall be assisted by a committee. 
That committee shall be a\ncommittee within the meaning of Regulation (EU) No 182/2011.\n\n2.Where reference is made to this paragraph, Article 5 of Regulation (EU) No\n182/2011 shall apply.\n\nTITLE XII\n\nFINAL PROVISIONS\n\nArticle 75 \nAmendment to Regulation (EC) No 300/2008\n\nIn Article 4(3) of Regulation (EC) No 300/2008, the following subparagraph is\nadded:\n\n\u201cWhen adopting detailed measures related to technical specifications and\nprocedures for approval and use of security equipment concerning Artificial\nIntelligence systems in the meaning of Regulation (EU) YYY/XX [on Artificial\nIntelligence] of the European Parliament and of the Council*, the requirements\nset out in Chapter 2, Title III of that Regulation shall be taken into\naccount.\u201d\n\n__________\n\n* Regulation (EU) YYY/XX [on Artificial Intelligence] (OJ \u2026).\u201d\n\nArticle 76 \nAmendment to Regulation (EU) No 167/2013\n\nIn Article 17(5) of Regulation (EU) No 167/2013, the following subparagraph is\nadded:\n\n\u201cWhen adopting delegated acts pursuant to the first subparagraph concerning\nartificial intelligence systems which are safety components in the meaning of\nRegulation (EU) YYY/XX [on Artificial Intelligence] of the European Parliament\nand of the Council*, the requirements set out in Title III, Chapter 2 of that\nRegulation shall be taken into account.",
"89d870dc-70ad-485e-9ecf-4cc8ff38d5cd": "__________\n\n* Regulation (EU) YYY/XX [on Artificial Intelligence] (OJ \u2026).\u201d\n\nArticle 77 \nAmendment to Regulation (EU) No 168/2013\n\nIn Article 22(5) of Regulation (EU) No 168/2013, the following subparagraph is\nadded:\n\n\u201cWhen adopting delegated acts pursuant to the first subparagraph concerning\nArtificial Intelligence systems which are safety components in the meaning of\nRegulation (EU) YYY/XX on [Artificial Intelligence] of the European Parliament\nand of the Council*, the requirements set out in Title III, Chapter 2 of that\nRegulation shall be taken into account.\n\n__________\n\n* Regulation (EU) YYY/XX [on Artificial Intelligence] (OJ \u2026).\u201d\n\nArticle 78 \nAmendment to Directive 2014/90/EU\n\nIn Article 8 of Directive 2014/90/EU, the following paragraph is added:\n\n\u201c4. For Artificial Intelligence systems which are safety components in the\nmeaning of Regulation (EU) YYY/XX [on Artificial Intelligence] of the European\nParliament and of the Council*, when carrying out its activities pursuant to\nparagraph 1 and when adopting technical specifications and testing standards\nin accordance with paragraphs 2 and 3, the Commission shall take into account\nthe requirements set out in Title III, Chapter 2 of that Regulation.\n\n__________\n\n* Regulation (EU) YYY/XX [on Artificial Intelligence] (OJ \u2026).\u201d.\n\nArticle 79 \nAmendment to Directive (EU) 2016/797\n\nIn Article 5 of Directive (EU) 2016/797, the following paragraph is added:\n\n\u201c12. When adopting delegated acts pursuant to paragraph 1 and implementing\nacts pursuant to paragraph 11 concerning Artificial Intelligence systems which\nare safety components in the meaning of Regulation (EU) YYY/XX [on Artificial\nIntelligence] of the European Parliament and of the Council*, the requirements\nset out in Title III, Chapter 2 of that Regulation shall be taken into\naccount.\n\n__________\n\n* Regulation (EU) YYY/XX [on Artificial Intelligence] (OJ \u2026).\u201d.\n\nArticle 80 \nAmendment to Regulation (EU) 2018/858\n\nIn Article 5 of Regulation (EU) 2018/858 the following paragraph is added:\n\n\u201c4. When adopting delegated acts pursuant to paragraph 3 concerning Artificial\nIntelligence systems which are safety components in the meaning of Regulation\n(EU) YYY/XX [on Artificial Intelligence] of the European Parliament and of the\nCouncil *, the requirements set out in Title III, Chapter 2 of that Regulation\nshall be taken into account.\n\n__________\n\n* Regulation (EU) YYY/XX [on Artificial Intelligence] (OJ \u2026).\u201d.\n\nArticle 81 \nAmendment to Regulation (EU) 2018/1139\n\nRegulation (EU) 2018/1139 is amended as follows:\n\n(1) In Article 17, the following paragraph is added:\n\n\u201c3. Without prejudice to paragraph 2, when adopting implementing acts pursuant\nto paragraph 1 concerning Artificial Intelligence systems which are safety\ncomponents in the meaning of Regulation (EU) YYY/XX [on Artificial\nIntelligence] of the European Parliament and of the Council*, the requirements\nset out in Title III, Chapter 2 of that Regulation shall be taken into\naccount.\n\n__________\n\n* Regulation (EU) YYY/XX [on Artificial Intelligence] (OJ \u2026).\u201d\n\n(2) In Article 19, the following paragraph is added:\n\n\u201c4. 
When adopting delegated acts pursuant to paragraphs 1 and 2 concerning\nArtificial Intelligence systems which are safety components in the meaning of\nRegulation (EU) YYY/XX [on Artificial Intelligence], the requirements set out\nin Title III, Chapter 2 of that Regulation shall be taken into account.\u201d\n\n(3) In Article 43, the following paragraph is added:\n\n\u201c4. When adopting implementing acts pursuant to paragraph 1 concerning\nArtificial Intelligence systems which are safety components in the meaning of\nRegulation (EU) YYY/XX [on Artificial Intelligence], the requirements set out\nin Title III, Chapter 2 of that Regulation shall be taken into account.\u201d\n\n(4) In Article 47, the following paragraph is added:\n\n\u201c3.",
"9ee22d0c-41cd-456c-bc6a-b9b5c64bafbd": "__________\n\n* Regulation (EU) YYY/XX [on Artificial Intelligence] (OJ \u2026).\u201d\n\n(2) In Article 19, the following paragraph is added:\n\n\u201c4. When adopting delegated acts pursuant to paragraphs 1 and 2 concerning\nArtificial Intelligence systems which are safety components in the meaning of\nRegulation (EU) YYY/XX [on Artificial Intelligence], the requirements set out\nin Title III, Chapter 2 of that Regulation shall be taken into account.\u201d\n\n(3) In Article 43, the following paragraph is added:\n\n\u201c4. When adopting implementing acts pursuant to paragraph 1 concerning\nArtificial Intelligence systems which are safety components in the meaning of\nRegulation (EU) YYY/XX [on Artificial Intelligence], the requirements set out\nin Title III, Chapter 2 of that Regulation shall be taken into account.\u201d\n\n(4) In Article 47, the following paragraph is added:\n\n\u201c3. When adopting delegated acts pursuant to paragraphs 1 and 2 concerning\nArtificial Intelligence systems which are safety components in the meaning of\nRegulation (EU) YYY/XX [on Artificial Intelligence], the requirements set out\nin Title III, Chapter 2 of that Regulation shall be taken into account.\u201d\n\n(5) In Article 57, the following paragraph is added:\n\n\u201cWhen adopting those implementing acts concerning Artificial Intelligence\nsystems which are safety components in the meaning of Regulation (EU) YYY/XX\n[on Artificial Intelligence], the requirements set out in Title III, Chapter 2\nof that Regulation shall be taken into account.\u201d\n\n(6) In Article 58, the following paragraph is added:\n\n\u201c3. When adopting delegated acts pursuant to paragraphs 1 and 2 concerning\nArtificial Intelligence systems which are safety components in the meaning of\nRegulation (EU) YYY/XX [on Artificial Intelligence] , the requirements set out\nin Title III, Chapter 2 of that Regulation shall be taken into account.\u201d.\n\nArticle 82 \nAmendment to Regulation (EU) 2019/2144\n\nIn Article 11 of Regulation (EU) 2019/2144, the following paragraph is added:\n\n\u201c3. 
When adopting the implementing acts pursuant to paragraph 2, concerning\nartificial intelligence systems which are safety components in the meaning of\nRegulation (EU) YYY/XX [on Artificial Intelligence] of the European Parliament\nand of the Council*, the requirements set out in Title III, Chapter 2 of that\nRegulation shall be taken into account.\n\n__________\n\n* Regulation (EU) YYY/XX [on Artificial Intelligence] (OJ \u2026).\u201d.\n\nArticle 83 \nAI systems already placed on the market or put into service\n\n1.This Regulation shall not apply to the AI systems which are components of\nthe large-scale IT systems established by the legal acts listed in Annex IX\nthat have been placed on the market or put into service before [12 months\nafter the date of application of this Regulation referred to in Article\n85(2)], unless the replacement or amendment of those legal acts leads to a\nsignificant change in the design or intended purpose of the AI system or AI\nsystems concerned.\n\nThe requirements laid down in this Regulation shall be taken into account,\nwhere applicable, in the evaluation of each of the large-scale IT systems\nestablished by the legal acts listed in Annex IX to be undertaken as provided\nfor in those respective acts.\n\n2.This Regulation shall apply to the high-risk AI systems, other than the ones\nreferred to in paragraph 1, that have been placed on the market or put into\nservice before [date of application of this Regulation referred to in Article\n85(2)], only if, from that date, those systems are subject to significant\nchanges in their design or intended purpose.\n\nArticle 84 \nEvaluation and review\n\n1.The Commission shall assess the need for amendment of the list in Annex III\nonce a year following the entry into force of this Regulation.\n\n2.By [three years after the date of application of this Regulation referred to\nin Article 85(2)] and every four years thereafter, the Commission shall submit\na report on the evaluation and review of this Regulation to the European\nParliament and to the Council. The reports shall be made public.\n\n3.The reports referred to in paragraph 2 shall devote specific attention to\nthe following:\n\n(a)the status of the financial and human resources of the national competent\nauthorities in order to effectively perform the tasks assigned to them under\nthis Regulation;\n\n(b)the state of penalties, and notably administrative fines as referred to in\nArticle 71(1), applied by Member States to infringements of the provisions of\nthis Regulation.",
"d582a86a-8704-4281-a071-c2331bbb0e0c": "Article 84 \nEvaluation and review\n\n1.The Commission shall assess the need for amendment of the list in Annex III\nonce a year following the entry into force of this Regulation.\n\n2.By [three years after the date of application of this Regulation referred to\nin Article 85(2)] and every four years thereafter, the Commission shall submit\na report on the evaluation and review of this Regulation to the European\nParliament and to the Council. The reports shall be made public.\n\n3.The reports referred to in paragraph 2 shall devote specific attention to\nthe following:\n\n(a)the status of the financial and human resources of the national competent\nauthorities in order to effectively perform the tasks assigned to them under\nthis Regulation;\n\n(b)the state of penalties, and notably administrative fines as referred to in\nArticle 71(1), applied by Member States to infringements of the provisions of\nthis Regulation.\n\n4.Within [three years after the date of application of this Regulation\nreferred to in Article 85(2)] and every four years thereafter, the Commission\nshall evaluate the impact and effectiveness of codes of conduct to foster the\napplication of the requirements set out in Title III, Chapter 2 and possibly\nother additional requirements for AI systems other than high-risk AI systems.\n\n5.For the purpose of paragraphs 1 to 4 the Board, the Member States and\nnational competent authorities shall provide the Commission with information\non its request.\n\n6.In carrying out the evaluations and reviews referred to in paragraphs 1 to 4\nthe Commission shall take into account the positions and findings of the\nBoard, of the European Parliament, of the Council, and of other relevant\nbodies or sources.\n\n7.The Commission shall, if necessary, submit appropriate proposals to amend\nthis Regulation, in particular taking into account developments in technology\nand in the light of the state of progress in the information society.\n\nArticle 85 \nEntry into force and application\n\n1.This Regulation shall enter into force on the twentieth day following that\nof its publication in the Official Journal of the European Union.\n\n2.This Regulation shall apply from [24 months following the entering into\nforce of the Regulation].\n\n3.By way of derogation from paragraph 2:\n\n(a)Title III, Chapter 4 and Title VI shall apply from [three months following\nthe entry into force of this Regulation];\n\n(b)Article 71 shall apply from [twelve months following the entry into force\nof this Regulation].\n\nThis Regulation shall be binding in its entirety and directly applicable in\nall Member States.\n\nDone at Brussels,\n\nFor the European Parliament For the Council\n\nThe President The President\n\nLEGISLATIVE FINANCIAL STATEMENT\n\n1.FRAMEWORK OF THE PROPOSAL/INITIATIVE\n\n1.1. Title of the proposal/initiative\n\n1.2. Policy area(s) concerned\n\n1.3. The proposal/initiative relates to:\n\n1.4. Objective(s)\n\n1.4.1. General objective(s)\n\n1.4.2. Specific objective(s)\n\n1.4.3. Expected result(s) and impact\n\n1.4.4. Indicators of performance\n\n1.5. Grounds for the proposal/initiative\n\n1.5.1. Requirement(s) to be met in the short or long term including a detailed\ntimeline for roll-out of the implementation of the initiative\n\n1.5.2. Added value of Union involvement (it may result from different factors,\ne.g. coordination gains, legal certainty, greater effectiveness or\ncomplementarities). 
For the purposes of this point 'added value of Union\ninvolvement' is the value resulting from Union intervention which is\nadditional to the value that would have been otherwise created by Member\nStates alone\n\n1.5.3. Lessons learned from similar experiences in the past\n\n1.5.4. Compatibility with the Multiannual Financial Framework and possible\nsynergies with other appropriate instruments\n\n1.5.5 Assessment of the different available financing options, including scope\nfor redeployment\n\n1.6. Duration and financial impact of the proposal/initiative\n\n1.7. Management mode(s) planned\n\n2.MANAGEMENT MEASURES\n\n2.1. Monitoring and reporting rules\n\n2.2. Management and control system\n\n2.2.1. Justification of the management mode(s), the funding implementation\nmechanism(s), the payment modalities and the control strategy proposed\n\n2.2.2. Information concerning the risks identified and the internal control\nsystem(s) set up to mitigate them\n\n2.2.3.",
"c18022a7-529e-4273-accd-eaf024dc74f5": "Lessons learned from similar experiences in the past\n\n1.5.4. Compatibility with the Multiannual Financial Framework and possible\nsynergies with other appropriate instruments\n\n1.5.5 Assessment of the different available financing options, including scope\nfor redeployment\n\n1.6. Duration and financial impact of the proposal/initiative\n\n1.7. Management mode(s) planned\n\n2.MANAGEMENT MEASURES\n\n2.1. Monitoring and reporting rules\n\n2.2. Management and control system\n\n2.2.1. Justification of the management mode(s), the funding implementation\nmechanism(s), the payment modalities and the control strategy proposed\n\n2.2.2. Information concerning the risks identified and the internal control\nsystem(s) set up to mitigate them\n\n2.2.3. Estimation and justification of the cost-effectiveness of the controls\n(ratio of \"control costs \u00f7 value of the related funds managed\"), and\nassessment of the expected levels of risk of error (at payment & at closure)\n\n2.3.Measures to prevent fraud and irregularities\n\n3.ESTIMATED FINANCIAL IMPACT OF THE PROPOSAL/INITIATIVE\n\n3.1.Heading(s) of the multiannual financial framework and expenditure budget\nline(s) affected\n\n3.2.Estimated financial impact of the proposal on appropriations\n\n3.2.1.Summary of estimated impact on operational appropriations\n\n3.2.2.Estimated output funded with operational appropriations\n\n3.2.3. Summary of estimated impact on administrative appropriations\n\n3.2.4.Compatibility with the current multiannual financial framework\n\n3.2.5.Third-party contributions\n\n3.3.Estimated impact on revenue\n\nLEGISLATIVE FINANCIAL STATEMENT\n\n1.FRAMEWORK OF THE PROPOSAL/INITIATIVE\n\n1.1.Title of the proposal/initiative\n\nRegulation of the European Parliament and of the Council Laying Down\nHarmonised Rules on Artificial Intelligence (Artificial Intelligence Act) and\nAmending Certain Union Legislative Acts\n\n1.2.Policy area(s) concerned\n\nCommunications Networks, Content and Technology;\n\nInternal Market, Industry, Entrepreneurship and SMEs;\n\nThe budgetary impact concerns the new tasks entrusted with the Commission,\nincluding the support to the EU AI Board;\n\nActivity: Shaping Europe's digital future.\n\n1.3.The proposal/initiative relates to:\n\nX a new action\n\n\u25fb a new action following a pilot project/preparatory action 64\n\n\u25fb the extension of an existing action\n\n\u25fb an action redirected towards a new action\n\n1.4.Objective(s)\n\n1.4.1.General objective(s)\n\nThe general objective of the intervention is to ensure the proper functioning\nof the single market by creating the conditions for the development and use of\ntrustworthy artificial intelligence in the Union.\n\n1.4.2.Specific objective(s)\n\nSpecific objective No 1\n\nTo set requirements specific to AI systems and obligations on all value chain\nparticipants in order to ensure that AI systems placed on the market and used\nare safe and respect existing law on fundamental rights and Union values;\n\nSpecific objective No 2\n\nTo ensure legal certainty to facilitate investment and innovation in AI by\nmaking it clear what essential requirements, obligations, as well as\nconformity and compliance procedures must be followed to place or use an AI\nsystem in the Union market;\n\nSpecific objective No 3\n\nTo enhance governance and effective enforcement of existing law on fundamental\nrights and safety requirements applicable to AI systems by providing new\npowers, resources and clear rules for 
relevant authorities on conformity\nassessment and ex post monitoring procedures and the division of governance\nand supervision tasks between national and EU levels;\n\nSpecific objective No 4\n\nTo facilitate the development of a single market for lawful, safe and\ntrustworthy AI applications and prevent market fragmentation by taking EU\naction to set minimum requirements for AI systems to be placed and used in the\nUnion market in compliance with existing law on fundamental rights and safety.\n\n1.4.3.Expected result(s) and impact\n\nSpecify the effects which the proposal/initiative should have on the\nbeneficiaries/groups targeted.\n\nAI suppliers should benefit from a minimal but clear set of requirements,\ncreating legal certainty and ensuring access to the entire single market.\n\nAI users should benefit from legal certainty that the high-risk AI systems\nthey buy comply with European laws and values.\n\nConsumers should benefit from a reduced risk of violations of their safety or\nfundamental rights.\n\n1.4.4.Indicators of performance\n\nSpecify the indicators for monitoring implementation of the\nproposal/initiative.",
"3e5ae392-3aa1-448f-8895-87a70aaebb33": "1.4.3.Expected result(s) and impact\n\nSpecify the effects which the proposal/initiative should have on the\nbeneficiaries/groups targeted.\n\nAI suppliers should benefit from a minimal but clear set of requirements,\ncreating legal certainty and ensuring access to the entire single market.\n\nAI users should benefit from legal certainty that the high-risk AI systems\nthey buy comply with European laws and values.\n\nConsumers should benefit by reducing the risk of violations of their safety or\nfundamental rights.\n\n1.4.4.Indicators of performance\n\nSpecify the indicators for monitoring implementation of the\nproposal/initiative.\n\nIndicator 1\n\nNumber of serious incidents or AI performances which constitute a serious\nincident or a breach of fundamental rights obligations (semi-annual) by fields\nof applications and calculated a) in absolute terms, b) as share of\napplications deployed and c) as share of citizens concerned.\n\nIndicator 2\n\na) Total AI investment in the EU (annual)\n\nb) Total AI investment by Member State (annual)\n\nc) Share of companies using AI (annual)\n\nd) Share of SMEs using AI (annual)\n\na) and b) will be calculated based on official sources and benchmarked against\nprivate estimates\n\nc) and d) will be collected by regular company surveys\n\n1.5.Grounds for the proposal/initiative\n\n1.5.1.Requirement(s) to be met in the short or long term including a detailed\ntimeline for roll-out of the implementation of the initiative\n\nThe Regulation should be fully applicable one year and a half after its\nadoption. However, elements of the governance structure should be in place\nbefore then. In particular, Member States shall have appointed existing\nauthorities and/or established new authorities performing the tasks set out in\nthe legislation earlier, and the EU AI Board should be set-up and effective.\nBy the time of applicability, the European database of AI systems should be\nfully operative. In parallel to the adoption process, it is therefore\nnecessary to develop the database, so that its development has come to an end\nwhen the regulation enters into force.\n\n1.5.2.Added value of Union involvement (it may result from different factors,\ne.g. coordination gains, legal certainty, greater effectiveness or\ncomplementarities). For the purposes of this point 'added value of Union\ninvolvement' is the value resulting from Union intervention which is\nadditional to the value that would have been otherwise created by Member\nStates alone.\n\nAn emerging patchy framework of potentially divergent national rules will\nhamper the seamless provision of AI systems across the EU and is ineffective\nin ensuring the safety and protection of fundamental rights and Union values\nacross the different Member States. 
A common EU legislative action on AI could\nboost the internal market and has great potential to provide European industry\nwith a competitive edge on the global scene and economies of scale that cannot\nbe achieved by individual Member States alone.\n\n1.5.3.Lessons learned from similar experiences in the past\n\nThe E-commerce Directive 2000/31/EC provides the core framework for the\nfunctioning of the single market and the supervision of digital services and\nsets a basic structure for a general cooperation mechanism among Member\nStates, covering in principle all requirements applicable to digital services.\nThe evaluation of the Directive pointed to shortcomings in several aspects of\nthis cooperation mechanism, including important procedural aspects such as the\nlack of clear timeframes for response from Member States coupled with a\ngeneral lack of responsiveness to requests from their counterparts. This has\nled over the years to a lack of trust between Member States in addressing\nconcerns about providers offering digital services cross-border. The\nevaluation of the Directive showed the need to define a differentiated set of\nrules and requirements at European level. For this reason, the implementation\nof the specific obligations laid down in this Regulation would require a\nspecific cooperation mechanism at EU level, with a governance structure\nensuring coordination of specific responsible bodies at EU level.\n\n1.5.4.Compatibility with the Multiannual Financial Framework and possible\nsynergies with other appropriate instruments\n\nThe Regulation Laying Down Harmonised Rules on Artificial Intelligence and\nAmending Certain Union Legislative Acts defines a new common framework of\nrequirements applicable to AI systems, which goes well beyond the framework\nprovided by existing legislation. For this reason, a new national and European\nregulatory and coordination function needs to be established with this\nproposal.\n\nAs regards possible synergies with other appropriate instruments, the role of\nnotifying authorities at national level can be performed by national\nauthorities fulfilling similar functions under other EU regulations.",
"9d90027d-86a5-4562-9498-b9429c0bd9c2": "The\nevaluation of the Directive showed the need to define a differentiated set of\nrules and requirements at European level. For this reason, the implementation\nof the specific obligations laid down in this Regulation would require a\nspecific cooperation mechanism at EU level, with a governance structure\nensuring coordination of specific responsible bodies at EU level.\n\n1.5.4.Compatibility with the Multiannual Financial Framework and possible\nsynergies with other appropriate instruments\n\nThe Regulation Laying Down Harmonised Rules on Artificial Intelligence and\nAmending Certain Union Legislative Acts defines a new common framework of\nrequirements applicable to AI systems, which goes well beyond the framework\nprovided by existing legislation. For this reason, a new national and European\nregulatory and coordination function needs to be established with this\nproposal.\n\nAs regards possible synergies with other appropriate instruments, the role of\nnotifying authorities at national level can be performed by national\nauthorities fulfilling similar functions sunder other EU regulations.\n\nMoreover, by increasing trust in AI and thus encouraging investment in\ndevelopment and adoption of AI, it complements Digital Europe, for which\npromoting the diffusion of AI is one of five priorities.\n\n1.5.5.Assessment of the different available financing options, including scope\nfor redeployment\n\nThe staff will be redeployed. The other costs will be supported from the DEP.\nenvelope, given that the objective of this regulation \u2013 ensuring trustworthy\nAI \u2013 contributes directly to one key objective of Digital Europe \u2013\naccelerating AI development and deployment in Europe.\n\n1.6.Duration and financial impact of the proposal/initiative\n\n\u25fb limited duration\n\n\u2013\u25fb in effect from [DD/MM]YYYY to [DD/MM]YYYY\n\n\u2013\u25fb Financial impact from YYYY to YYYY for commitment appropriations and from\nYYYY to YYYY for payment appropriations.\n\nX unlimited duration\n\n\u2013Implementation with a start-up period from one/two (tbc) year,\n\n\u2013followed by full-scale operation.\n\n1.7.Management mode(s) planned 65\n\nX Direct management by the Commission\n\n\u2013\u25fb by its departments, including by its staff in the Union delegations;\n\n\u2013\u25fb by the executive agencies\n\n\u25fb Shared management with the Member States\n\n\u25fb Indirect management by entrusting budget implementation tasks to:\n\n\u2013\u25fb third countries or the bodies they have designated;\n\n\u2013\u25fb international organisations and their agencies (to be specified);\n\n\u2013\u25fb the EIB and the European Investment Fund;\n\n\u2013\u25fb bodies referred to in Articles 70 and 71 of the Financial Regulation;\n\n\u2013\u25fb public law bodies;\n\n\u2013\u25fb bodies governed by private law with a public service mission to the extent\nthat they provide adequate financial guarantees;\n\n\u2013\u25fb bodies governed by the private law of a Member State that are entrusted\nwith the implementation of a public-private partnership and that provide\nadequate financial guarantees;\n\n\u2013\u25fb persons entrusted with the implementation of specific actions in the CFSP\npursuant to Title V of the TEU, and identified in the relevant basic act.\n\n\u2013If more than one management mode is indicated, please provide details in the\n\u2018Comments\u2019 section.\n\nComments\n\n2.MANAGEMENT MEASURES\n\n2.1.Monitoring and reporting rules\n\nSpecify frequency 
and conditions.\n\nThe Regulation will be reviewed and evaluated five years from the entry into\nforce of the regulation. The Commission will report on the findings of the\nevaluation to the European Parliament, the Council and the European Economic\nand Social Committee.\n\n2.2.Management and control system(s)\n\n2.2.1.Justification of the management mode(s), the funding implementation\nmechanism(s), the payment modalities and the control strategy proposed\n\nThe Regulation establishes a new policy with regard to harmonised rules for\nthe provision of artificial intelligence systems in the internal market while\nensuring the respect of safety and fundamental rights. These new rules require\na consistency mechanism for the cross-border application of the obligations\nunder this Regulation in the form of a new advisory group coordinating the\nactivities of national authorities.\n\nIn order to face these new tasks, it is necessary to appropriately resource\nthe Commission\u2019s services. The enforcement of the new Regulation is estimated\nto require 10 FTE at full operation (5 FTE for the support to the activities of the\nBoard and 5 FTE for the European Data Protection Supervisor acting as a\nnotifying body for AI systems deployed by a body of the European Union).\n\n2.2.2.Information concerning the risks identified and the internal control\nsystem(s) set up to mitigate them\n\nIn order to ensure that the members of the Board have the possibility to make\ninformed analysis on the basis of factual evidence, it is foreseen that the\nBoard should be supported by the administrative structure of the Commission\nand that an expert group be created to provide additional expertise where\nrequired.",
"f530109d-a976-49ef-a079-481cd306b524": "In order to face these new tasks, it is necessary to appropriately resource\nthe Commission\u2019s services. The enforcement of the new Regulation is estimated\nto require 10 FTE \u00e0 regime (5 FTE for the support to the activities of the\nBoard and 5 FTE for the European Data Protection Supervisor acting as a\nnotifying body for AI systems deployed by a body of the European Union).\n\n2.2.2.Information concerning the risks identified and the internal control\nsystem(s) set up to mitigate them\n\nIn order to ensure that the members of the Board have the possibility to make\ninformed analysis on the basis of factual evidence, it is foreseen that the\nBoard should be supported by the administrative structure of the Commission\nand that an expert group be created to provide additional expertise where\nrequired.\n\n2.2.3.Estimate and justification of the cost-effectiveness of the controls\n(ratio of \"control costs \u00f7 value of the related funds managed\"), and\nassessment of the expected levels of risk of error (at payment & at closure)\n\nFor the meeting expenditure, given the low value per transaction (e.g.\nrefunding travel costs for a delegate for a meeting), standard control\nprocedures seem sufficient. Regarding the development of the database,\ncontract attribution has a strong internal control system in place in DG CNECT\nthrough centralised procurement activities.\n\n2.3.Measures to prevent fraud and irregularities\n\nSpecify existing or envisaged prevention and protection measures, e.g. from\nthe Anti-Fraud Strategy.\n\nThe existing fraud prevention measures applicable to the Commission will cover\nthe additional appropriations necessary for this Regulation.\n\n3.ESTIMATED FINANCIAL IMPACT OF THE PROPOSAL/INITIATIVE\n\n3.1.Heading(s) of the multiannual financial framework and expenditure budget\nline(s) affected\n\n\u00b7Existing budget lines\n\nIn order of multiannual financial framework headings and budget lines.\n\nHeading of multiannual financial framework\n\n|\n\nBudget line\n\n|\n\nType of \nexpenditure\n\n|\n\nContribution \n \n---|---|---|--- \n \n|\n\nNumber \n\n|\n\nDiff./Non-diff. 
66\n\n|\n\nfrom EFTA countries 67\n\n|\n\nfrom candidate countries 68\n\n|\n\nfrom third countries\n\n|\n\nwithin the meaning of Article 21(2)(b) of the Financial Regulation \n \n7\n\n|\n\n20 02 06 Administrative expenditure\n\n|\n\nNon-diff.\n\n|\n\nNO\n\n|\n\nNO\n\n|\n\nNO\n\n|\n\nNO \n \n1\n\n|\n\n02 04 03 DEP Artificial Intelligence\n\n|\n\nDiff.\n\n|\n\nYES\n\n|\n\nNO\n\n|\n\nNO\n\n|\n\nNO \n \n1\n\n|\n\n02 01 30 01 Support expenditure for the Digital Europe programme\n\n|\n\nNon-diff.\n\n|\n\nYES\n\n|\n\nNO\n\n|\n\nNO\n\n|\n\nNO \n \n3.2.Estimated financial impact of the proposal on appropriations\n\n3.2.1.Summary of estimated impact on expenditure on operational appropriations\n\n\u2013\u25fb The proposal/initiative does not require the use of operational\nappropriations\n\n\u2013X The proposal/initiative requires the use of operational appropriations, as\nexplained below:\n\nEUR million (to three decimal places)\n\nHeading of multiannual financial \nframework\n\n|\n\n1\n\n| \n \n---|---|--- \n \nDG: CNECT\n\n|\n\n|\n\n|\n\n|\n\nYear \n2022\n\n|\n\nYear \n2023\n\n|\n\nYear \n2024\n\n|\n\nYear \n2025\n\n|\n\nYear \n2026\n\n|\n\nYear \n2027 69\n\n|\n\nTOTAL \n \n---|---|---|---|---|---|---|---|---|---|--- \n \n\u2022 Operational appropriations\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \nBudget line 70 02 04 03\n\n|\n\nCommitments\n\n|\n\n(1a)\n\n|\n\n|\n\n1.000\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n1.000 \n \n|\n\nPayments\n\n|\n\n(2a)\n\n|\n\n|\n\n0.600\n\n|\n\n0.100\n\n|\n\n0.100\n\n|\n\n0.100\n\n|\n\n0.100\n\n|\n\n|\n\n1.000 \n \nBudget line\n\n|\n\nCommitments\n\n|\n\n(1b)\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \n|\n\nPayments\n\n|\n\n(2b)\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \nAppropriations of an administrative nature financed from the envelope of\nspecific programmes 71\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \nBudget line 02 01 30 01\n\n|\n\n|\n\n(3)\n\n|\n\n|\n\n0.240\n\n|\n\n0.240\n\n|\n\n0.240\n\n|\n\n0.240\n\n|\n\n0.240\n\n|\n\n|\n\n1.200 \n \nTOTAL appropriations \nfor DG CNECT\n\n|\n\nCommitments\n\n|\n\n=1a+1b +3\n\n|\n\n|\n\n1.240\n\n|\n\n|\n\n0.240\n\n|\n\n0.240\n\n|\n\n0.240\n\n|\n\n|\n\n2.200 \n \n|\n\nPayments\n\n|\n\n=2a+2b\n\n+3\n\n|\n\n|\n\n0.",
"3c3daf6c-9b3d-42f7-90d5-cf4cd3bd27b6": "100\n\n|\n\n0.100\n\n|\n\n0.100\n\n|\n\n|\n\n1.000 \n \nBudget line\n\n|\n\nCommitments\n\n|\n\n(1b)\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \n|\n\nPayments\n\n|\n\n(2b)\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \nAppropriations of an administrative nature financed from the envelope of\nspecific programmes 71\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \nBudget line 02 01 30 01\n\n|\n\n|\n\n(3)\n\n|\n\n|\n\n0.240\n\n|\n\n0.240\n\n|\n\n0.240\n\n|\n\n0.240\n\n|\n\n0.240\n\n|\n\n|\n\n1.200 \n \nTOTAL appropriations \nfor DG CNECT\n\n|\n\nCommitments\n\n|\n\n=1a+1b +3\n\n|\n\n|\n\n1.240\n\n|\n\n|\n\n0.240\n\n|\n\n0.240\n\n|\n\n0.240\n\n|\n\n|\n\n2.200 \n \n|\n\nPayments\n\n|\n\n=2a+2b\n\n+3\n\n|\n\n|\n\n0.840\n\n|\n\n0.340\n\n|\n\n0.340\n\n|\n\n0.340\n\n|\n\n0.340\n\n|\n\n|\n\n2.200 \n \n \n\n \n \n\n \n\n \n\u2022 TOTAL operational appropriations\n\n|\n\nCommitments\n\n|\n\n(4)\n\n|\n\n|\n\n1.000\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n1.000 \n \n---|---|---|---|---|---|---|---|---|---|--- \n \n|\n\nPayments\n\n|\n\n(5)\n\n|\n\n|\n\n0.600\n\n|\n\n0.100\n\n|\n\n0.100\n\n|\n\n0.100\n\n|\n\n0.100\n\n|\n\n|\n\n1.000 \n \n\u2022 TOTAL appropriations of an administrative nature financed from the envelope\nfor specific programmes\n\n|\n\n(6)\n\n|\n\n|\n\n0.240\n\n|\n\n0.240\n\n|\n\n0.240\n\n|\n\n0.240\n\n|\n\n0.240\n\n|\n\n|\n\n1.200 \n \nTOTAL appropriations \nunder HEADING 1 \nof the multiannual financial framework\n\n|\n\nCommitments\n\n|\n\n=4+ 6\n\n|\n\n|\n\n1.240\n\n|\n\n0.240\n\n|\n\n0.240\n\n|\n\n.0.240\n\n|\n\n0.240\n\n|\n\n|\n\n2.200 \n \n|\n\nPayments\n\n|\n\n=5+ 6\n\n|\n\n|\n\n0.840\n\n|\n\n0.340\n\n|\n\n0.340\n\n|\n\n0.340\n\n|\n\n0.340\n\n|\n\n|\n\n2.200 \n \nIf more than one heading is affected by the proposal / initiative, repeat the\nsection above:\n\n\u2022 TOTAL operational appropriations (all operational headings)\n\n|\n\nCommitments\n\n|\n\n(4)\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \n---|---|---|---|---|---|---|---|---|---|--- \n \n|\n\nPayments\n\n|\n\n(5)\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \n\u2022 TOTAL appropriations of an administrative nature financed from the envelope\nfor specific programmes (all operational headings)\n\n|\n\n(6)\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \nTOTAL appropriations \nunder HEADINGS 1 to 6 \nof the multiannual financial framework \n(Reference amount)\n\n|\n\nCommitments\n\n|\n\n=4+ 6\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \n|\n\nPayments\n\n|\n\n=5+ 6\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \n \n\n \n\n \n\n \nHeading of multiannual financial \nframework\n\n|\n\n7\n\n|\n\n\u2018Administrative expenditure\u2019 \n \n---|---|--- \n \nThis section should be filled in using the 'budget data of an administrative\nnature' to be firstly introduced in the [ Annex to the Legislative Financial\nStatement\n](https://myintracomm.ec.europa.eu/budgweb/EN/leg/internal/Documents/2016-5-legislative-\nfinancial-statement-ann-en.docx) (Annex V to the internal rules), which is\nuploaded to DECIDE for interservice consultation purposes.\n\nEUR million (to three decimal places)\n\n|\n\n|\n\n|\n\nYear \n2023\n\n|\n\nYear \n2024\n\n|\n\nYear \n2025\n\n|\n\nYear \n2026\n\n|\n\nYear 2027\n\n|\n\nAfter 2027 72\n\n|\n\nTOTAL \n \n---|---|---|---|---|---|---|---|---|--- \n \nDG: CNECT \n \n\u2022 Human resources\n\n|\n\n0.760\n\n|\n\n0.760\n\n|\n\n0.760\n\n|\n\n0.760\n\n|\n\n0.760\n\n|\n\n0.760\n\n|\n\n3.800 \n \n\u2022 Other administrative 
expenditure\n\n|\n\n0.010\n\n|\n\n0.010\n\n|\n\n0.010\n\n|\n\n0.010\n\n|\n\n0.010\n\n|\n\n0.010\n\n|\n\n0.050 \n \nTOTAL DG CNECT\n\n|\n\nAppropriations\n\n|\n\n0.760\n\n|\n\n0.760\n\n|\n\n0.760\n\n|\n\n0.760\n\n|\n\n0.760\n\n|\n\n0.760\n\n|\n\n3.850 \n \nEuropean Data Protection Supervisor\n\n| \n \n\u2022 Human resources\n\n|\n\n0.760\n\n|\n\n0.",
"0b349579-2999-4fcb-ade0-090a5b384899": "760\n\n|\n\n0.760\n\n|\n\n0.760\n\n|\n\n0.760\n\n|\n\n0.760\n\n|\n\n0.760\n\n|\n\n3.800 \n \n\u2022 Other administrative expenditure\n\n|\n\n0.010\n\n|\n\n0.010\n\n|\n\n0.010\n\n|\n\n0.010\n\n|\n\n0.010\n\n|\n\n0.010\n\n|\n\n0.050 \n \nTOTAL DG CNECT\n\n|\n\nAppropriations\n\n|\n\n0.760\n\n|\n\n0.760\n\n|\n\n0.760\n\n|\n\n0.760\n\n|\n\n0.760\n\n|\n\n0.760\n\n|\n\n3.850 \n \nEuropean Data Protection Supervisor\n\n| \n \n\u2022 Human resources\n\n|\n\n0.760\n\n|\n\n0.760\n\n|\n\n0.760\n\n|\n\n0.760\n\n|\n\n0.760\n\n|\n\n0.760\n\n|\n\n3.800 \n \n\u2022 Other administrative expenditure\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \nTOTAL EDPS\n\n|\n\nAppropriations\n\n|\n\n0.760\n\n|\n\n0.760\n\n|\n\n0.760\n\n|\n\n0.760\n\n|\n\n0.760\n\n|\n\n0.760\n\n|\n\n3.800 \n \nTOTAL appropriations \nunder HEADING 7 \nof the multiannual financial framework\n\n|\n\n(Total commitments = Total payments)\n\n|\n\n1.530\n\n|\n\n1.530\n\n|\n\n1.530\n\n|\n\n1.530\n\n|\n\n1.530\n\n|\n\n1.530\n\n|\n\n7.650 \n \nEUR million (to three decimal places)\n\n|\n\n|\n\n|\n\nYear \n2022\n\n|\n\nYear \n2023\n\n|\n\nYear \n2024\n\n|\n\nYear \n2025\n\n|\n\nYear 2026\n\n|\n\nYear 2027\n\n|\n\n|\n\nTOTAL \n \n---|---|---|---|---|---|---|---|---|---|--- \n \nTOTAL appropriations \nunder HEADINGS 1 to 7 \nof the multiannual financial framework\n\n|\n\nCommitments\n\n|\n\n|\n\n2.770\n\n|\n\n1.770\n\n|\n\n1.770\n\n|\n\n1.770\n\n|\n\n1.770\n\n|\n\n|\n\n9.850 \n \n|\n\nPayments\n\n|\n\n|\n\n2.370\n\n|\n\n1.870\n\n|\n\n1.870\n\n|\n\n1.870\n\n|\n\n1.870\n\n|\n\n9.850 \n \n3.2.2.Estimated output funded with operational appropriations\n\nCommitment appropriations in EUR million (to three decimal places)\n\nIndicate objectives and outputs\n\n\u21e9\n\n|\n\n|\n\n|\n\nYear \n2022\n\n|\n\nYear \n2023\n\n|\n\nYear \n2024\n\n|\n\nYear \n2025\n\n|\n\nYear \n2026\n\n|\n\nYear \n2027\n\n|\n\nAfter \n2027 73\n\n|\n\nTOTAL \n \n---|---|---|---|---|---|---|---|---|---|--- \n \n|\n\nOUTPUTS \n \n|\n\nType\n\n|\n\nAverage cost\n\n|\n\nNo\n\n|\n\nCost\n\n|\n\nNo\n\n|\n\nCost\n\n|\n\nNo\n\n|\n\nCost\n\n|\n\nNo\n\n|\n\nCost\n\n|\n\nNo\n\n|\n\nCost\n\n|\n\nNo\n\n|\n\nCost\n\n|\n\nNo\n\n|\n\nCost\n\n|\n\nTotal No\n\n|\n\nTotal cost \n \nSPECIFIC OBJECTIVE No 1 74 \u2026\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \nDatabase\n\n|\n\n|\n\n|\n\n|\n\n|\n\n1\n\n|\n\n1.000\n\n|\n\n1\n\n|\n\n|\n\n1\n\n|\n\n|\n\n1\n\n|\n\n|\n\n1\n\n|\n\n|\n\n1\n\n|\n\n0.100\n\n|\n\n1\n\n|\n\n1.000 \n \nMeetings- Output\n\n|\n\n|\n\n|\n\n|\n\n|\n\n10\n\n|\n\n0.200\n\n|\n\n10\n\n|\n\n0.200\n\n|\n\n10\n\n|\n\n0.200\n\n|\n\n10\n\n|\n\n0.200\n\n|\n\n10\n\n|\n\n0.200\n\n|\n\n10\n\n|\n\n0.200\n\n|\n\n50\n\n|\n\n1.000 \n \nCommunication activities\n\n|\n\n|\n\n|\n\n|\n\n|\n\n2\n\n|\n\n0.040\n\n|\n\n2\n\n|\n\n0.040\n\n|\n\n2\n\n|\n\n0.040\n\n|\n\n2\n\n|\n\n0.040\n\n|\n\n2\n\n|\n\n0.040\n\n|\n\n2\n\n|\n\n0.040\n\n|\n\n10\n\n|\n\n0.040 \n \nSubtotal for specific objective No 1\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \nSPECIFIC OBJECTIVE No 2 .\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \n\\- Output\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \nSubtotal for specific objective No 2\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n 
\nTOTALS\n\n|\n\n|\n\n|\n\n13\n\n|\n\n0.240\n\n|\n\n13\n\n|\n\n0.240\n\n|\n\n13\n\n|\n\n0.240\n\n|\n\n13\n\n|\n\n0.240\n\n|\n\n13\n\n|\n\n0.240\n\n|\n\n13\n\n|\n\n0.100\n\n|\n\n65\n\n|\n\n2.200 \n \n3.2.3.",
"e5da2399-5778-415a-a000-fc0ee787412b": "040\n\n|\n\n2\n\n|\n\n0.040\n\n|\n\n2\n\n|\n\n0.040\n\n|\n\n10\n\n|\n\n0.040 \n \nSubtotal for specific objective No 1\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \nSPECIFIC OBJECTIVE No 2 .\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \n\\- Output\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \nSubtotal for specific objective No 2\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \nTOTALS\n\n|\n\n|\n\n|\n\n13\n\n|\n\n0.240\n\n|\n\n13\n\n|\n\n0.240\n\n|\n\n13\n\n|\n\n0.240\n\n|\n\n13\n\n|\n\n0.240\n\n|\n\n13\n\n|\n\n0.240\n\n|\n\n13\n\n|\n\n0.100\n\n|\n\n65\n\n|\n\n2.200 \n \n3.2.3.Summary of estimated impact on administrative appropriations\n\n\u2013\u25fb The proposal/initiative does not require the use of appropriations of an\nadministrative nature\n\n\u2013X The proposal/initiative requires the use of appropriations of an\nadministrative nature, as explained below:\n\nEUR million (to three decimal places)\n\n|\n\nYear \n2022\n\n|\n\nYear \n2023\n\n|\n\nYear \n2024\n\n|\n\nYear \n2025\n\n|\n\nYear \n2026\n\n|\n\nYear \n2027\n\n|\n\nYearly after\n\n2027 75\n\n|\n\nTOTAL \n \n---|---|---|---|---|---|---|---|--- \n \nHEADING 7 \nof the multiannual financial framework\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \n---|---|---|---|---|---|---|---|--- \n \nHuman resources\n\n|\n\n|\n\n1.520\n\n|\n\n1.520\n\n|\n\n1.520\n\n|\n\n1.520\n\n|\n\n1.520\n\n|\n\n1.520\n\n|\n\n7.600 \n \nOther administrative expenditure\n\n|\n\n|\n\n0.010\n\n|\n\n0.010\n\n|\n\n0.010\n\n|\n\n0.010\n\n|\n\n0.010\n\n|\n\n0.010\n\n|\n\n0.050 \n \nSubtotal HEADING 7 \nof the multiannual financial framework\n\n|\n\n|\n\n1.530\n\n|\n\n1.530\n\n|\n\n1.530\n\n|\n\n1.530\n\n|\n\n1.530\n\n|\n\n1.530\n\n|\n\n7.650 \n \nOutside HEADING 7 76 \nof the multiannual financial framework\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \n---|---|---|---|---|---|---|---|--- \n \nHuman resources\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \nOther expenditure \nof an administrative nature\n\n|\n\n|\n\n0.240\n\n|\n\n0.240\n\n|\n\n0.240\n\n|\n\n0.240\n\n|\n\n0.240\n\n|\n\n0.240\n\n|\n\n1.20 \n \nSubtotal \noutside HEADING 7 \nof the multiannual financial framework\n\n|\n\n|\n\n0.240\n\n|\n\n0.240\n\n|\n\n0.240\n\n|\n\n0.240\n\n|\n\n0.240\n\n|\n\n0.240\n\n|\n\n1.20 \n \nTOTAL\n\n|\n\n|\n\n1.770\n\n|\n\n1.770\n\n|\n\n1.770\n\n|\n\n1.770\n\n|\n\n1.770\n\n|\n\n1.770\n\n|\n\n8.850 \n \n---|---|---|---|---|---|---|---|--- \n \nThe appropriations required for human resources and other expenditure of an\nadministrative nature will be met by appropriations from the DG that are\nalready assigned to management of the action and/or have been redeployed\nwithin the DG, together if necessary with any additional allocation which may\nbe granted to the managing DG under the annual allocation procedure and in the\nlight of budgetary constraints.\n\n3.2.3.1.Estimated requirements of human resources\n\n\u2013\u25fb The proposal/initiative does not require the use of human resources.\n\n\u2013X The proposal/initiative requires the use of human resources, as explained\nbelow:\n\nEstimate to be expressed in full time equivalent units\n\n.",
"78c1c061-7077-4ab4-8d3b-82d0c44fb941": "240\n\n|\n\n1.20 \n \nTOTAL\n\n|\n\n|\n\n1.770\n\n|\n\n1.770\n\n|\n\n1.770\n\n|\n\n1.770\n\n|\n\n1.770\n\n|\n\n1.770\n\n|\n\n8.850 \n \n---|---|---|---|---|---|---|---|--- \n \nThe appropriations required for human resources and other expenditure of an\nadministrative nature will be met by appropriations from the DG that are\nalready assigned to management of the action and/or have been redeployed\nwithin the DG, together if necessary with any additional allocation which may\nbe granted to the managing DG under the annual allocation procedure and in the\nlight of budgetary constraints.\n\n3.2.3.1.Estimated requirements of human resources\n\n\u2013\u25fb The proposal/initiative does not require the use of human resources.\n\n\u2013X The proposal/initiative requires the use of human resources, as explained\nbelow:\n\nEstimate to be expressed in full time equivalent units\n\n.\n\n|\n\n|\n\nYear \n2023\n\n|\n\nYear 2024\n\n|\n\nYear 2025\n\n|\n\n2026\n\n|\n\n2027\n\n|\n\nAfter 2027 77\n\n| \n \n---|---|---|---|---|---|---|---|--- \n \n\u2022 Establishment plan posts (officials and temporary staff) \n \n20 01 02 01 (Headquarters and Commission\u2019s Representation Offices)\n\n|\n\n10\n\n|\n\n10\n\n|\n\n10\n\n|\n\n10\n\n|\n\n10\n\n|\n\n10\n\n| \n \n20 01 02 03 (Delegations)\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \n01 01 01 01 (Indirect research)\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \n01 01 01 11 (Direct research)\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \nOther budget lines (specify)\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \n\u2022 External staff (in Full Time Equivalent unit: FTE) 78 \n \n20 02 01 (AC, END, INT from the \u2018global envelope\u2019)\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \n20 02 03 (AC, AL, END, INT and JPD in the delegations)\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \nXX 01 xx yy zz 79\n\n|\n\n\\- at Headquarters\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \n|\n\n\\- in Delegations\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \n01 01 01 02 (AC, END, INT - Indirect research)\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \n01 01 01 12 (AC, END, INT - Direct research)\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \nOther budget lines (specify)\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \nTOTAL\n\n|\n\n|\n\n10\n\n|\n\n10\n\n|\n\n10\n\n|\n\n10\n\n|\n\n10\n\n|\n\n10\n\n| \n \nXX is the policy area or budget title concerned.\n\nThe human resources required will be met by staff from the DG who are already\nassigned to management of the action and/or have been redeployed within the\nDG, together if necessary with any additional allocation which may be granted\nto the managing DG under the annual allocation procedure and in the light of\nbudgetary constraints.\n\nEDPS is expected to provide half of the resources required.\n\nDescription of tasks to be carried out:\n\nOfficials and temporary staff\n\n|\n\nTo prepare a total of 13-16 meetings, draft reports, continue policy work,\ne.g. regarding future amendments of the list of high-risk AI applications, and\nmaintain relations with Member States\u2019 authorities will require four AD FTE\nand 1 AST FTE.\n\nFor AI systems developed by the EU institutions, the European Data Protection\nSupervisor is responsible. Based on past experience, it can be estimated that\n5 AD FTE are reuqired to fulfill the EDPS responsibilites under the draft\nlegislation. 
\n \n---|--- \n \nExternal staff\n\n| \n \n3.2.4.Compatibility with the current multiannual financial framework\n\nThe proposal/initiative:\n\n\u2013X can be fully financed through redeployment within the relevant heading of\nthe Multiannual Financial Framework (MFF).\n\nNo reporgramming is needed.\n\n\u2013\u25fb requires use of the unallocated margin under the relevant heading of the\nMFF and/or use of the special instruments as defined in the MFF Regulation.\n\nExplain what is required, specifying the headings and budget lines concerned,\nthe corresponding amounts, and the instruments proposed to be used.\n\n\u2013\u25fb requires a revision of the MFF.\n\nExplain what is required, specifying the headings and budget lines concerned\nand the corresponding amounts.",
"f36840e7-46d1-452d-afea-a24079a6d202": "Based on past experience, it can be estimated that\n5 AD FTE are reuqired to fulfill the EDPS responsibilites under the draft\nlegislation. \n \n---|--- \n \nExternal staff\n\n| \n \n3.2.4.Compatibility with the current multiannual financial framework\n\nThe proposal/initiative:\n\n\u2013X can be fully financed through redeployment within the relevant heading of\nthe Multiannual Financial Framework (MFF).\n\nNo reporgramming is needed.\n\n\u2013\u25fb requires use of the unallocated margin under the relevant heading of the\nMFF and/or use of the special instruments as defined in the MFF Regulation.\n\nExplain what is required, specifying the headings and budget lines concerned,\nthe corresponding amounts, and the instruments proposed to be used.\n\n\u2013\u25fb requires a revision of the MFF.\n\nExplain what is required, specifying the headings and budget lines concerned\nand the corresponding amounts.\n\n3.2.5.Third-party contributions\n\nThe proposal/initiative:\n\n\u2013X does not provide for co-financing by third parties\n\n\u2013\u25fb provides for the co-financing by third parties estimated below:\n\nAppropriations in EUR million (to three decimal places)\n\n|\n\nYear \nN 80\n\n|\n\nYear \nN+1\n\n|\n\nYear \nN+2\n\n|\n\nYear \nN+3\n\n|\n\nEnter as many years as necessary to show the duration of the impact (see point\n1.6)\n\n|\n\nTotal \n \n---|---|---|---|---|---|--- \n \nSpecify the co-financing body\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \nTOTAL appropriations co-financed\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \n \n\n \n\n3.3.Estimated impact on revenue\n\n\u2013\u25fb The proposal/initiative has the following financial impact:\n\n\u2013\u25fb The proposal/initiative has the following financial impact:\n\n\u2013\u25fb on other revenue\n\n\u2013\u25fb on other revenue\n\n\u2013Please indicate, if the revenue is assigned to expenditure lines \u25fb\n\nEUR million (to three decimal places)\n\nBudget revenue line:\n\n|\n\nAppropriations available for the current financial year\n\n|\n\nImpact of the proposal/initiative 81 \n \n---|---|--- \n \n|\n\n|\n\nYear \nN\n\n|\n\nYear \nN+1\n\n|\n\nYear \nN+2\n\n|\n\nYear \nN+3\n\n|\n\nEnter as many years as necessary to show the duration of the impact (see point\n1.6) \n \nArticle \u2026\u2026\u2026\u2026.\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n|\n\n| \n \nFor assigned revenue, specify the budget expenditure line(s) affected.\n\nOther remarks (e.g. method/formula used for calculating the impact on revenue\nor any other information).\n\n (1) <https://ec.europa.eu/commission/sites/beta-political/files/political-guidelines-next-commission_en.pdf>\n (2) European Commission, White Paper on Artificial Intelligence - A European approach to excellence and trust, COM(2020) 65 final, 2020.\n (3) European Council, [Special meeting of the European Council (1 and 2 October 2020) \u2013 Conclusions](https://www.consilium.europa.eu/media/45910/021020-euco-final-conclusions.pdf) , EUCO 13/20, 2020, p. 6.\n (4) European Parliament resolution of 20 October 2020 with recommendations to the Commission on a framework of ethical aspects of artificial intelligence, robotics and related technologies, 2020/2012(INL).\n (5) European Council, [European Council meeting (19 October 2017) \u2013 Conclusion](https://www.consilium.europa.eu/media/21620/19-euco-final-conclusions-en.pdf) EUCO 14/17, 2017, p. 
8.\n (6) Council of the European Union, [Artificial intelligence b) Conclusions on the coordinated plan on artificial intelligence-Adoption](https://data.consilium.europa.eu/doc/document/ST-6177-2019-INIT/en/pdf) 6177/19, 2019.\n (7) European Council, [Special meeting of the European Council (1and 2 October 2020) \u2013 Conclusions](https://www.consilium.europa.eu/media/45910/021020-euco-final-conclusions.pdf) EUCO 13/20, 2020.\n (8) Council of the European Union, [Presidency conclusions - The Charter of Fundamental Rights in the context of Artificial Intelligence and Digital Change](https://www.consilium.europa.eu/media/46496/st11481-en20.pdf) , 11481/20, 2020.",
"9e57434d-4a2a-415f-9ecb-e74cfaf00b80": "8.\n (6) Council of the European Union, [Artificial intelligence b) Conclusions on the coordinated plan on artificial intelligence-Adoption](https://data.consilium.europa.eu/doc/document/ST-6177-2019-INIT/en/pdf) 6177/19, 2019.\n (7) European Council, [Special meeting of the European Council (1and 2 October 2020) \u2013 Conclusions](https://www.consilium.europa.eu/media/45910/021020-euco-final-conclusions.pdf) EUCO 13/20, 2020.\n (8) Council of the European Union, [Presidency conclusions - The Charter of Fundamental Rights in the context of Artificial Intelligence and Digital Change](https://www.consilium.europa.eu/media/46496/st11481-en20.pdf) , 11481/20, 2020.\n (9) European Parliament resolution of 20 October 2020 on a framework of ethical aspects of artificial intelligence, robotics and related technologies, [2020/2012(INL)](https://oeil.secure.europarl.europa.eu/oeil/popups/ficheprocedure.do?lang=en&reference=2020/2012\\(INL\\)) .\n (10) European Parliament resolution of 20 October 2020 on a civil liability regime for artificial intelligence, [2020/2014(INL).](https://www.europarl.europa.eu/doceo/document/TA-9-2020-0276_EN.html)\n (11) European Parliament resolution of 20 October 2020 on intellectual property rights for the development of artificial intelligence technologies, [2020/2015(INI).](https://www.europarl.europa.eu/doceo/document/A-9-2020-0176_EN.html)\n (12) European Parliament Draft Report, Artificial intelligence in criminal law and its use by the police and judicial authorities in criminal matters, [2020/2016(INI)](https://oeil.secure.europarl.europa.eu/oeil/popups/ficheprocedure.do?lang=en&reference=2020/2016\\(INI\\)) [. ](. )\n (13) European Parliament Draft Report, Artificial intelligence in education, culture and the audiovisual sector, [2020/2017(INI)](https://oeil.secure.europarl.europa.eu/oeil/popups/ficheprocedure.do?reference=2020/2017\\(INI\\)&l=en) [. In that regard, the Commission has adopted the Digital Education Action Plan 2021-2027: Resetting education and training for the digital age, which foresees the development of ethical guidelines in AI and Data usage in education \u2013 Commission Communication COM(2020) 624 final.](. In that regard, the Commission has adopted the Digital Education Action Plan 2021-2027: Resetting education and training for the digital age, which foresees the development of ethical guidelines in AI and Data usage in education \u2013 Commission Communication COM\\(2020\\) 624 final.)\n (14) Directive 2013/36/EU of the European Parliament and of the Council of 26 June 2013 on access to the activity of credit institutions and the prudential supervision of credit institutions and investment firms, amending Directive 2002/87/EC and repealing Directives 2006/48/EC and 2006/49/EC Text with EEA relevance, OJ L 176, 27.6.2013, p. 338\u2013436.\n (15) Directive 2000/31/EC of the European Parliament and of the Council of 8 June 2000 on certain legal aspects of information society services, in particular electronic commerce, in the Internal Market ('Directive on electronic commerce'), OJ L 178, 17.7.2000, p. 
1\u201316.\n (16) See Proposal for a REGULATION OF THE EUROPEAN PARLIAMENT AND OF THE COUNCIL on a Single Market For Digital Services (Digital Services Act) and amending Directive 2000/31/EC COM/2020/825 final.\n (17) Communication from the Commission, Shaping Europe's Digital Future, COM/2020/67 final.\n (18) [2030 Digital Compass: the European way for the Digital Decade](https://ec.europa.eu/info/strategy/priorities-2019-2024/europe-fit-digital-age/europes-digital-decade-digital-targets-2030_en) .",
"beea6879-dad3-4d83-bc5e-f787bb3eac57": "1\u201316.\n (16) See Proposal for a REGULATION OF THE EUROPEAN PARLIAMENT AND OF THE COUNCIL on a Single Market For Digital Services (Digital Services Act) and amending Directive 2000/31/EC COM/2020/825 final.\n (17) Communication from the Commission, Shaping Europe's Digital Future, COM/2020/67 final.\n (18) [2030 Digital Compass: the European way for the Digital Decade](https://ec.europa.eu/info/strategy/priorities-2019-2024/europe-fit-digital-age/europes-digital-decade-digital-targets-2030_en) .\n (19) Proposal for a Regulation on European data governance (Data Governance Act) [COM/2020/767](https://eur-lex.europa.eu/legal-content/EN/TXT/?uri=CELEX%3A52020PC0767) [.](.)\n (20) Directive (EU) 2019/1024 of the European Parliament and of the Council of 20 June 2019 on open data and the re-use of public sector information, PE/28/2019/REV/1, OJ L 172, 26.6.2019, p. 56\u201383.\n (21) [Commission Communication, A European strategy for data COM/2020/66 final.](Commission Communication, A European strategy for data COM/2020/66 final.)\n (22) [See all consultation results here.](https://ec.europa.eu/digital-single-market/en/news/white-paper-artificial-intelligence-public-consultation-towards-european-approach-excellence)\n (23) European Commission, [Building Trust in Human-Centric Artificial Intelligence](https://ec.europa.eu/digital-single-market/en/news/communication-building-trust-human-centric-artificial-intelligence) , COM(2019) 168.\n (24) HLEG, [Ethics Guidelines for Trustworthy AI](https://ec.europa.eu/newsroom/dae/document.cfm?doc_id=60419) , 2019.\n (25) HLEG, [Assessment List for Trustworthy Artificial Intelligence (ALTAI) for self-assessment](https://ec.europa.eu/digital-single-market/en/news/assessment-list-trustworthy-artificial-intelligence-altai-self-assessment) , 2020.\n (26) The AI Alliance is a multi-stakeholder forum launched in June 2018, AI Alliance <https://ec.europa.eu/digital-single-market/en/european-ai-alliance>\n (27) European Commission, [Inception Impact Assessment For a Proposal for a legal act of the European Parliament and the Council laying down requirements for Artificial Intelligence](https://ec.europa.eu/info/law/better-regulation/have-your-say/initiatives/12527-Artificial-intelligence-ethical-and-legal-requirements) [.](.)\n (28) For details of all the consultations that have been carried out see Annex 2 of the impact assessment.\n (29) High-Level Expert Group on Artificial Intelligence, [Ethics Guidelines for Trustworthy AI](https://ec.europa.eu/newsroom/dae/document.cfm?doc_id=60419) , 2019.\n (30) They were also endorsed by the Commission in its 2019 Communication on human-centric approach to AI.\n (31) OJ C [\u2026], [\u2026], p. [\u2026].\n (32) OJ C [\u2026], [\u2026], p. [\u2026].\n (33) European Council, Special meeting of the European Council (1 and 2 October 2020) \u2013 Conclusions, EUCO 13/20, 2020, p. 6.\n (34) European Parliament resolution of 20 October 2020 with recommendations to the Commission on a framework of ethical aspects of artificial intelligence, robotics and related technologies, 2020/2012(INL).\n (35) Regulation (EU) 2016/679 of the European Parliament and of the Council of 27 April 2016 on the protection of natural persons with regard to the processing of personal data and on the free movement of such data, and repealing Directive 95/46/EC (General Data Protection Regulation) (OJ L 119, 4.5.2016, p. 1).",
"dc8cd6dd-880f-43a6-94f9-bcda5df70247": "[\u2026].\n (32) OJ C [\u2026], [\u2026], p. [\u2026].\n (33) European Council, Special meeting of the European Council (1 and 2 October 2020) \u2013 Conclusions, EUCO 13/20, 2020, p. 6.\n (34) European Parliament resolution of 20 October 2020 with recommendations to the Commission on a framework of ethical aspects of artificial intelligence, robotics and related technologies, 2020/2012(INL).\n (35) Regulation (EU) 2016/679 of the European Parliament and of the Council of 27 April 2016 on the protection of natural persons with regard to the processing of personal data and on the free movement of such data, and repealing Directive 95/46/EC (General Data Protection Regulation) (OJ L 119, 4.5.2016, p. 1).\n (36) Regulation (EU) 2018/1725 of the European Parliament and of the Council of 23 October 2018 on the protection of natural persons with regard to the processing of personal data by the Union institutions, bodies, offices and agencies and on the free movement of such data, and repealing Regulation (EC) No 45/2001 and Decision No 1247/2002/EC (OJ L 295, 21.11.2018, p. 39)\n (37) Directive (EU) 2016/680 of the European Parliament and of the Council of 27 April 2016 on the protection of natural persons with regard to the processing of personal data by competent authorities for the purposes of the prevention, investigation, detection or prosecution of criminal offences or the execution of criminal penalties, and on the free movement of such data, and repealing Council Framework Decision 2008/977/JHA (Law Enforcement Directive) (OJ L 119, 4.5.2016, p. 89). \n (38) Council Framework Decision 2002/584/JHA of 13 June 2002 on the European arrest warrant and the surrender procedures between Member States (OJ L 190, 18.7.2002, p. 1).\n (39) Regulation (EC) No 300/2008 of the European Parliament and of the Council of 11 March 2008 on common rules in the field of civil aviation security and repealing Regulation (EC) No 2320/2002 (OJ L 97, 9.4.2008, p. 72).\n (40) Regulation (EU) No 167/2013 of the European Parliament and of the Council of 5 February 2013 on the approval and market surveillance of agricultural and forestry vehicles (OJ L 60, 2.3.2013, p. 1).\n (41) Regulation (EU) No 168/2013 of the European Parliament and of the Council of 15 January 2013 on the approval and market surveillance of two- or three-wheel vehicles and quadricycles (OJ L 60, 2.3.2013, p. 52).\n (42) Directive 2014/90/EU of the European Parliament and of the Council of 23 July 2014 on marine equipment and repealing Council Directive 96/98/EC (OJ L 257, 28.8.2014, p. 146).\n (43) Directive (EU) 2016/797 of the European Parliament and of the Council of 11 May 2016 on the interoperability of the rail system within the European Union (OJ L 138, 26.5.2016, p. 44).\n (44) Regulation (EU) 2018/858 of the European Parliament and of the Council of 30 May 2018 on the approval and market surveillance of motor vehicles and their trailers, and of systems, components and separate technical units intended for such vehicles, amending Regulations (EC) No 715/2007 and (EC) No 595/2009 and repealing Directive 2007/46/EC (OJ L 151, 14.6.2018, p. 1).",
"fe0d20f6-9866-483e-803a-fcfff7121a75": "146).\n (43) Directive (EU) 2016/797 of the European Parliament and of the Council of 11 May 2016 on the interoperability of the rail system within the European Union (OJ L 138, 26.5.2016, p. 44).\n (44) Regulation (EU) 2018/858 of the European Parliament and of the Council of 30 May 2018 on the approval and market surveillance of motor vehicles and their trailers, and of systems, components and separate technical units intended for such vehicles, amending Regulations (EC) No 715/2007 and (EC) No 595/2009 and repealing Directive 2007/46/EC (OJ L 151, 14.6.2018, p. 1).\n (45) Regulation (EU) 2018/1139 of the European Parliament and of the Council of 4 July 2018 on common rules in the field of civil aviation and establishing a European Union Aviation Safety Agency, and amending Regulations (EC) No 2111/2005, (EC) No 1008/2008, (EU) No 996/2010, (EU) No 376/2014 and Directives 2014/30/EU and 2014/53/EU of the European Parliament and of the Council, and repealing Regulations (EC) No 552/2004 and (EC) No 216/2008 of the European Parliament and of the Council and Council Regulation (EEC) No 3922/91 (OJ L 212, 22.8.2018, p. 1).\n (46) Regulation (EU) 2019/2144 of the European Parliament and of the Council of 27 November 2019 on type-approval requirements for motor vehicles and their trailers, and systems, components and separate technical units intended for such vehicles, as regards their general safety and the protection of vehicle occupants and vulnerable road users, amending Regulation (EU) 2018/858 of the European Parliament and of the Council and repealing Regulations (EC) No 78/2009, (EC) No 79/2009 and (EC) No 661/2009 of the European Parliament and of the Council and Commission Regulations (EC) No 631/2009, (EU) No 406/2010, (EU) No 672/2010, (EU) No 1003/2010, (EU) No 1005/2010, (EU) No 1008/2010, (EU) No 1009/2010, (EU) No 19/2011, (EU) No 109/2011, (EU) No 458/2011, (EU) No 65/2012, (EU) No 130/2012, (EU) No 347/2012, (EU) No 351/2012, (EU) No 1230/2012 and (EU) 2015/166 (OJ L 325, 16.12.2019, p. 1).\n (47) Regulation (EU) 2017/745 of the European Parliament and of the Council of 5 April 2017 on medical devices, amending Directive 2001/83/EC, Regulation (EC) No 178/2002 and Regulation (EC) No 1223/2009 and repealing Council Directives 90/385/EEC and 93/42/EEC (OJ L 117, 5.5.2017, p. 1).\n (48) Regulation (EU) 2017/746 of the European Parliament and of the Council of 5 April 2017 on in vitro diagnostic medical devices and repealing Directive 98/79/EC and Commission Decision 2010/227/EU (OJ L 117, 5.5.2017, p. 176).\n (49) Directive 2013/32/EU of the European Parliament and of the Council of 26 June 2013 on common procedures for granting and withdrawing international protection (OJ L 180, 29.6.2013, p. 60).\n (50) Regulation (EC) No 810/2009 of the European Parliament and of the Council of 13 July 2009 establishing a Community Code on Visas (Visa Code) (OJ L 243, 15.9.2009, p. 1).",
"0f9f9b4f-ef2f-4000-895e-971c7244c637": "1).\n (48) Regulation (EU) 2017/746 of the European Parliament and of the Council of 5 April 2017 on in vitro diagnostic medical devices and repealing Directive 98/79/EC and Commission Decision 2010/227/EU (OJ L 117, 5.5.2017, p. 176).\n (49) Directive 2013/32/EU of the European Parliament and of the Council of 26 June 2013 on common procedures for granting and withdrawing international protection (OJ L 180, 29.6.2013, p. 60).\n (50) Regulation (EC) No 810/2009 of the European Parliament and of the Council of 13 July 2009 establishing a Community Code on Visas (Visa Code) (OJ L 243, 15.9.2009, p. 1).\n (51) Regulation (EC) No 765/2008 of the European Parliament and of the Council of 9 July 2008 setting out the requirements for accreditation and market surveillance relating to the marketing of products and repealing Regulation (EEC) No 339/93 (OJ L 218, 13.8.2008, p. 30).\n (52) Decision No 768/2008/EC of the European Parliament and of the Council of 9 July 2008 on a common framework for the marketing of products, and repealing Council Decision 93/465/EEC (OJ L 218, 13.8.2008, p. 82).\n (53) Regulation (EU) 2019/1020 of the European Parliament and of the Council of 20 June 2019 on market surveillance and compliance of products and amending Directive 2004/42/EC and Regulations (EC) No 765/2008 and (EU) No 305/2011 (Text with EEA relevance) (OJ L 169, 25.6.2019, p. 1\u201344).\n (54) Regulation (EU) No 1025/2012 of the European Parliament and of the Council of 25 October 2012 on European standardisation, amending Council Directives 89/686/EEC and 93/15/EEC and Directives 94/9/EC, 94/25/EC, 95/16/EC, 97/23/EC, 98/34/EC, 2004/22/EC, 2007/23/EC, 2009/23/EC and 2009/105/EC of the European Parliament and of the Council and repealing Council Decision 87/95/EEC and Decision No 1673/2006/EC of the European Parliament and of the Council (OJ L 316, 14.11.2012, p. 12).\n (55) Regulation (EU) 2016/679 of the European Parliament and of the Council of 27 April 2016 on the protection of natural persons with regard to the processing of personal data and on the free movement of such data, and repealing Directive 95/46/EC (General Data Protection Regulation) (OJ L 119, 4.5.2016, p. 1).\n (56) Directive 2013/36/EU of the European Parliament and of the Council of 26 June 2013 on access to the activity of credit institutions and the prudential supervision of credit institutions and investment firms, amending Directive 2002/87/EC and repealing Directives 2006/48/EC and 2006/49/EC (OJ L 176, 27.6.2013, p. 338).\n (57) Directive 2001/95/EC of the European Parliament and of the Council of 3 December 2001 on general product safety (OJ L 11, 15.1.2002, p. 4).\n (58) OJ L 123, 12.5.2016, p. 1.\n (59) Regulation (EU) No 182/2011 of the European Parliament and of the Council of 16 February 2011 laying down the rules and general principles concerning mechanisms for control by the Member States of the Commission's exercise of implementing powers (OJ L 55, 28.2.2011, p.13).",
"49aecd20-ae79-4098-9a8f-407ef8e13126": "338).\n (57) Directive 2001/95/EC of the European Parliament and of the Council of 3 December 2001 on general product safety (OJ L 11, 15.1.2002, p. 4).\n (58) OJ L 123, 12.5.2016, p. 1.\n (59) Regulation (EU) No 182/2011 of the European Parliament and of the Council of 16 February 2011 laying down the rules and general principles concerning mechanisms for control by the Member States of the Commission's exercise of implementing powers (OJ L 55, 28.2.2011, p.13).\n (60) Directive 2000/31/EC of the European Parliament and of the Council of 8 June 2000 on certain legal aspects of information society services, in particular electronic commerce, in the Internal Market ('Directive on electronic commerce') (OJ L 178, 17.7.2000, p. 1).\n (61) Commission Recommendation of 6 May 2003 concerning the definition of micro, small and medium-sized enterprises (OJ L 124, 20.5.2003, p. 36).\n (62) Council Framework Decision 2002/584/JHA of 13 June 2002 on the European arrest warrant and the surrender procedures between Member States (OJ L 190, 18.7.2002, p. 1).\n (63) Regulation (EU) 2019/881 of the European Parliament and of the Council of 17 April 2019 on ENISA (the European Union Agency for Cybersecurity) and on information and communications technology cybersecurity certification and repealing Regulation (EU) No 526/2013 (Cybersecurity Act) (OJ L 151, 7.6.2019, p. 1).\n (64) As referred to in Article 54(2)(a) or (b) of the Financial Regulation\n (65) Details of management modes and references to the Financial Regulation may be found on the BudgWeb site: <http://www.cc.cec/budg/man/budgmanag/budgmanag_en.html>\n (66) Diff. = Differentiated appropriations / Non-diff. = Non-differentiated appropriations.\n (67) EFTA: European Free Trade Association. \n (68) Candidate countries and, where applicable, potential candidate countries from the Western Balkans.\n (69) Indicative and dependent on budget availability.\n (70) According to the official budget nomenclature.\n (71) Technical and/or administrative assistance and expenditure in support of the implementation of EU programmes and/or actions (former \u2018BA\u2019 lines), indirect research, direct research.\n (72) All figures in this column are indicative and subject to the continuation of the programmes and availability of appropriations\n (73) All figures in this column are indicative and subject to the continuation of the programmes and availability of appropriations\n (74) As described in point 1.4.2. \u2018Specific objective(s)\u2026\u2019 \n (75) All figures in this column are indicative and subject to the continuation of the programmes and availability of appropriations.\n (76) Technical and/or administrative assistance and expenditure in support of the implementation of EU programmes and/or actions (former \u2018BA\u2019 lines), indirect research, direct research.\n (77) All figures in this column are indicative and subject to the continuation of the programmes and availability of appropriations.\n (78) AC = Contract Staff; AL = Local Staff; END = Seconded National Expert; INT = agency staff; JPD = Junior Professionals in Delegations. \n (79) Sub-ceiling for external staff covered by operational appropriations (former \u2018BA\u2019 lines).\n (80) Year N is the year in which implementation of the proposal/initiative starts. Please replace \"N\" by the expected first year of implementation (for instance: 2021). 
The same for the following years.\n (81) As regards traditional own resources (customs duties, sugar levies), the amounts indicated must be net amounts, i.e. gross amounts after deduction of 20 % for collection costs.\n\n \n\n!",
"ebb5a6a8-51a4-412b-b6c5-1fe8f374304e": "(77) All figures in this column are indicative and subject to the continuation of the programmes and availability of appropriations.\n (78) AC = Contract Staff; AL = Local Staff; END = Seconded National Expert; INT = agency staff; JPD = Junior Professionals in Delegations. \n (79) Sub-ceiling for external staff covered by operational appropriations (former \u2018BA\u2019 lines).\n (80) Year N is the year in which implementation of the proposal/initiative starts. Please replace \"N\" by the expected first year of implementation (for instance: 2021). The same for the following years.\n (81) As regards traditional own resources (customs duties, sugar levies), the amounts indicated must be net amounts, i.e. gross amounts after deduction of 20 % for collection costs.\n\n \n\n![european flag](./../../../../images/eclogo.jpg)EUROPEAN COMMISSION\n\nBrussels, 21.4.2021\n\nCOM(2021) 206 final\n\nANNEXES\n\nto the\n\nProposal for a Regulation of the European Parliament and of the Council\n\nLAYING DOWN HARMONISED RULES ON ARTIFICIAL INTELLIGENCE (ARTIFICIAL\nINTELLIGENCE ACT) AND AMENDING CERTAIN UNION LEGISLATIVE ACTS \n \n \n \n \n \n \n \n \n \n\n{SEC(2021) 167 final} - {SWD(2021) 84 final} - {SWD(2021) 85 final}\n\n \n\nANNEX I \nARTIFICIAL INTELLIGENCE TECHNIQUES AND APPROACHES \nreferred to in Article 3, point 1\n\n(a)Machine learning approaches, including supervised, unsupervised and\nreinforcement learning, using a wide variety of methods including deep\nlearning;\n\n(b)Logic- and knowledge-based approaches, including knowledge representation,\ninductive (logic) programming, knowledge bases, inference and deductive\nengines, (symbolic) reasoning and expert systems;\n\n(c)Statistical approaches, Bayesian estimation, search and optimization\nmethods.\n\nANNEX II \nLIST OF UNION HARMONISATION LEGISLATION \nSection A \u2013 List of Union harmonisation legislation based on the New\nLegislative Framework\n\n1.Directive 2006/42/EC of the European Parliament and of the Council of 17 May\n2006 on machinery, and amending Directive 95/16/EC (OJ L 157, 9.6.2006, p. 24)\n[as repealed by the Machinery Regulation];\n\n2.Directive 2009/48/EC of the European Parliament and of the Council of 18\nJune 2009 on the safety of toys (OJ L 170, 30.6.2009, p. 1);\n\n3.Directive 2013/53/EU of the European Parliament and of the Council of 20\nNovember 2013 on recreational craft and personal watercraft and repealing\nDirective 94/25/EC (OJ L 354, 28.12.2013, p. 90);\n\n4.Directive 2014/33/EU of the European Parliament and of the Council of 26\nFebruary 2014 on the harmonisation of the laws of the Member States relating\nto lifts and safety components for lifts (OJ L 96, 29.3.2014, p. 251);\n\n5.Directive 2014/34/EU of the European Parliament and of the Council of 26\nFebruary 2014 on the harmonisation of the laws of the Member States relating\nto equipment and protective systems intended for use in potentially explosive\natmospheres (OJ L 96, 29.3.2014, p. 309);\n\n6.Directive 2014/53/EU of the European Parliament and of the Council of 16\nApril 2014 on the harmonisation of the laws of the Member States relating to\nthe making available on the market of radio equipment and repealing Directive\n1999/5/EC (OJ L 153, 22.5.2014, p. 
62);\n\n7.Directive 2014/68/EU of the European Parliament and of the Council of 15 May\n2014 on the harmonisation of the laws of the Member States relating to the\nmaking available on the market of pressure equipment (OJ L 189, 27.6.2014, p.\n164);\n\n8.Regulation (EU) 2016/424 of the European Parliament and of the Council of 9\nMarch 2016 on cableway installations and repealing Directive 2000/9/EC (OJ L\n81, 31.3.2016, p.",
"265d82e1-4e4f-42d4-ac22-fb000797b0f3": "62);\n\n7.Directive 2014/68/EU of the European Parliament and of the Council of 15 May\n2014 on the harmonisation of the laws of the Member States relating to the\nmaking available on the market of pressure equipment (OJ L 189, 27.6.2014, p.\n164);\n\n8.Regulation (EU) 2016/424 of the European Parliament and of the Council of 9\nMarch 2016 on cableway installations and repealing Directive 2000/9/EC (OJ L\n81, 31.3.2016, p. 1);\n\n9.Regulation (EU) 2016/425 of the European Parliament and of the Council of 9\nMarch 2016 on personal protective equipment and repealing Council Directive\n89/686/EEC (OJ L 81, 31.3.2016, p. 51);\n\n10.Regulation (EU) 2016/426 of the European Parliament and of the Council of 9\nMarch 2016 on appliances burning gaseous fuels and repealing Directive\n2009/142/EC (OJ L 81, 31.3.2016, p. 99);\n\n11.Regulation (EU) 2017/745 of the European Parliament and of the Council of 5\nApril 2017 on medical devices, amending Directive 2001/83/EC, Regulation (EC)\nNo 178/2002 and Regulation (EC) No 1223/2009 and repealing Council Directives\n90/385/EEC and 93/42/EEC (OJ L 117, 5.5.2017, p. 1;\n\n12.Regulation (EU) 2017/746 of the European Parliament and of the Council of 5\nApril 2017 on in vitro diagnostic medical devices and repealing Directive\n98/79/EC and Commission Decision 2010/227/EU (OJ L 117, 5.5.2017, p. 176).\n\nSection B. List of other Union harmonisation legislation\n\n1.Regulation (EC) No 300/2008 of the European Parliament and of the Council of\n11 March 2008 on common rules in the field of civil aviation security and\nrepealing Regulation (EC) No 2320/2002 (OJ L 97, 9.4.2008, p. 72).\n\n2.Regulation (EU) No 168/2013 of the European Parliament and of the Council of\n15 January 2013 on the approval and market surveillance of two- or three-wheel\nvehicles and quadricycles (OJ L 60, 2.3.2013, p. 52);\n\n3.Regulation (EU) No 167/2013 of the European Parliament and of the Council of\n5 February 2013 on the approval and market surveillance of agricultural and\nforestry vehicles (OJ L 60, 2.3.2013, p. 1);\n\n4.Directive 2014/90/EU of the European Parliament and of the Council of 23\nJuly 2014 on marine equipment and repealing Council Directive 96/98/EC (OJ L\n257, 28.8.2014, p. 146);\n\n5.Directive (EU) 2016/797 of the European Parliament and of the Council of 11\nMay 2016 on the interoperability of the rail system within the European Union\n(OJ L 138, 26.5.2016, p. 44).\n\n6.Regulation (EU) 2018/858 of the European Parliament and of the Council of 30\nMay 2018 on the approval and market surveillance of motor vehicles and their\ntrailers, and of systems, components and separate technical units intended for\nsuch vehicles, amending Regulations (EC) No 715/2007 and (EC) No 595/2009 and\nrepealing Directive 2007/46/EC (OJ L 151, 14.6.2018, p. 1); 3.",
"29e856d3-82ad-4d0a-9b9a-2bd06240fa4d": "146);\n\n5.Directive (EU) 2016/797 of the European Parliament and of the Council of 11\nMay 2016 on the interoperability of the rail system within the European Union\n(OJ L 138, 26.5.2016, p. 44).\n\n6.Regulation (EU) 2018/858 of the European Parliament and of the Council of 30\nMay 2018 on the approval and market surveillance of motor vehicles and their\ntrailers, and of systems, components and separate technical units intended for\nsuch vehicles, amending Regulations (EC) No 715/2007 and (EC) No 595/2009 and\nrepealing Directive 2007/46/EC (OJ L 151, 14.6.2018, p. 1); 3. Regulation (EU)\n2019/2144 of the European Parliament and of the Council of 27 November 2019 on\ntype-approval requirements for motor vehicles and their trailers, and systems,\ncomponents and separate technical units intended for such vehicles, as regards\ntheir general safety and the protection of vehicle occupants and vulnerable\nroad users, amending Regulation (EU) 2018/858 of the European Parliament and\nof the Council and repealing Regulations (EC) No 78/2009, (EC) No 79/2009 and\n(EC) No 661/2009 of the European Parliament and of the Council and Commission\nRegulations (EC) No 631/2009, (EU) No 406/2010, (EU) No 672/2010, (EU) No\n1003/2010, (EU) No 1005/2010, (EU) No 1008/2010, (EU) No 1009/2010, (EU) No\n19/2011, (EU) No 109/2011, (EU) No 458/2011, (EU) No 65/2012, (EU) No\n130/2012, (EU) No 347/2012, (EU) No 351/2012, (EU) No 1230/2012 and (EU)\n2015/166 (OJ L 325, 16.12.2019, p. 1);\n\n7.Regulation (EU) 2018/1139 of the European Parliament and of the Council of 4\nJuly 2018 on common rules in the field of civil aviation and establishing a\nEuropean Union Aviation Safety Agency, and amending Regulations (EC) No\n2111/2005, (EC) No 1008/2008, (EU) No 996/2010, (EU) No 376/2014 and\nDirectives 2014/30/EU and 2014/53/EU of the European Parliament and of the\nCouncil, and repealing Regulations (EC) No 552/2004 and (EC) No 216/2008 of\nthe European Parliament and of the Council and Council Regulation (EEC) No\n3922/91 (OJ L 212, 22.8.2018, p. 1), in so far as the design, production and\nplacing on the market of aircrafts referred to in points (a) and (b) of\nArticle 2(1) thereof, where it concerns unmanned aircraft and their engines,\npropellers, parts and equipment to control them remotely, are concerned.\n\nANNEX III \nHIGH-RISK AI SYSTEMS REFERRED TO IN ARTICLE 6(2)\n\nHigh-risk AI systems pursuant to Article 6(2) are the AI systems listed in any\nof the following areas:\n\n1.Biometric identification and categorisation of natural persons:\n\n(a)AI systems intended to be used for the \u2018real-time\u2019 and \u2018post\u2019 remote\nbiometric identification of natural persons;\n\n2.Management and operation of critical infrastructure:\n\n(a)AI systems intended to be used as safety components in the management and\noperation of road traffic and the supply of water, gas, heating and\nelectricity.\n\n3.Education and vocational training:\n\n(a)AI systems intended to be used for the purpose of determining access or\nassigning natural persons to educational and vocational training institutions;\n\n(b)AI systems intended to be used for the purpose of assessing students in\neducational and vocational training institutions and for assessing\nparticipants in tests commonly required for admission to educational\ninstitutions.",
"e7651dc3-0697-4e61-9d75-31fed0252ac2": "ANNEX III \nHIGH-RISK AI SYSTEMS REFERRED TO IN ARTICLE 6(2)\n\nHigh-risk AI systems pursuant to Article 6(2) are the AI systems listed in any\nof the following areas:\n\n1.Biometric identification and categorisation of natural persons:\n\n(a)AI systems intended to be used for the \u2018real-time\u2019 and \u2018post\u2019 remote\nbiometric identification of natural persons;\n\n2.Management and operation of critical infrastructure:\n\n(a)AI systems intended to be used as safety components in the management and\noperation of road traffic and the supply of water, gas, heating and\nelectricity.\n\n3.Education and vocational training:\n\n(a)AI systems intended to be used for the purpose of determining access or\nassigning natural persons to educational and vocational training institutions;\n\n(b)AI systems intended to be used for the purpose of assessing students in\neducational and vocational training institutions and for assessing\nparticipants in tests commonly required for admission to educational\ninstitutions.\n\n4.Employment, workers management and access to self-employment:\n\n(a)AI systems intended to be used for recruitment or selection of natural\npersons, notably for advertising vacancies, screening or filtering\napplications, evaluating candidates in the course of interviews or tests;\n\n(b)AI intended to be used for making decisions on promotion and termination of\nwork-related contractual relationships, for task allocation and for monitoring\nand evaluating performance and behavior of persons in such relationships.\n\n5.Access to and enjoyment of essential private services and public services\nand benefits:\n\n(a)AI systems intended to be used by public authorities or on behalf of public\nauthorities to evaluate the eligibility of natural persons for public\nassistance benefits and services, as well as to grant, reduce, revoke, or\nreclaim such benefits and services;\n\n(b)AI systems intended to be used to evaluate the creditworthiness of natural\npersons or establish their credit score, with the exception of AI systems put\ninto service by small scale providers for their own use;\n\n(c)AI systems intended to be used to dispatch, or to establish priority in the\ndispatching of emergency first response services, including by firefighters\nand medical aid.\n\n6. 
Law enforcement:\n\n(a)AI systems intended to be used by law enforcement authorities for making\nindividual risk assessments of natural persons in order to assess the risk of\na natural person for offending or reoffending or the risk for potential\nvictims of criminal offences;\n\n(b)AI systems intended to be used by law enforcement authorities as polygraphs\nand similar tools or to detect the emotional state of a natural person;\n\n(c)AI systems intended to be used by law enforcement authorities to detect\ndeep fakes as referred to in article 52(3);\n\n(d)AI systems intended to be used by law enforcement authorities for\nevaluation of the reliability of evidence in the course of investigation or\nprosecution of criminal offences;\n\n(e)AI systems intended to be used by law enforcement authorities for\npredicting the occurrence or reoccurrence of an actual or potential criminal\noffence based on profiling of natural persons as referred to in Article 3(4)\nof Directive (EU) 2016/680 or assessing personality traits and characteristics\nor past criminal behaviour of natural persons or groups;\n\n(f)AI systems intended to be used by law enforcement authorities for profiling\nof natural persons as referred to in Article 3(4) of Directive (EU) 2016/680\nin the course of detection, investigation or prosecution of criminal offences;\n\n(g)AI systems intended to be used for crime analytics regarding natural\npersons, allowing law enforcement authorities to search complex related and\nunrelated large data sets available in different data sources or in different\ndata formats in order to identify unknown patterns or discover hidden\nrelationships in the data.\n\n7.Migration, asylum and border control management:\n\n(a)AI systems intended to be used by competent public authorities as\npolygraphs and similar tools or to detect the emotional state of a natural\nperson;\n\n(b)AI systems intended to be used by competent public authorities to assess a\nrisk, including a security risk, a risk of irregular immigration, or a health\nrisk, posed by a natural person who intends to enter or has entered into the\nterritory of a Member State;\n\n(c)AI systems intended to be used by competent public authorities for the\nverification of the authenticity of travel documents and supporting\ndocumentation of natural persons and detect non-authentic documents by\nchecking their security features;\n\n(d)AI systems intended to assist competent public authorities for the\nexamination of applications for asylum, visa and residence permits and\nassociated complaints with regard to the eligibility of the natural persons\napplying for a status.\n\n8.Administration of justice and democratic processes:\n\n(a)AI systems intended to assist a judicial authority in researching and\ninterpreting facts and the law and in applying the law to a concrete set of\nfacts.",
"234761d8-2e8d-49ce-b109-f6f81c784001": "8.Administration of justice and democratic processes:\n\n(a)AI systems intended to assist a judicial authority in researching and\ninterpreting facts and the law and in applying the law to a concrete set of\nfacts.\n\nANNEX IV \nTECHNICAL DOCUMENTATION referred to in Article 11(1)\n\nThe technical documentation referred to in Article 11(1) shall contain at\nleast the following information, as applicable to the relevant AI system:\n\n1.A general description of the AI system including:\n\n(a)its intended purpose, the person/s developing the system the date and the\nversion of the system;\n\n(b)how the AI system interacts or can be used to interact with hardware or\nsoftware that is not part of the AI system itself, where applicable;\n\n(c)the versions of relevant software or firmware and any requirement related\nto version update;\n\n(d)the description of all forms in which the AI system is placed on the market\nor put into service;\n\n(e)the description of hardware on which the AI system is intended to run;\n\n(f)where the AI system is a component of products, photographs or\nillustrations showing external features, marking and internal layout of those\nproducts;\n\n(g)instructions of use for the user and, where applicable installation\ninstructions;\n\n2.A detailed description of the elements of the AI system and of the process\nfor its development, including:\n\n(a)the methods and steps performed for the development of the AI system,\nincluding, where relevant, recourse to pre-trained systems or tools provided\nby third parties and how these have been used, integrated or modified by the\nprovider;\n\n(b)the design specifications of the system, namely the general logic of the AI\nsystem and of the algorithms; the key design choices including the rationale\nand assumptions made, also with regard to persons or groups of persons on\nwhich the system is intended to be used; the main classification choices; what\nthe system is designed to optimise for and the relevance of the different\nparameters; the decisions about any possible trade-off made regarding the\ntechnical solutions adopted to comply with the requirements set out in Title\nIII, Chapter 2;\n\n(c)the description of the system architecture explaining how software\ncomponents build on or feed into each other and integrate into the overall\nprocessing; the computational resources used to develop, train, test and\nvalidate the AI system;\n\n(d)where relevant, the data requirements in terms of datasheets describing the\ntraining methodologies and techniques and the training data sets used,\nincluding information about the provenance of those data sets, their scope and\nmain characteristics; how the data was obtained and selected; labelling\nprocedures (e.g. 
for supervised learning), data cleaning methodologies (e.g.\noutliers detection);\n\n(e)assessment of the human oversight measures needed in accordance with\nArticle 14, including an assessment of the technical measures needed to\nfacilitate the interpretation of the outputs of AI systems by the users, in\naccordance with Articles 13(3)(d);\n\n(f)where applicable, a detailed description of pre-determined changes to the\nAI system and its performance, together with all the relevant information\nrelated to the technical solutions adopted to ensure continuous compliance of\nthe AI system with the relevant requirements set out in Title III, Chapter 2;\n\n(g)the validation and testing procedures used, including information about the\nvalidation and testing data used and their main characteristics; metrics used\nto measure accuracy, robustness, cybersecurity and compliance with other\nrelevant requirements set out in Title III, Chapter 2 as well as potentially\ndiscriminatory impacts; test logs and all test reports dated and signed by the\nresponsible persons, including with regard to pre-determined changes as\nreferred to under point (f).",
"111e4e21-c103-49df-b516-4357d244a3fa": "3.Detailed information about the monitoring, functioning and control of the AI\nsystem, in particular with regard to: its capabilities and limitations in\nperformance, including the degrees of accuracy for specific persons or groups\nof persons on which the system is intended to be used and the overall expected\nlevel of accuracy in relation to its intended purpose; the foreseeable\nunintended outcomes and sources of risks to health and safety, fundamental\nrights and discrimination in view of the intended purpose of the AI system;\nthe human oversight measures needed in accordance with Article 14, including\nthe technical measures put in place to facilitate the interpretation of the\noutputs of AI systems by the users; specifications on input data, as\nappropriate;\n\n4.A detailed description of the risk management system in accordance with\nArticle 9;\n\n5.A description of any change made to the system through its lifecycle;\n\n6.A list of the harmonised standards applied in full or in part the references\nof which have been published in the Official Journal of the European Union;\nwhere no such harmonised standards have been applied, a detailed description\nof the solutions adopted to meet the requirements set out in Title III,\nChapter 2, including a list of other relevant standards and technical\nspecifications applied;\n\n7.A copy of the EU declaration of conformity;\n\n8.A detailed description of the system in place to evaluate the AI system\nperformance in the post-market phase in accordance with Article 61, including\nthe post-market monitoring plan referred to in Article 61(3).\n\nANNEX V \nEU DECLARATION OF CONFORMITY\n\nThe EU declaration of conformity referred to in Article 48, shall contain all\nof the following information:\n\n1.AI system name and type and any additional unambiguous reference allowing\nidentification and traceability of the AI system;\n\n2.Name and address of the provider or, where applicable, their authorised\nrepresentative;\n\n3.A statement that the EU declaration of conformity is issued under the sole\nresponsibility of the provider;\n\n4.A statement that the AI system in question is in conformity with this\nRegulation and, if applicable, with any other relevant Union legislation that\nprovides for the issuing of an EU declaration of conformity;\n\n5.References to any relevant harmonised standards used or any other common\nspecification in relation to which conformity is declared;\n\n6.Where applicable, the name and identification number of the notified body, a\ndescription of the conformity assessment procedure performed and\nidentification of the certificate issued;\n\n7.Place and date of issue of the declaration, name and function of the person\nwho signed it as well as an indication for, and on behalf of whom, that person\nsigned, signature.\n\nANNEX VI \nCONFORMITY ASSESSMENT PROCEDURE BASED ON INTERNAL CONTROL\n\n1.The conformity assessment procedure based on internal control is the\nconformity assessment procedure based on points 2 to 4.\n\n2.The provider verifies that the established quality management system is in\ncompliance with the requirements of Article 17.\n\n3.The provider examines the information contained in the technical\ndocumentation in order to assess the compliance of the AI system with the\nrelevant essential requirements set out in Title III, Chapter 2.\n\n4.The provider also verifies that the design and development process of the AI\nsystem and its post-market monitoring as referred to in 
Article 61 is\nconsistent with the technical documentation.\n\nANNEX VII \nCONFORMITY BASED ON ASSESSMENT OF QUALITY MANAGEMENT SYSTEM AND ASSESSMENT OF\nTECHNICAL DOCUMENTATION\n\n1.Introduction\n\nConformity based on assessment of quality management system and assessment of\nthe technical documentation is the conformity assessment procedure based on\npoints 2 to 5.\n\n2.Overview\n\nThe approved quality management system for the design, development and testing\nof AI systems pursuant to Article 17 shall be examined in accordance with\npoint 3 and shall be subject to surveillance as specified in point 5. The\ntechnical documentation of the AI system shall be examined in accordance with\npoint 4.\n\n3.Quality management system\n\n3.1.The application of the provider shall include:\n\n(a)the name and address of the provider and, if the application is lodged by\nthe authorised representative, their name and address as well;\n\n(b)the list of AI systems covered under the same quality management system;\n\n(c)the technical documentation for each AI system covered under the same\nquality management system;\n\n(d)the documentation concerning the quality management system which shall\ncover all the aspects listed under Article 17;\n\n(e)a description of the procedures in place to ensure that the quality\nmanagement system remains adequate and effective;\n\n(f)a written declaration that the same application has not been lodged with\nany other notified body.\n\n3.2.The quality management system shall be assessed by the notified body,\nwhich shall determine whether it satisfies the requirements referred to in\nArticle 17.",
"f07f4b4f-8506-49b2-87bf-bded5db3d7f0": "The\ntechnical documentation of the AI system shall be examined in accordance with\npoint 4.\n\n3.Quality management system\n\n3.1.The application of the provider shall include:\n\n(a)the name and address of the provider and, if the application is lodged by\nthe authorised representative, their name and address as well;\n\n(b)the list of AI systems covered under the same quality management system;\n\n(c)the technical documentation for each AI system covered under the same\nquality management system;\n\n(d)the documentation concerning the quality management system which shall\ncover all the aspects listed under Article 17;\n\n(e)a description of the procedures in place to ensure that the quality\nmanagement system remains adequate and effective;\n\n(f)a written declaration that the same application has not been lodged with\nany other notified body.\n\n3.2.The quality management system shall be assessed by the notified body,\nwhich shall determine whether it satisfies the requirements referred to in\nArticle 17.\n\nThe decision shall be notified to the provider or its authorised\nrepresentative.\n\nThe notification shall contain the conclusions of the assessment of the\nquality management system and the reasoned assessment decision.\n\n3.3.The quality management system as approved shall continue to be implemented\nand maintained by the provider so that it remains adequate and efficient.\n\n3.4.Any intended change to the approved quality management system or the list\nof AI systems covered by the latter shall be brought to the attention of the\nnotified body by the provider.\n\nThe proposed changes shall be examined by the notified body, which shall\ndecide whether the modified quality management system continues to satisfy the\nrequirements referred to in point 3.2 or whether a reassessment is necessary.\n\nThe notified body shall notify the provider of its decision. The notification\nshall contain the conclusions of the examination of the changes and the\nreasoned assessment decision.\n\n4.Control of the technical documentation.\n\n4.1.In addition to the application referred to in point 3, an application with\na notified body of their choice shall be lodged by the provider for the\nassessment of the technical documentation relating to the AI system which the\nprovider intends to place on the market or put into service and which is\ncovered by the quality management system referred to under point 3.\n\n4.2.The application shall include:\n\n(a)the name and address of the provider;\n\n(b)a written declaration that the same application has not been lodged with\nany other notified body;\n\n(c)the technical documentation referred to in Annex IV.\n\n4.3.The technical documentation shall be examined by the notified body. To\nthis purpose, the notified body shall be granted full access to the training\nand testing datasets used by the provider, including through application\nprogramming interfaces (API) or other appropriate means and tools enabling\nremote access.\n\n4.4.In examining the technical documentation, the notified body may require\nthat the provider supplies further evidence or carries out further tests so as\nto enable a proper assessment of conformity of the AI system with the\nrequirements set out in Title III, Chapter 2. 
Whenever the notified body is\nnot satisfied with the tests carried out by the provider, the notified body\nshall directly carry out adequate tests, as appropriate.\n\n4.5.Where necessary to assess the conformity of the high-risk AI system with\nthe requirements set out in Title III, Chapter 2 and upon a reasoned request,\nthe notified body shall also be granted access to the source code of the AI\nsystem.\n\n4.6.The decision shall be notified to the provider or its authorised\nrepresentative. The notification shall contain the conclusions of the\nassessment of the technical documentation and the reasoned assessment\ndecision.\n\nWhere the AI system is in conformity with the requirements set out in Title\nIII, Chapter 2, an EU technical documentation assessment certificate shall be\nissued by the notified body. The certificate shall indicate the name and\naddress of the provider, the conclusions of the examination, the conditions\n(if any) for its validity and the data necessary for the identification of the\nAI system.\n\nThe certificate and its annexes shall contain all relevant information to\nallow the conformity of the AI system to be evaluated, and to allow for\ncontrol of the AI system while in use, where applicable.\n\nWhere the AI system is not in conformity with the requirements set out in\nTitle III, Chapter 2, the notified body shall refuse to issue an EU technical\ndocumentation assessment certificate and shall inform the applicant\naccordingly, giving detailed reasons for its refusal.\n\nWhere the AI system does not meet the requirement relating to the data used to\ntrain it, re-training of the AI system will be needed prior to the application\nfor a new conformity assessment. In this case, the reasoned assessment\ndecision of the notified body refusing to issue the EU technical documentation\nassessment certificate shall contain specific considerations on the quality\ndata used to train the AI system, notably on the reasons for non-compliance.",
"9f12ca0c-d435-4f5b-8ef6-1f82a017bb3b": "The certificate and its annexes shall contain all relevant information to\nallow the conformity of the AI system to be evaluated, and to allow for\ncontrol of the AI system while in use, where applicable.\n\nWhere the AI system is not in conformity with the requirements set out in\nTitle III, Chapter 2, the notified body shall refuse to issue an EU technical\ndocumentation assessment certificate and shall inform the applicant\naccordingly, giving detailed reasons for its refusal.\n\nWhere the AI system does not meet the requirement relating to the data used to\ntrain it, re-training of the AI system will be needed prior to the application\nfor a new conformity assessment. In this case, the reasoned assessment\ndecision of the notified body refusing to issue the EU technical documentation\nassessment certificate shall contain specific considerations on the quality\ndata used to train the AI system, notably on the reasons for non-compliance.\n\n4.7.Any change to the AI system that could affect the compliance of the AI\nsystem with the requirements or its intended purpose shall be approved by the\nnotified body which issued the EU technical documentation assessment\ncertificate. The provider shall inform such notified body of its intention to\nintroduce any of the above-mentioned changes or if it becomes otherwise aware\nof the occurrence of such changes. The intended changes shall be assessed by\nthe notified body which shall decide whether those changes require a new\nconformity assessment in accordance with Article 43(4) or whether they could\nbe addressed by means of a supplement to the EU technical documentation\nassessment certificate. In the latter case, the notified body shall assess the\nchanges, notify the provider of its decision and, where the changes are\napproved, issue to the provider a supplement to the EU technical documentation\nassessment certificate.\n\n5.Surveillance of the approved quality management system.\n\n5.1.The purpose of the surveillance carried out by the notified body referred\nto in Point 3 is to make sure that the provider duly fulfils the terms and\nconditions of the approved quality management system.\n\n5.2.For assessment purposes, the provider shall allow the notified body to\naccess the premises where the design, development, testing of the AI systems\nis taking place. The provider shall further share with the notified body all\nnecessary information.\n\n5.3.The notified body shall carry out periodic audits to make sure that the\nprovider maintains and applies the quality management system and shall provide\nthe provider with an audit report. 
In the context of those audits, the\nnotified body may carry out additional tests of the AI systems for which an EU\ntechnical documentation assessment certificate was issued.\n\nANNEX VIII \nINFORMATION TO BE SUBMITTED UPON THE REGISTRATION OF HIGH-RISK AI SYSTEMS IN\nACCORDANCE WITH ARTICLE 51\n\nThe following information shall be provided and thereafter kept up to date\nwith regard to high-risk AI systems to be registered in accordance with\nArticle 51.\n\n1.Name, address and contact details of the provider;\n\n2.Where submission of information is carried out by another person on behalf\nof the provider, the name, address and contact details of that person;\n\n3.Name, address and contact details of the authorised representative, where\napplicable;\n\n4.AI system trade name and any additional unambiguous reference allowing\nidentification and traceability of the AI system;\n\n5.Description of the intended purpose of the AI system;\n\n6.Status of the AI system (on the market, or in service; no longer placed on\nthe market/in service, recalled);\n\n7.Type, number and expiry date of the certificate issued by the notified body\nand the name or identification number of that notified body, when applicable;\n\n8.A scanned copy of the certificate referred to in point 7, when applicable;\n\n9.Member States in which the AI system is or has been placed on the market,\nput into service or made available in the Union;\n\n10.A copy of the EU declaration of conformity referred to in Article 48;\n\n11.Electronic instructions for use; this information shall not be provided for\nhigh-risk AI systems in the areas of law enforcement and migration, asylum and\nborder control management referred to in Annex III, points 1, 6 and 7.\n\n12.URL for additional information (optional).\n\nANNEX IX \nUnion legislation ON large-scale IT systems in the area of Freedom, Security\nand Justice\n\n1.Schengen Information System\n\n(a)Regulation (EU) 2018/1860 of the European Parliament and of the Council of\n28 November 2018 on the use of the Schengen Information System for the return\nof illegally staying third-country nationals (OJ L 312, 7.12.2018, p. 1).",
"6348f837-b100-4983-8783-890a06486a8f": "12.URL for additional information (optional).\n\nANNEX IX \nUnion legislation ON large-scale IT systems in the area of Freedom, Security\nand Justice\n\n1.Schengen Information System\n\n(a)Regulation (EU) 2018/1860 of the European Parliament and of the Council of\n28 November 2018 on the use of the Schengen Information System for the return\nof illegally staying third-country nationals (OJ L 312, 7.12.2018, p. 1).\n\n(b)Regulation (EU) 2018/1861 of the European Parliament and of the Council of\n28 November 2018 on the establishment, operation and use of the Schengen\nInformation System (SIS) in the field of border checks, and amending the\nConvention implementing the Schengen Agreement, and amending and repealing\nRegulation (EC) No 1987/2006 (OJ L 312, 7.12.2018, p. 14)\n\n(c)Regulation (EU) 2018/1862 of the European Parliament and of the Council of\n28 November 2018 on the establishment, operation and use of the Schengen\nInformation System (SIS) in the field of police cooperation and judicial\ncooperation in criminal matters, amending and repealing Council Decision\n2007/533/JHA, and repealing Regulation (EC) No 1986/2006 of the European\nParliament and of the Council and Commission Decision 2010/261/EU (OJ L 312,\n7.12.2018, p. 56).\n\n2.Visa Information System\n\n(a)Proposal for a REGULATION OF THE EUROPEAN PARLIAMENT AND OF THE COUNCIL\namending Regulation (EC) No 767/2008, Regulation (EC) No 810/2009, Regulation\n(EU) 2017/2226, Regulation (EU) 2016/399, Regulation XX/2018 [Interoperability\nRegulation], and Decision 2004/512/EC and repealing Council Decision\n2008/633/JHA - COM(2018) 302 final. To be updated once the Regulation is\nadopted (April/May 2021) by the co-legislators.\n\n3.Eurodac\n\n(a)Amended proposal for a REGULATION OF THE EUROPEAN PARLIAMENT AND OF THE\nCOUNCIL on the establishment of 'Eurodac' for the comparison of biometric data\nfor the effective application of Regulation (EU) XXX/XXX [Regulation on Asylum\nand Migration Management] and of Regulation (EU) XXX/XXX [Resettlement\nRegulation], for identifying an illegally staying third-country national or\nstateless person and on requests for the comparison with Eurodac data by\nMember States' law enforcement authorities and Europol for law enforcement\npurposes and amending Regulations (EU) 2018/1240 and (EU) 2019/818 \u2013 COM(2020)\n614 final.\n\n4.Entry/Exit System\n\n(a)Regulation (EU) 2017/2226 of the European Parliament and of the Council of\n30 November 2017 establishing an Entry/Exit System (EES) to register entry and\nexit data and refusal of entry data of third-country nationals crossing the\nexternal borders of the Member States and determining the conditions for\naccess to the EES for law enforcement purposes, and amending the Convention\nimplementing the Schengen Agreement and Regulations (EC) No 767/2008 and (EU)\nNo 1077/2011 (OJ L 327, 9.12.2017, p. 20).\n\n5.European Travel Information and Authorisation System\n\n(a)Regulation (EU) 2018/1240 of the European Parliament and of the Council of\n12 September 2018 establishing a European Travel Information and Authorisation\nSystem (ETIAS) and amending Regulations (EU) No 1077/2011, (EU) No 515/2014,\n(EU) 2016/399, (EU) 2016/1624 and (EU) 2017/2226 (OJ L 236, 19.9.2018, p. 
1).\n\n(b)Regulation (EU) 2018/1241 of the European Parliament and of the Council of\n12 September 2018 amending Regulation (EU) 2016/794 for the purpose of\nestablishing a European Travel Information and Authorisation System (ETIAS)\n(OJ L 236, 19.9.2018, p. 72).",
"3dce8135-5b30-4bb1-8403-7a64f25f4bd2": "1).\n\n(b)Regulation (EU) 2018/1241 of the European Parliament and of the Council of\n12 September 2018 amending Regulation (EU) 2016/794 for the purpose of\nestablishing a European Travel Information and Authorisation System (ETIAS)\n(OJ L 236, 19.9.2018, p. 72).\n\n6.European Criminal Records Information System on third-country nationals and\nstateless persons\n\n(a)Regulation (EU) 2019/816 of the European Parliament and of the Council of\n17 April 2019 establishing a centralised system for the identification of\nMember States holding conviction information on third-country nationals and\nstateless persons (ECRIS-TCN) to supplement the European Criminal Records\nInformation System and amending Regulation (EU) 2018/1726 (OJ L 135,\n22.5.2019, p. 1).\n\n7.Interoperability\n\n(a)Regulation (EU) 2019/817 of the European Parliament and of the Council of\n20 May 2019 on establishing a framework for interoperability between EU\ninformation systems in the field of borders and visa (OJ L 135, 22.5.2019, p.\n27).\n\n(b)Regulation (EU) 2019/818 of the European Parliament and of the Council of\n20 May 2019 on establishing a framework for interoperability between EU\ninformation systems in the field of police and judicial cooperation, asylum\nand migration (OJ L 135, 22.5.2019, p. 85)."
},
"relevant_docs": {
"35542879-0e1d-4d2b-b492-26f8263e1563": [
"03245578-ca1e-4d85-ac9e-4fca491a8fa4"
],
"1ce09904-ac01-4b4b-8e0c-c498e486dcd1": [
"03245578-ca1e-4d85-ac9e-4fca491a8fa4"
],
"350653f5-d664-43d7-9920-14d2b3b665a9": [
"11dd086d-6c11-4a19-8ce1-807d63b3ac2a"
],
"ee8c2cce-a595-4d56-b33d-d313e4a7eb9d": [
"11dd086d-6c11-4a19-8ce1-807d63b3ac2a"
],
"bf7ba2cf-aad9-40bd-95a0-d6547cf714e6": [
"f9ff291f-bda2-4e17-86c1-08325a273617"
],
"dadee1d1-6a5a-4d91-a2f9-5f18905bfae5": [
"f9ff291f-bda2-4e17-86c1-08325a273617"
],
"87e08f81-a852-44bf-aa44-149349c54167": [
"a5199f23-bcdc-445d-a1b5-a8e8da0bd715"
],
"edac11b7-fb8a-4314-a890-d8fe6abfa1a7": [
"a5199f23-bcdc-445d-a1b5-a8e8da0bd715"
],
"c8296626-1592-4ddc-a08f-186afc061cb5": [
"51cf2907-1cdb-4f2c-a830-fa19fa1ae686"
],
"8dd7b9c5-6f8b-4513-8c7f-b8a459b28b85": [
"51cf2907-1cdb-4f2c-a830-fa19fa1ae686"
],
"9ebfcd02-f33f-42e8-ae45-bff51f06772a": [
"41e99a3d-407b-4d82-b011-c98d55bfe46f"
],
"40d0cd82-ae3c-434c-9600-a6925d9e2406": [
"41e99a3d-407b-4d82-b011-c98d55bfe46f"
],
"ebe7b2af-2c41-47ef-87c9-5b3d56b994ea": [
"7e3728b3-e60c-4a6a-ab0b-e9a9e1ac9477"
],
"2f0f9c9a-cc37-4dab-a1a3-555a8def0b98": [
"7e3728b3-e60c-4a6a-ab0b-e9a9e1ac9477"
],
"c86e90de-4443-4325-8119-8bff5fe9aadf": [
"7902fa7a-008b-4fc0-9fb7-27ad82ce9f08"
],
"2f98b9cb-90f6-4687-b137-2d36c81dacd4": [
"7902fa7a-008b-4fc0-9fb7-27ad82ce9f08"
],
"a722e121-d222-41b1-aca3-d19229ae3261": [
"78b1722e-2d24-4a3e-ae86-13c54aa19534"
],
"44e1618d-e5ce-4cbb-80fe-a4bff0d7f00e": [
"78b1722e-2d24-4a3e-ae86-13c54aa19534"
],
"45576c86-87d5-4d6a-ab40-db2e6cab1f5a": [
"748908de-d1bd-47f7-8dee-189d44943fc2"
],
"ea92e976-b259-484e-8dc5-66b31670efe2": [
"748908de-d1bd-47f7-8dee-189d44943fc2"
],
"298d3909-cce9-4a64-a7fe-cd67eafd7460": [
"42cffcb5-e37a-44fe-bee9-32f24aadf256"
],
"af48022c-48ea-42bf-8654-f2d15c8e3eb2": [
"42cffcb5-e37a-44fe-bee9-32f24aadf256"
],
"0d1d4a79-e892-452f-9d7c-92e48b43b6e8": [
"d2b4f960-b43e-402b-9037-da3f6e8fbc9f"
],
"78c43dc2-ff1e-4d55-885b-ffd7d2c8ac6f": [
"d2b4f960-b43e-402b-9037-da3f6e8fbc9f"
],
"a6f23470-3273-4b74-ba68-14c570ea4a42": [
"7e60f260-6f64-4516-b132-94eca43d06ab"
],
"56ae400d-c583-446c-a05e-223d2167ec4c": [
"7e60f260-6f64-4516-b132-94eca43d06ab"
],
"5ea3e711-5f88-4e15-97a4-82c9f8a05dd0": [
"982b1d35-3a02-4f1f-943a-4378da0e7c96"
],
"2d013f0d-d962-4d17-91c0-4f56b223ad3d": [
"982b1d35-3a02-4f1f-943a-4378da0e7c96"
],
"af1e8e07-ceb9-4077-87b3-33b297453635": [
"ab56beb4-8ab7-4db0-ab92-0133825cde9a"
],
"0b7ee314-97a3-4faa-b7c3-08c2f947bc66": [
"ab56beb4-8ab7-4db0-ab92-0133825cde9a"
],
"9c781c0d-26c5-4244-943d-530a2dba4f48": [
"b738438c-edb2-4e8d-af51-1bbb0cf90798"
],
"6e5e563d-0ab3-4a13-b763-43750b5f9f70": [
"b738438c-edb2-4e8d-af51-1bbb0cf90798"
],
"bbdaf4d4-2991-498e-8420-fcbcc8b2c4c5": [
"29987751-33be-414b-9307-c7807acfe1b6"
],
"96a329b7-92dd-421c-8b0a-03ec658fd849": [
"29987751-33be-414b-9307-c7807acfe1b6"
],
"b491e7db-a3b6-4748-9f11-4f5c2438b07b": [
"38ea2640-90a3-4af7-999a-b69ed2a1ddf8"
],
"1f9d8bf9-14f7-4306-8023-7faab9c0ffaa": [
"38ea2640-90a3-4af7-999a-b69ed2a1ddf8"
],
"a29b24f7-adda-4922-8e1e-493dd6e5f95c": [
"eb05be6d-82cf-402a-ac4d-cecbb979ddb8"
],
"cb8f77e6-1d32-4a90-a581-a7754999f688": [
"eb05be6d-82cf-402a-ac4d-cecbb979ddb8"
],
"7b2d487e-e96e-4e02-b934-beb49a366ad7": [
"b38aa789-7f23-4da4-9da3-4589f0985436"
],
"8bafcb0b-84ca-4188-b28c-05987f0eeb37": [
"b38aa789-7f23-4da4-9da3-4589f0985436"
],
"0823b08c-f32c-4c0f-a59d-6102b7e2b416": [
"a3f4c0b6-a7c0-4db7-acae-2b3785bd73dc"
],
"1fccd898-07e7-4e0a-ac54-f87cdde03d78": [
"a3f4c0b6-a7c0-4db7-acae-2b3785bd73dc"
],
"d098a7dc-22b8-467d-aac6-0b2101a44eae": [
"0a7f300d-22b2-43f4-801c-55d358a0cd95"
],
"6a21cdcd-fb5c-464c-8c48-722e80da5fc4": [
"0a7f300d-22b2-43f4-801c-55d358a0cd95"
],
"5795d8c3-0854-463c-9ba7-7618dac26c7b": [
"030500a1-15da-49de-ab82-85f033486b65"
],
"b5155921-7773-451f-92f5-56cbeae015b7": [
"030500a1-15da-49de-ab82-85f033486b65"
],
"be2f5b8e-cce7-4016-8402-2d30f4e437cf": [
"98ea4c32-2ab5-41d3-9b8d-07f8823e8ead"
],
"c226bcae-6ed6-499b-910e-b782c8834478": [
"98ea4c32-2ab5-41d3-9b8d-07f8823e8ead"
],
"7f59105d-c848-49e1-a04a-1e19af690b84": [
"24ccfe7d-de4b-4363-8ff8-d55577cb0622"
],
"13213dfd-81ab-4df6-92e6-d1e4fab03474": [
"24ccfe7d-de4b-4363-8ff8-d55577cb0622"
],
"9bc24bef-c923-432d-bb1f-6c1815245a9d": [
"25f75982-c50b-4778-93d0-15c589c5e3e5"
],
"14764a25-d9f2-45c5-92c1-edb94bc4a62e": [
"25f75982-c50b-4778-93d0-15c589c5e3e5"
],
"cc716bb3-b702-4615-96c1-913cbf1ab105": [
"d4cbfa73-28bf-45b4-8bdf-a37cb739655f"
],
"49e2f0a5-4183-4cbe-b0e1-c1fb149765d8": [
"d4cbfa73-28bf-45b4-8bdf-a37cb739655f"
],
"9f6926ab-f652-404a-8b18-4cc5e5a36695": [
"b0cc4e44-e7d7-4106-847b-133fcade3740"
],
"a136870c-3607-420e-86ed-0663aa628ed8": [
"b0cc4e44-e7d7-4106-847b-133fcade3740"
],
"682c8f2a-c5fa-4b3d-aef2-de8218bf12cd": [
"8c748186-5974-4c20-b358-a23e48a0b1c5"
],
"90fab33f-4e5b-489f-8e98-c68529ef5d8a": [
"8c748186-5974-4c20-b358-a23e48a0b1c5"
],
"31066827-c9f7-4bd6-9cc0-6f5468090f18": [
"00d89864-aae6-42a2-a1e2-a7a44c05a99c"
],
"0679698f-dcbc-4385-b710-53f85457b96e": [
"00d89864-aae6-42a2-a1e2-a7a44c05a99c"
],
"769bda47-4a52-41a3-abef-8fd007cdafa7": [
"f262e389-043f-4b99-a9cd-7966f9abcf2d"
],
"63ac9716-b17d-4f4a-8a4e-95f0040fdd1c": [
"f262e389-043f-4b99-a9cd-7966f9abcf2d"
],
"1d7d8fc6-3c5e-4d82-bbe1-626b268a7a58": [
"a010eb89-971a-4228-9bfb-d0868982e676"
],
"d7bf6bb7-10f1-4133-b268-279ed5acc466": [
"a010eb89-971a-4228-9bfb-d0868982e676"
],
"cd2ab6e0-f40b-40fa-9299-3c28642f7585": [
"b7abaa44-260a-4208-8e9c-9f98ab236247"
],
"2a2e6c9d-8852-49ca-9df9-e0d454961918": [
"b7abaa44-260a-4208-8e9c-9f98ab236247"
],
"b12561fe-a769-4a8d-87c0-26291bff5897": [
"72e09799-04b4-49e8-b82c-29d3990967b2"
],
"5074cab6-a139-46de-a15b-e53e6dfcd410": [
"72e09799-04b4-49e8-b82c-29d3990967b2"
],
"3dd13556-7566-4640-86a9-8474a00d807e": [
"467de78f-78b5-4ecd-b8d5-273a5532856f"
],
"a18073e6-90e8-4f2b-a9b0-454cef8733d3": [
"467de78f-78b5-4ecd-b8d5-273a5532856f"
],
"308a159f-05ed-440c-8b7e-9ccd66b99909": [
"8ad6d948-ea50-4cbd-93a5-92860de7e2c8"
],
"14359508-c8d5-479c-b25b-c508043c8d6b": [
"8ad6d948-ea50-4cbd-93a5-92860de7e2c8"
],
"8f1722f5-06c6-4e66-9833-b0e5191eaab4": [
"95c1e8a9-c746-4879-aa55-a1c0d24ea275"
],
"737cdf8d-8b00-4c7d-86ca-085819633c2d": [
"95c1e8a9-c746-4879-aa55-a1c0d24ea275"
],
"e456b78c-03af-48ce-b857-bfeb9bf79a1a": [
"31741773-414a-4d6d-935c-9ccb81899c28"
],
"3f95b6ba-b6d0-42e0-9afc-4bcdcbeeb974": [
"31741773-414a-4d6d-935c-9ccb81899c28"
],
"60c5d46a-6d91-4e70-b810-1d2f2b869961": [
"65a44e8d-81fd-49ad-a762-0f03d26f54f3"
],
"263fed9a-7b3d-42d7-8c6b-dabebda554c6": [
"65a44e8d-81fd-49ad-a762-0f03d26f54f3"
],
"1f6f99d8-f16a-4bab-9acf-2a789e2752cc": [
"0341e3ab-9d7c-4a28-8419-010a95c63b7f"
],
"6c2468b8-6a75-4771-9c2e-bf8de9daba41": [
"0341e3ab-9d7c-4a28-8419-010a95c63b7f"
],
"5c0a8019-f164-49f5-93bd-55903e065a52": [
"efbdacf3-d4ea-4e20-a609-04835d23da7b"
],
"b7503949-77f5-424e-85a6-e278400be4ff": [
"efbdacf3-d4ea-4e20-a609-04835d23da7b"
],
"66cc0ee1-f6ca-4a43-9787-4f07e5075c51": [
"998606e2-6710-4b67-97e0-3ff5068b4981"
],
"673521c3-5c68-43f8-84e0-894b169c5fe7": [
"998606e2-6710-4b67-97e0-3ff5068b4981"
],
"4b15933f-632d-403e-ae1e-2425e20cca6f": [
"9cc2f66a-1157-4c0e-8d32-681202f99e42"
],
"f1a67057-8961-4765-87ad-3078b1cb75bb": [
"9cc2f66a-1157-4c0e-8d32-681202f99e42"
],
"b6dc219a-3ce8-44f5-ae48-7bf0d2547f80": [
"9f274be8-4aa1-4386-985d-7e58d339f184"
],
"21f17b0c-85cb-4c9a-a39b-892f4c3c45d2": [
"9f274be8-4aa1-4386-985d-7e58d339f184"
],
"004419d0-ea67-4b94-a474-397c5da5ff25": [
"4024673d-b875-4fee-8312-b1beb9c375f9"
],
"695ec8ef-8bc9-4c0c-8619-5f835d7b9a62": [
"4024673d-b875-4fee-8312-b1beb9c375f9"
],
"230080f1-d70b-4d74-98e4-934cdafabed1": [
"dbdda4ce-1bbe-4ede-aa99-3de01bd47851"
],
"e28534bb-e95e-40c6-95f7-edd4a8195840": [
"dbdda4ce-1bbe-4ede-aa99-3de01bd47851"
],
"645b3fb0-3972-4a9c-8132-495d6acf17d7": [
"17da604c-40a3-4f34-8308-8acb2427ceae"
],
"ad329bff-0a2b-4977-b401-dd926c5e0573": [
"17da604c-40a3-4f34-8308-8acb2427ceae"
],
"ebc135a5-2a4f-4e1b-8742-7394bbe26882": [
"bf244202-3dcc-41ff-b2c4-3212dcb17646"
],
"92e91c85-c988-4ae8-b7c2-e32540b7cca8": [
"bf244202-3dcc-41ff-b2c4-3212dcb17646"
],
"5dc17a4e-94e4-4e8f-a24e-9d770faef615": [
"a081285b-90a6-4ec8-af54-757bc889574c"
],
"2b531e83-a776-4195-a1ec-ae6652b8c03e": [
"a081285b-90a6-4ec8-af54-757bc889574c"
],
"0b027062-f411-4751-a47c-d252d2a83808": [
"1101bd69-cec5-4620-887c-9c1eb642cb18"
],
"6fb9cefd-c4e5-4e34-ac19-88fb5fb3cafb": [
"1101bd69-cec5-4620-887c-9c1eb642cb18"
],
"574650a9-c527-4e52-9db7-98b888848dd4": [
"6baa1ddc-34a1-4064-8efb-f7752d6abea9"
],
"803acb47-093c-4a8c-9d64-0db9282d9a4e": [
"6baa1ddc-34a1-4064-8efb-f7752d6abea9"
],
"822e3c52-e35d-4f15-bad9-cb64324c6db5": [
"e4ed2eb8-1551-4cf3-8341-78dc99b0c3f6"
],
"6fdd278c-5d97-4d80-8316-c44fd591d312": [
"e4ed2eb8-1551-4cf3-8341-78dc99b0c3f6"
],
"98e7c8a5-3d75-4923-9371-5ee046fa806a": [
"ad01a621-97c0-4698-be0b-4338399f22bc"
],
"e8176bcb-f8ee-4049-a159-351ed2d587d1": [
"ad01a621-97c0-4698-be0b-4338399f22bc"
],
"7f9c3728-739b-4e2e-9646-c377e9e2b55a": [
"3df81ab3-1d02-4e96-8b73-3e14bcf90079"
],
"7bb312dd-abd9-4331-9b96-d2e804ea74f9": [
"3df81ab3-1d02-4e96-8b73-3e14bcf90079"
],
"5f3230b1-8b25-45e8-a76e-a915b9b64f1b": [
"6dc28c03-3dd4-473c-8c03-19daed9ce7be"
],
"dec2d034-015e-480d-8301-d185012d0f3c": [
"6dc28c03-3dd4-473c-8c03-19daed9ce7be"
],
"cd3ee4a5-88b4-4532-8242-0bcf46b57519": [
"b1f8a5c7-5d43-4e3d-9c41-114aee6eb9e0"
],
"49ad8201-d62c-439f-9ff0-bbad0b177425": [
"b1f8a5c7-5d43-4e3d-9c41-114aee6eb9e0"
],
"ae93f02b-aa93-423f-8d3f-c1120524913d": [
"eed57da1-7fe8-44b9-b8c6-3b6245fb32ad"
],
"1ef35f06-303f-4316-9353-6447cbf20878": [
"eed57da1-7fe8-44b9-b8c6-3b6245fb32ad"
],
"1d44f6f5-ccc6-49a5-8d0d-a3f8a6b16648": [
"454a95f2-f6d7-4db9-8738-79175a3e4adc"
],
"2254a295-94b9-4f51-b51f-af85dc008b4e": [
"454a95f2-f6d7-4db9-8738-79175a3e4adc"
],
"67b11309-b3e2-4c03-a5b0-47762008339b": [
"adcde755-920b-4ab4-a38e-189536c6e0de"
],
"cf023089-943c-48c9-bbac-888d1abe7d3d": [
"adcde755-920b-4ab4-a38e-189536c6e0de"
],
"6484e0d0-6054-4ff9-80a6-c99249d99d65": [
"53f8b176-7759-4362-9c5d-443c0beee9d6"
],
"7156513b-5306-451d-bad4-d7b661e4b2bc": [
"53f8b176-7759-4362-9c5d-443c0beee9d6"
],
"4c0b72d8-6017-43a8-8160-c624d7911586": [
"869396a8-ad52-4320-834c-433e7219d751"
],
"782ddf45-61e3-4755-ba10-1e2960859ee9": [
"869396a8-ad52-4320-834c-433e7219d751"
],
"ed9a4a15-bd9e-4136-8f74-6f9e8f983360": [
"53c6bf36-8083-4c04-8634-e3826f160445"
],
"878d51a9-3f7c-483b-a458-5f29bf488d12": [
"53c6bf36-8083-4c04-8634-e3826f160445"
],
"715a29fe-c93a-411a-b3cc-3815211ce498": [
"953f012b-a3e9-485f-8006-13c9307eccf6"
],
"04015c1a-33fa-4800-a546-2592b1a3a7b1": [
"953f012b-a3e9-485f-8006-13c9307eccf6"
],
"53a0fb71-2773-46bd-91bf-fe82cbcb4830": [
"f86d03df-3463-480c-a3a9-1d054159204e"
],
"01055908-de6f-486a-86d8-89b498b5529a": [
"f86d03df-3463-480c-a3a9-1d054159204e"
],
"559f2fd3-e188-4f04-a7ea-7d0775bae9f2": [
"fa5e0764-204d-413a-afc0-2d0bbbc0580e"
],
"12ad50c8-1c81-4519-a515-da5548ea39c0": [
"fa5e0764-204d-413a-afc0-2d0bbbc0580e"
],
"203be7be-62fc-4747-b78b-bb7569f13570": [
"89d870dc-70ad-485e-9ecf-4cc8ff38d5cd"
],
"73164917-31cf-4a39-b847-e13fad694dce": [
"89d870dc-70ad-485e-9ecf-4cc8ff38d5cd"
],
"0d884677-0091-4adf-8f57-dd939a3ee4fd": [
"9ee22d0c-41cd-456c-bc6a-b9b5c64bafbd"
],
"a20a86bf-2c3b-4026-ba2c-f29e44c386f2": [
"9ee22d0c-41cd-456c-bc6a-b9b5c64bafbd"
],
"77319657-3bb7-4ca5-8a41-523a910404f0": [
"d582a86a-8704-4281-a071-c2331bbb0e0c"
],
"27e05db8-96e6-430c-ad1d-8f8a7a3eab78": [
"d582a86a-8704-4281-a071-c2331bbb0e0c"
],
"9dac45b7-4501-48fc-a61c-49656ef43127": [
"c18022a7-529e-4273-accd-eaf024dc74f5"
],
"31ee4bcb-1f3d-4577-8445-a17b2c2f92fc": [
"c18022a7-529e-4273-accd-eaf024dc74f5"
],
"b49b7176-1051-4fa5-a921-bdfccbeb9390": [
"3e5ae392-3aa1-448f-8895-87a70aaebb33"
],
"c52749c0-26cf-4325-8b3d-7d2e61b855e6": [
"3e5ae392-3aa1-448f-8895-87a70aaebb33"
],
"87350ed4-5224-4962-9321-2dafa8de6fef": [
"9d90027d-86a5-4562-9498-b9429c0bd9c2"
],
"a1c66485-5d69-426a-ae92-bc7e23e5c0c8": [
"9d90027d-86a5-4562-9498-b9429c0bd9c2"
],
"6afe7a04-d0e1-45be-a1d0-7f34d652b5fc": [
"f530109d-a976-49ef-a079-481cd306b524"
],
"52164ce7-f8e0-4e75-a31a-2dc9e18c3dfe": [
"f530109d-a976-49ef-a079-481cd306b524"
],
"6d3e3117-3b45-4b40-b785-9f979c597de0": [
"3c3daf6c-9b3d-42f7-90d5-cf4cd3bd27b6"
],
"779b49f0-bd3d-487d-bd26-63cade8d02d4": [
"3c3daf6c-9b3d-42f7-90d5-cf4cd3bd27b6"
],
"f576b484-825b-41fa-8892-265dee270394": [
"0b349579-2999-4fcb-ade0-090a5b384899"
],
"3113eeb1-e8ba-4761-be4d-da4d183311ed": [
"0b349579-2999-4fcb-ade0-090a5b384899"
],
"007eadc6-6366-4453-bc52-173df63d0390": [
"e5da2399-5778-415a-a000-fc0ee787412b"
],
"01836290-e509-4eef-b30d-35fbc47bac18": [
"e5da2399-5778-415a-a000-fc0ee787412b"
],
"d6c8681d-5bb0-4383-8eff-764a5784bf7d": [
"78c1c061-7077-4ab4-8d3b-82d0c44fb941"
],
"a6588e8d-673f-49ff-b1c2-1c6cfedc7053": [
"78c1c061-7077-4ab4-8d3b-82d0c44fb941"
],
"479b15f5-8e07-49c1-9eef-f9105ab613a3": [
"f36840e7-46d1-452d-afea-a24079a6d202"
],
"7d9757a4-f0fc-407d-ae2e-3a2393f5aa65": [
"f36840e7-46d1-452d-afea-a24079a6d202"
],
"50c830d4-365f-4a69-950b-6d3f9e91a35a": [
"9e57434d-4a2a-415f-9ecb-e74cfaf00b80"
],
"d41daa09-f3e0-4f94-8a71-ff696f1720f8": [
"9e57434d-4a2a-415f-9ecb-e74cfaf00b80"
],
"380c4024-c30c-4176-b1ee-4164f38e2ead": [
"beea6879-dad3-4d83-bc5e-f787bb3eac57"
],
"0fed0844-0cf4-4070-8d7b-d8b1149991a3": [
"beea6879-dad3-4d83-bc5e-f787bb3eac57"
],
"4a5c8522-a95f-4a50-87f6-ee9e2e0c21e5": [
"dc8cd6dd-880f-43a6-94f9-bcda5df70247"
],
"6ca44c4c-c784-47e0-9301-9f9eb2b54eee": [
"dc8cd6dd-880f-43a6-94f9-bcda5df70247"
],
"60fa7103-2d9e-469e-8825-7b38c412243e": [
"fe0d20f6-9866-483e-803a-fcfff7121a75"
],
"5bd69dd6-3e97-4ff8-b29c-b3b2a21a5ba0": [
"fe0d20f6-9866-483e-803a-fcfff7121a75"
],
"5df35a6d-ae7f-4f60-964d-ef34325af6a1": [
"0f9f9b4f-ef2f-4000-895e-971c7244c637"
],
"b0723e17-2500-4f2f-b53b-6b00e04797fd": [
"0f9f9b4f-ef2f-4000-895e-971c7244c637"
],
"90179cff-0e39-4e45-ad02-7560c36165ef": [
"49aecd20-ae79-4098-9a8f-407ef8e13126"
],
"cad29db6-2589-4c02-93ea-51c300b50edc": [
"49aecd20-ae79-4098-9a8f-407ef8e13126"
],
"1515458e-8a2f-4594-b23f-e75a33ce8a0c": [
"ebb5a6a8-51a4-412b-b6c5-1fe8f374304e"
],
"6bd5a001-f63c-4ba2-90eb-e6e69e0cca50": [
"ebb5a6a8-51a4-412b-b6c5-1fe8f374304e"
],
"5ec39daf-0e0b-4b13-85a5-f1e7ab108c6a": [
"265d82e1-4e4f-42d4-ac22-fb000797b0f3"
],
"2e6e81ae-21b5-417a-bdcd-86459b8bf05c": [
"265d82e1-4e4f-42d4-ac22-fb000797b0f3"
],
"f820382d-ee5b-4d58-823f-ad48b0e2af49": [
"29e856d3-82ad-4d0a-9b9a-2bd06240fa4d"
],
"bc2f027f-029b-45ac-8554-4ab2f0c99d7e": [
"29e856d3-82ad-4d0a-9b9a-2bd06240fa4d"
],
"bc717280-dadf-4593-868d-03bb7ecee21d": [
"e7651dc3-0697-4e61-9d75-31fed0252ac2"
],
"f6abd919-3ad4-4859-87bc-9c7e2a5f3ca2": [
"e7651dc3-0697-4e61-9d75-31fed0252ac2"
],
"d8b124f6-7347-408d-ae9e-2304383a45c8": [
"234761d8-2e8d-49ce-b109-f6f81c784001"
],
"1faf7245-40ae-4a57-a16e-632b80689295": [
"234761d8-2e8d-49ce-b109-f6f81c784001"
],
"8ab7f89f-4992-47d6-a286-0334e186fa70": [
"111e4e21-c103-49df-b516-4357d244a3fa"
],
"7e858825-18f8-4b47-9e07-8c4b96389585": [
"111e4e21-c103-49df-b516-4357d244a3fa"
],
"795303b6-2927-49a5-a62e-c0ef4612ace7": [
"f07f4b4f-8506-49b2-87bf-bded5db3d7f0"
],
"e39802bc-4678-4eff-a818-71db078abc93": [
"f07f4b4f-8506-49b2-87bf-bded5db3d7f0"
],
"92ce6bdf-23c2-49a9-81c7-bd4942a42c55": [
"9f12ca0c-d435-4f5b-8ef6-1f82a017bb3b"
],
"ec5790ea-93ca-4efe-ab03-ce5e61fc3870": [
"9f12ca0c-d435-4f5b-8ef6-1f82a017bb3b"
],
"968485fa-ab5c-4a4e-8729-397b52ac2b8c": [
"6348f837-b100-4983-8783-890a06486a8f"
],
"9e92e57f-d505-4583-b9bb-1091e0c6beb5": [
"6348f837-b100-4983-8783-890a06486a8f"
],
"b59cc0b4-d287-47c5-9678-80d2a4324b72": [
"3dce8135-5b30-4bb1-8403-7a64f25f4bd2"
],
"5c0a8b97-5ea4-486d-8a01-3b8295a399bb": [
"3dce8135-5b30-4bb1-8403-7a64f25f4bd2"
]
},
"mode": "text"
}