Commit

Pub details fixed
Miccighel committed Sep 1, 2024
1 parent 6638635 commit db0ebdf
Showing 5 changed files with 5 additions and 2 deletions.
4 changes: 3 additions & 1 deletion content/publication/conference-paper-misdoom-2024/cite.bib
@@ -9,8 +9,10 @@ @inproceedings{10.1007/978-3-031-71210-4_5
address = {Cham},
series = {Lecture Notes in Computer Science},
pages = {70--85},
doi = {10.1007/978-3-031-71210-4_5},
isbn = {978-3-031-71210-4},
url = {https://link.springer.com/chapter/10.1007/978-3-031-71210-4_5},
numpages = 15,
editor = {Preuss, Mike and Leszkiewicz, Agata and Boucher, Jean-Christopher and Fridman, Ofer and Stampe, Lucas},
abstract = {This paper explores the use of crowdsourcing to classify statement types in film reviews to assess their information quality. Employing the Argument Type Identification Procedure which uses the Periodic Table of Arguments to categorize arguments, the study aims to connect statement types to the overall argument strength and information reliability. Focusing on non-expert annotators in a crowdsourcing environment, the research assesses their reliability based on various factors including language proficiency and annotation experience. Results indicate the importance of careful annotator selection and training to achieve high inter-annotator agreement and highlight challenges in crowdsourcing statement classification for information quality assessment.}
}
1 change: 1 addition & 0 deletions content/publication/conference-paper-misdoom-2024/index.md
@@ -38,4 +38,5 @@ publication_types:
- '1'
abstract: 'This paper explores the use of crowdsourcing to classify statement types in film reviews to assess their information quality. Employing the Argument Type Identification Procedure which uses the Periodic Table of Arguments to categorize arguments, the study aims to connect statement types to the overall argument strength and information reliability. Focusing on non-expert annotators in a crowdsourcing environment, the research assesses their reliability based on various factors including language proficiency and annotation experience. Results indicate the importance of careful annotator selection and training to achieve high inter-annotator agreement and highlight challenges in crowdsourcing statement classification for information quality assessment.'
publication: '*Disinformation in Open Online Media - 6th Multidisciplinary International Symposium (MISDOOM 2024). Münster, Germany.*'
doi: 10.1007/978-3-031-71210-4_5
---
2 changes: 1 addition & 1 deletion content/publication/journal-paper-tsc-2024/cite.bib
@@ -7,7 +7,7 @@ @article{10.1145/3674884
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
doi = {10.1145/3674884},
url = {https://doi.org/10.1145/3674884},
url = {https://dl.acm.org/doi/10.1145/3674884},
note = {Journal Rank: Scimago (SJR) Q2 (2023)},
abstract = {Crowdsourcing tasks have been widely used to collect a large number of human labels at scale. While some of these tasks are deployed by requesters and performed only once by crowd workers, others require the same worker to perform the same task or a variant of it more than once, thus participating in a so-called longitudinal study. Despite the prevalence of longitudinal studies in crowdsourcing, there is a limited understanding of factors that influence worker participation in them across different crowdsourcing marketplaces. We present results from a large-scale survey of 300 workers on 3 different micro-task crowdsourcing platforms: Amazon Mechanical Turk, Prolific and Toloka. The aim is to understand how longitudinal studies are performed using crowdsourcing. We collect answers about 547 experiences and we analyze them both quantitatively and qualitatively. We synthesize 17 take-home messages about longitudinal studies together with 8 recommendations for task requesters and 5 best practices for crowdsourcing platforms to adequately conduct and support such kinds of studies. We release the survey and the data at: https://osf.io/h4du9/.},
keywords = {Longitudinal Studies, Crowdsourcing Platforms, Surveys, Online Sampling, Amazon Mechanical Turk, Prolific, Toloka}
Binary file modified static/media/Curriculum_EN.pdf
Binary file not shown.
Binary file modified static/media/Curriculum_IT.pdf
Binary file not shown.
