diff --git a/lichess.bib b/lichess.bib
index d9c5ae1..b1dd763 100644
--- a/lichess.bib
+++ b/lichess.bib
@@ -421,6 +421,17 @@ @article{mok:2021:time-online-digital-well-being
   keywords = {online well-being, problematic platform use, specification curve analysis, survey methodology},
 }
 
+@phdthesis{mok:2024:measuring-digital-welfare-online-systems,
+  title = {Measuring the Digital Welfare of Online Social Systems},
+  author = {Mok, Lillio},
+  year = {2024},
+  note = {http://hdl.handle.net/1807/140863},
+  school = {University of Toronto},
+  keywords = {Computational Social Science, Data Science, Human-AI Interaction, Human-Computer Interaction, Web Science},
+  annote = {We rely on all manners of digital systems to organize and facilitate our human functions. From social networks connecting us to each other, to content providers keeping us perpetually entertained, to search engines serving each of our informational needs, to computational models informing us how healthy we are, to artificially-intelligent coaches supplementing our natural intelligence, every corner of human existence is permeated by the digital tools we create. Accompanying the boons of these systems, however, are increasingly complex risks to our digital health. Our attention is pulled into cyberspace via algorithms that use billions of datapoints to learn what we like, sometimes to the detriment of our physical wellness. Ideological rifts online threaten our societal harmony as partisans become ever more polarized, whose obsession with political content in turn feeds the underbelly of our social media ecosystem. All the while, the same data underpinning these online interactions also allow others to make finely-optimized decisions about us, often to the detriment of the disadvantaged. This thesis offers a more optimistic vision: that the same computational infrastructure powering our potentially perilous systems can be repurposed to help us understand their perils. We first outline a framework for rigorously assessing the welfare of our digital systems through the well-being of individual users, the cohesion of user communities, and whether the systems themselves deserve trust. We then utilize this framework to conduct four empirical studies measuring the extent to which digital welfare is preserved or endangered by data-driven systems. At the level of individual users, we directly measure how spending time on a large-scale chess platform, Lichess, can be perceived as detrimental to personal well-being. We find that perceived harms are explained not only by the time that people believe they spend online, but also the actual time they spend engaging with the platform. For groups of users, we quantify how partisan users on the Reddit platform are selective towards politically-congruent news outlets, thus consuming and disseminating polarized news. Despite the platform appearing polarized on aggregate, we discover that narrow, hyper-partisan communities are responsible for deeply-ingrained ideological segregation. We then extend this result by identifying whether key individuals can influence the news consumption cycle on Reddit. Through an analysis of where news about political figures is shared on Reddit and the language it attracts, we illustrate that nationally-recognizable politicians are selectively discussed more by in-group online communities than they are by in-group news outlets. Out-group communities, on the other hand, generate the most toxic and hateful commentary. At the level of problematic downstream outcomes, we further probe whether people can tell when systems like algorithmic risk assessments harm data subjects in unfair ways. We find that observers are easily distracted by who makes risk assessments rather than how equitable the assessments are, suggesting that the task of welfare measurement itself needs to be made accessible for laypeople at large. This thesis posits that the online social systems jeopardizing our collective welfare can also be used to understand the very dangers they pose. By empirically measuring how well people are doing when they use or are impacted by these systems, we in turn empirically demonstrate the feasibility of this ideal. We conclude by speculating on the imminent ubiquity of artificial intelligence in our cyber-environment and their implications for the work in this thesis.},
+  type = {Doctoral Thesis},
+}
+
 @inproceedings{muecke:2022:check-mate-sanity-check-trustworthy-ai,
   title = {Check Mate: {A} Sanity Check for Trustworthy {AI}},
   author = {Sascha M{\"{u}}cke and Lukas Pfahler},
diff --git a/lichess.html b/lichess.html
index 41b9c30..aa375e3 100644
--- a/lichess.html
+++ b/lichess.html
@@ -514,6 +514,11 @@
 Dauphin, Percy Liang, and Jennifer Wortman Vaughan, 24482–97, 2021.
 https://proceedings.neurips.cc/paper/2021/hash/ccf8111910291ba472b385e9c5f59099-Abstract.html.
+