% references.bib
@inproceedings{pmlr-v81-buolamwini18a,
title = {Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification},
author = {Buolamwini, Joy and Gebru, Timnit},
booktitle = {Proceedings of the 1st Conference on Fairness, Accountability and Transparency},
pages = {77--91},
year = {2018},
editor = {Friedler, Sorelle A. and Wilson, Christo},
volume = {81},
series = {Proceedings of Machine Learning Research},
month = {23--24 Feb},
publisher = {PMLR},
pdf = {http://proceedings.mlr.press/v81/buolamwini18a/buolamwini18a.pdf},
url = {https://proceedings.mlr.press/v81/buolamwini18a.html},
abstract = {Recent studies demonstrate that machine learning algorithms can discriminate based on classes like race and gender. In this work, we present an approach to evaluate bias present in automated facial analysis algorithms and datasets with respect to phenotypic subgroups. Using the dermatologist approved Fitzpatrick Skin Type classification system, we characterize the gender and skin type distribution of two facial analysis benchmarks, IJB-A and Adience. We find that these datasets are overwhelmingly composed of lighter-skinned subjects (79.6% for IJB-A and 86.2% for Adience) and introduce a new facial analysis dataset which is balanced by gender and skin type. We evaluate 3 commercial gender classification systems using our dataset and show that darker-skinned females are the most misclassified group (with error rates of up to 34.7%). The maximum error rate for lighter-skinned males is 0.8%. The substantial disparities in the accuracy of classifying darker females, lighter females, darker males, and lighter males in gender classification systems require urgent attention if commercial companies are to build genuinely fair, transparent and accountable facial analysis algorithms.}
}
@inproceedings{sap-etal-2019-risk,
  title = {The Risk of Racial Bias in Hate Speech Detection},
  author = {Sap, Maarten and Card, Dallas and Gabriel, Saadia and Choi, Yejin and Smith, Noah A.},
  booktitle = {Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics},
  month = jul,
  year = {2019},
  address = {Florence, Italy},
  publisher = {Association for Computational Linguistics},
  url = {https://aclanthology.org/P19-1163},
  doi = {10.18653/v1/P19-1163},
  pages = {1668--1678},
  abstract = {We investigate how annotators' insensitivity to differences in dialect can lead to racial bias in automatic hate speech detection models, potentially amplifying harm against minority populations. We first uncover unexpected correlations between surface markers of African American English (AAE) and ratings of toxicity in several widely-used hate speech datasets. Then, we show that models trained on these corpora acquire and propagate these biases, such that AAE tweets and tweets by self-identified African Americans are up to two times more likely to be labelled as offensive compared to others. Finally, we propose *dialect* and *race priming* as ways to reduce the racial bias in annotation, showing that when annotators are made explicitly aware of an AAE tweet's dialect they are significantly less likely to label the tweet as offensive.}
}
@article{freelon-2018-api,
  author = {Freelon, Deen},
  title = {Computational Research in the Post-API Age},
  journal = {Political Communication},
  volume = {35},
  number = {4},
  pages = {665--668},
  year = {2018},
  publisher = {Routledge},
  doi = {10.1080/10584609.2018.1477506},
  url = {https://doi.org/10.1080/10584609.2018.1477506}
}
@article{gizmodo-ring-2019,
  author = {Cameron, Dell and Mehrotra, Dhruv},
  date = {2019-12-08},
  title = {Ring's Hidden Data Let Us Map Amazon's Sprawling Home Surveillance Network},
journal = {Gizmodo},
url = {https://gizmodo.com/ring-s-hidden-data-let-us-map-amazons-sprawling-home-su-1840312279},
urldate = {2022-02-22}
}
@article{willis-plum,
author = {Derek Willis},
date = {2013-04-11},
title = {Freeing the Plum Book},
journal = {Source},
url = {https://source.opennews.org/articles/freeing-plum-book/},
urldate = {2022-02-22}
}
@article{calacci-2022,
author = {Calacci, Dan and Shen, Jeffrey J. and Pentland, Alex},
title = {The Cop In Your Neighbor's Doorbell: Amazon Ring and the Spread of Participatory Mass Surveillance},
year = {2022},
issue_date = {November 2022},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
volume = {6},
number = {CSCW2},
url = {https://doi.org/10.1145/3555125},
doi = {10.1145/3555125},
journal = {Proc. ACM Hum.-Comput. Interact.},
month = {nov},
articleno = {400},
numpages = {47},
  keywords = {platforms, law enforcement, surveillance, data \& society}
}
@article{garcia-2018,
  author = {Garcia, David and Mitike Kassa, Yonas and Cuevas, Angel and Cebrian, Manuel and Moro, Esteban and Rahwan, Iyad and Cuevas, Ruben},
  title = {Analyzing gender inequality through large-scale Facebook advertising data},
  journal = {Proceedings of the National Academy of Sciences},
  keywords = {Computer Science - Computers and Society},
  year = {2018},
month = jul,
volume = {115},
number = {27},
pages = {6958-6963},
doi = {10.1073/pnas.1717781115},
archivePrefix = {arXiv},
eprint = {1710.03705},
primaryClass = {cs.CY},
adsurl = {https://ui.adsabs.harvard.edu/abs/2018PNAS..115.6958G},
adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}
@inproceedings{princeton-2020,
author = {Major, David and Teixeira, Ross and Mayer, Jonathan},
title = {No WAN's Land: Mapping U.S. Broadband Coverage with Millions of Address Queries to ISPs},
year = {2020},
isbn = {9781450381383},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {https://doi.org/10.1145/3419394.3423652},
doi = {10.1145/3419394.3423652},
abstract = {Accurate broadband coverage data is essential for public policy planning and government support programs. In the United States, the Federal Communications Commission is responsible for maintaining national broadband coverage data. Observers have panned the FCC's broadband maps for overstating availability, due to coarse-grained data collection and a low coverage threshold. We demonstrate a new approach to building broadband coverage maps: automated large-scale queries to the public availability checking tools offered by major internet service providers. We reverse engineer the coverage tools for nine major ISPs in the U.S., test over 19 million residential street addresses across nine states for service, and compare the results to the FCC's maps. Our results demonstrate that the FCC's coverage data significantly overstates the availability of each ISP's service, access to any broadband, connection speeds available to consumers, and competition in broadband markets. We also find that the FCC's data disproportionately overstates coverage in rural and minority communities. Our results highlight a promising direction for developing more accurate broadband maps and validating coverage reports.},
booktitle = {Proceedings of the ACM Internet Measurement Conference},
pages = {393--419},
numpages = {27},
location = {Virtual Event, USA},
series = {IMC '20}
}
@book{Metaxa-book,
  author = {Metaxa, Danaë and Park, Joon Sung and Robertson, Ronald E. and Karahalios, Karrie and Wilson, Christo and Hancock, Jeff and Sandvig, Christian},
  title = {Auditing Algorithms: Understanding Algorithmic Systems from the Outside In},
  year = {2021}
}