-
-
Notifications
You must be signed in to change notification settings - Fork 0
/
refs.bib
245 lines (225 loc) · 18.1 KB
/
refs.bib
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
%% This BibTeX bibliography file was created using BibDesk.
%% https://bibdesk.sourceforge.io/
%% Created for Amir Masoud Abdol at 2020-11-09 15:00:36 +0100
%% Saved with string encoding Unicode (UTF-8)
@incollection{Samejima_1997,
  abstract = {The graded response model represents a family of mathematical models that deals with ordered polytomous categories. These ordered categories include rating such as letter grading, A, B, C, D, and F, used in the evaluation of students' performance; strongly disagree, disagree, agree, and strongly agree, used in attitude surveys; or partial credit given in accordance with an examinee's degree of attainment in solving a problem.},
  address = {New York, NY},
  author = {Samejima, Fumiko},
  booktitle = {Handbook of Modern Item Response Theory},
  date-added = {2020-11-04 12:50:48 +0100},
  date-modified = {2020-11-04 12:51:31 +0100},
  doi = {10.1007/978-1-4757-2691-6_5},
  editor = {van der Linden, Wim J. and Hambleton, Ronald K.},
  isbn = {978-1-4757-2691-6},
  pages = {85--100},
  publisher = {Springer New York},
  title = {Graded Response Model},
  year = {1997},
  Bdsk-Url-1 = {https://doi.org/10.1007/978-1-4757-2691-6_5}}
@book{Embretson_2000,
  address = {Mahwah, NJ},
  author = {Embretson, Susan E. and Reise, Steven Paul},
  date-added = {2020-11-09 11:00:28 +0100},
  date-modified = {2020-11-09 15:00:29 +0100},
  publisher = {Lawrence Erlbaum Associates},
  title = {Item Response Theory for Psychologists},
  year = {2000}}
@article{DerSimonian_1986,
  author = {DerSimonian, Rebecca and Laird, Nan},
  date-added = {2020-10-26 14:56:13 +0100},
  date-modified = {2020-10-26 14:56:13 +0100},
  doi = {10.1016/0197-2456(86)90046-2},
  journal = {Controlled Clinical Trials},
  month = sep,
  number = {3},
  pages = {177--188},
  publisher = {Elsevier {BV}},
  title = {Meta-analysis in clinical trials},
  volume = {7},
  year = {1986},
  Bdsk-Url-1 = {https://doi.org/10.1016%2F0197-2456%2886%2990046-2},
  Bdsk-Url-2 = {https://doi.org/10.1016/0197-2456(86)90046-2}}
@incollection{Wilcoxon_1992,
  abstract = {The comparison of two treatments generally falls into one of the following two categories: (a) we may have a number of replications for each of the two treatments, which are unpaired, or (b) we may have a number of paired comparisons leading to a series of differences, some of which may be positive and some negative. The appropriate methods for testing the significance of the differences of the means in these two cases are described in most of the textbooks on statistical methods.},
  address = {New York, NY},
  author = {Wilcoxon, Frank},
  booktitle = {Breakthroughs in Statistics: Methodology and Distribution},
  date-added = {2020-10-26 13:28:04 +0100},
  date-modified = {2020-10-26 13:28:13 +0100},
  doi = {10.1007/978-1-4612-4380-9_16},
  editor = {Kotz, Samuel and Johnson, Norman L.},
  isbn = {978-1-4612-4380-9},
  pages = {196--202},
  publisher = {Springer New York},
  title = {Individual Comparisons by Ranking Methods},
  year = {1992},
  Bdsk-Url-1 = {https://doi.org/10.1007/978-1-4612-4380-9_16}}
@article{Yuen_1974,
  abstract = {The effect of nonnormality on the Welch approximate degrees of freedom t test is demonstrated. A two-sample trimmed t statistic for unequal population variances is proposed and its performance is also evaluated in comparison to the Welch t test under normality and under long-tailed distributions. If the underlying distributions is long-tailed or contaminated with outliers, the trimmed t is strongly recommended.},
  author = {Yuen, Karen K.},
  date-added = {2020-10-26 13:25:01 +0100},
  date-modified = {2020-10-26 13:36:00 +0100},
  doi = {10.1093/biomet/61.1.165},
  issn = {0006-3444},
  journal = {Biometrika},
  month = apr,
  number = {1},
  pages = {165--170},
  title = {The two-sample trimmed {$t$} for unequal population variances},
  url = {https://doi.org/10.1093/biomet/61.1.165},
  volume = {61},
  year = {1974},
  Bdsk-Url-1 = {https://doi.org/10.1093/biomet/61.1.165}}
@article{Abdol_2021,
  abstract = {{Meta-researchers increasingly study biases in quantitative study outcomes (effect sizes) that emerge from questionable research practices (QRPs) in designing, running, analyzing, and reporting studies. Here, we introduce an extensible and modular C++ simulation framework called \href{https://sam.amirmasoudabdol.name/}{SAM} (Science Abstract Model) that enables systematic study of the effects of QRPs and researchers’ degrees of freedom (\emph{p}-hacking) on a host of outcomes across the different phases of quantitative studies that test hypotheses. SAM achieves this by modular modeling of different entities and processes involved in research, from study designs and inferential criteria, the data collection and analyses, to the submission and acceptance of manuscripts in a journal. We demonstrate the advantages of our approach by reproducing and extending the Bakker, van Dijk, and Wicherts (2012) simulation study that investigated the effects of various \emph{p}-hacking methods and publication bias on meta-analytic outcomes. We showcase how SAM’s modularity and flexibility makes it possible to easily examine the original study by modifying, adding, or removing different components— e.g., publication bias, different significance levels, or meta-analytic metrics. We focus our illustration on the fundamental question of whether lowering $\alpha$ will reduce the biases in the scientific literature.}},
  author = {Abdol, Amir M. and Wicherts, Jelte M.},
  date-added = {2021-09-18},
  day = {18},
  doi = {10.31234/osf.io/zy29t},
  journal = {PsyArXiv},
  month = sep,
  title = {{Science Abstract Model} Simulation Framework},
  url = {https://psyarxiv.com/zy29t},
  year = {2021},
  Bdsk-Url-1 = {https://doi.org/10.31234/osf.io/zy29t}}
@article{Friese_2020aa,
  abstract = {Science depends on trustworthy evidence. Thus, a biased scientific record is of questionable value because it impedes scientific progress, and the public receives advice on the basis of unreliable evidence that has the potential to have far-reaching detrimental consequences. Meta-analysis is a technique that can be used to summarize research evidence. However, meta-analytic effect size estimates may themselves be biased, threatening the validity and usefulness of meta-analyses to promote scientific progress. Here, we offer a large-scale simulation study to elucidate how p-hacking and publication bias distort meta-analytic effect size estimates under a broad array of circumstances that reflect the reality that exists across a variety of research areas. The results revealed that, first, very high levels of publication bias can severely distort the cumulative evidence. Second, p-hacking and publication bias interact: At relatively high and low levels of publication bias, p-hacking does comparatively little harm, but at medium levels of publication bias, p-hacking can considerably contribute to bias, especially when the true effects are very small or are approaching zero. Third, p-hacking can severely increase the rate of false positives. A key implication is that, in addition to preventing p-hacking, policies in research institutions, funding agencies, and scientific journals need to make the prevention of publication bias a top priority to ensure a trustworthy base of evidence. (PsycInfo Database Record (c) 2020 APA, all rights reserved).},
  author = {Friese, Malte and Frankenbach, Julius},
  date-added = {2020-10-19 10:28:36 +0200},
  date-modified = {2020-10-19 10:30:30 +0200},
  doi = {10.1037/met0000246},
  journal = {Psychological Methods},
  journal-full = {Psychological methods},
  month = aug,
  number = {4},
  pages = {456--471},
  pmid = {31789538},
  pst = {ppublish},
  title = {{p-Hacking} and publication bias interact to distort meta-analytic effect size estimates},
  volume = {25},
  year = {2020},
  Bdsk-Url-1 = {https://doi.org/10.1037/met0000246}}
@article{John_2012aa,
  abstract = {Cases of clear scientific misconduct have received significant media attention recently, but less flagrantly questionable research practices may be more prevalent and, ultimately, more damaging to the academic enterprise. Using an anonymous elicitation format supplemented by incentives for honest reporting, we surveyed over 2,000 psychologists about their involvement in questionable research practices. The impact of truth-telling incentives on self-admissions of questionable research practices was positive, and this impact was greater for practices that respondents judged to be less defensible. Combining three different estimation methods, we found that the percentage of respondents who have engaged in questionable practices was surprisingly high. This finding suggests that some questionable practices may constitute the prevailing research norm.},
  author = {John, Leslie K and Loewenstein, George and Prelec, Drazen},
  date-added = {2020-10-08 17:15:16 +0200},
  date-modified = {2020-10-08 17:16:52 +0200},
  doi = {10.1177/0956797611430953},
  journal = {Psychological Science},
  journal-full = {Psychological science},
  mesh = {Data Collection; Humans; Motivation; Psychology; Research; Scientific Misconduct; Truth Disclosure},
  month = may,
  number = {5},
  pages = {524--532},
  pmid = {22508865},
  pst = {ppublish},
  title = {Measuring the prevalence of questionable research practices with incentives for truth telling},
  volume = {23},
  year = {2012},
  Bdsk-Url-1 = {https://doi.org/10.1177/0956797611430953}}
@article{Agnoli_2017,
  author = {Agnoli, Franca and Wicherts, Jelte M. and Veldkamp, Coosje L. S. and Albiero, Paolo and Cubelli, Roberto},
  date-added = {2020-10-08 17:12:14 +0200},
  date-modified = {2020-10-08 17:12:14 +0200},
  doi = {10.1371/journal.pone.0172792},
  editor = {Pietschnig, Jakob},
  journal = {{PLOS} {ONE}},
  month = mar,
  number = {3},
  pages = {e0172792},
  publisher = {Public Library of Science ({PLoS})},
  title = {Questionable research practices among {Italian} research psychologists},
  volume = {12},
  year = {2017},
  Bdsk-Url-1 = {https://doi.org/10.1371%2Fjournal.pone.0172792},
  Bdsk-Url-2 = {https://doi.org/10.1371/journal.pone.0172792}}
@article{Duval_2000,
  abstract = {Summary. We study recently developed nonparametric methods for estimating the number of missing studies that might exist in a meta-analysis and the effect that these studies might have had on its outcome. These are simple rank-based data augmentation techniques, which formalize the use of funnel plots. We show that they provide effective and relatively powerful tests for evaluating the existence of such publication bias. After adjusting for missing studies, we find that the point estimate of the overall effect size is approximately correct and coverage of the effect size confidence intervals is substantially improved, in many cases recovering the nominal confidence levels entirely. We illustrate the trim and fill method on existing meta-analyses of studies in clinical trials and psychometrics.},
  author = {Duval, Sue and Tweedie, Richard},
  date-added = {2020-10-07 15:17:29 +0200},
  date-modified = {2020-10-07 15:17:54 +0200},
  doi = {10.1111/j.0006-341X.2000.00455.x},
  eprint = {https://onlinelibrary.wiley.com/doi/pdf/10.1111/j.0006-341X.2000.00455.x},
  journal = {Biometrics},
  number = {2},
  pages = {455--463},
  title = {{Trim} and {Fill}: A Simple Funnel-Plot--Based Method of Testing and Adjusting for Publication Bias in Meta-Analysis},
  url = {https://onlinelibrary.wiley.com/doi/abs/10.1111/j.0006-341X.2000.00455.x},
  volume = {56},
  year = {2000},
  Bdsk-Url-1 = {https://onlinelibrary.wiley.com/doi/abs/10.1111/j.0006-341X.2000.00455.x},
  Bdsk-Url-2 = {https://doi.org/10.1111/j.0006-341X.2000.00455.x}}
@article{Ioannidis_2007,
  author = {Ioannidis, John P. A. and Trikalinos, Thomas A.},
  date-added = {2020-10-07 15:12:34 +0200},
  date-modified = {2020-10-07 15:12:34 +0200},
  doi = {10.1177/1740774507079441},
  journal = {Clinical Trials: Journal of the Society for Clinical Trials},
  month = jun,
  number = {3},
  pages = {245--253},
  publisher = {{SAGE} Publications},
  title = {An exploratory test for an excess of significant findings},
  volume = {4},
  year = {2007},
  Bdsk-Url-1 = {https://doi.org/10.1177%2F1740774507079441},
  Bdsk-Url-2 = {https://doi.org/10.1177/1740774507079441}}
@article{Egger_1997,
  author = {Egger, Matthias and Smith, George Davey and Schneider, Martin and Minder, Christoph},
  date-added = {2020-10-07 15:12:03 +0200},
  date-modified = {2020-10-07 15:33:57 +0200},
  doi = {10.1136/bmj.315.7109.629},
  journal = {BMJ},
  number = {7109},
  pages = {629--634},
  title = {Bias in meta-analysis detected by a simple, graphical test},
  volume = {315},
  year = {1997},
  Bdsk-Url-1 = {https://doi.org/10.1136/bmj.315.7109.629}}
@article{Begg_1994,
  abstract = {An adjusted rank correlation test is proposed as a technique for identifying publication bias in a meta-analysis, and its operating characteristics are evaluated via simulations. The test statistic is a direct statistical analogue of the popular "funnel-graph." The number of component studies in the meta-analysis, the nature of the selection mechanism, the range of variances of the effect size estimates, and the true underlying effect size are all observed to be influential in determining the power of the test. The test is fairly powerful for large meta-analyses with 75 component studies, but has only moderate power for meta-analyses with 25 component studies. However, in many of the configurations in which there is low power, there is also relatively little bias in the summary effect size estimate. Nonetheless, the test must be interpreted with caution in small meta-analyses. In particular, bias cannot be ruled out if the test is not significant. The proposed technique has potential utility as an exploratory tool for meta-analysts, as a formal procedure to complement the funnel-graph.},
  author = {Begg, Colin B. and Mazumdar, Madhuchhanda},
  date-added = {2020-10-07 15:11:21 +0200},
  date-modified = {2020-10-07 15:11:35 +0200},
  doi = {10.2307/2533446},
  issn = {0006-341X, 1541-0420},
  journal = {Biometrics},
  number = {4},
  pages = {1088--1101},
  publisher = {Wiley, International Biometric Society},
  title = {Operating Characteristics of a Rank Correlation Test for Publication Bias},
  url = {http://www.jstor.org/stable/2533446},
  volume = {50},
  year = {1994},
  Bdsk-Url-1 = {http://www.jstor.org/stable/2533446}}
@article{Bakker_2014,
  author = {Bakker, Marjan and Wicherts, Jelte M.},
  date-added = {2020-09-24 17:10:01 +0200},
  date-modified = {2020-09-24 17:10:01 +0200},
  doi = {10.1037/met0000014},
  journal = {Psychological Methods},
  number = {3},
  pages = {409--427},
  publisher = {American Psychological Association ({APA})},
  title = {Outlier removal, sum scores, and the inflation of the type {I} error rate in independent samples t tests: The power of alternatives and recommendations.},
  volume = {19},
  year = {2014},
  Bdsk-Url-1 = {https://doi.org/10.1037/met0000014},
  Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxDALi4vLi4vRGF0YWJhc2VzL1RpbGJ1cmcuZHRCYXNlMi9GaWxlcy5ub2luZGV4L3BkZi81LzIwMTRfT3V0bGllciByZW1vdmFsLCBzdW0gc2NvcmVzLCBhbmQgdGhlIGluZmxhdGlvbiBvZiB0aGUgdHlwZSBpIGVycm9yIHJhdGUgaW4gaW5kZXBlbmRlbnQgc2FtcGxlcyB0IHRlc3RzIFRoZSBwb3dlciBvZiBhbHRlcm5hdGl2ZXMgYW4ucGRmTxEDggAAAAADggACAAAMTWFjaW50b3NoIEhEAAAAAAAAAAAAAAAAAAAAAAAAAEJEAAH/////HzIwMTRfT3V0bGllciByZW1vdiNGRkZGRkZGRi5wZGYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP////8AAAAAAAAAAAAAAAAAAgAGAAAKIGN1AAAAAAAAAAAAAAAAAAE1AAACAMovOlVzZXJzOmFtYWJkb2w6RGF0YWJhc2VzOlRpbGJ1cmcuZHRCYXNlMjpGaWxlcy5ub2luZGV4OnBkZjo1OjIwMTRfT3V0bGllciByZW1vdmFsLCBzdW0gc2NvcmVzLCBhbmQgdGhlIGluZmxhdGlvbiBvZiB0aGUgdHlwZSBpIGVycm9yIHJhdGUgaW4gaW5kZXBlbmRlbnQgc2FtcGxlcyB0IHRlc3RzIFRoZSBwb3dlciBvZiBhbHRlcm5hdGl2ZXMgYW4ucGRmAA4BGgCMADIAMAAxADQAXwBPAHUAdABsAGkAZQByACAAcgBlAG0AbwB2AGEAbAAsACAAcwB1AG0AIABzAGMAbwByAGUAcwAsACAAYQBuAGQAIAB0AGgAZQAgAGkAbgBmAGwAYQB0AGkAbwBuACAAbwBmACAAdABoAGUAIAB0AHkAcABlACAAaQAgAGUAcgByAG8AcgAgAHIAYQB0AGUAIABpAG4AIABpAG4AZABlAHAAZQBuAGQAZQBuAHQAIABzAGEAbQBwAGwAZQBzACAAdAAgAHQAZQBzAHQAcwAgAFQAaABlACAAcABvAHcAZQByACAAbwBmACAAYQBsAHQAZQByAG4AYQB0AGkAdgBlAHMAIABhAG4ALgBwAGQAZgAPABoADABNAGEAYwBpAG4AdABvAHMAaAAgAEgARAASAMhVc2Vycy9hbWFiZG9sL0RhdGFiYXNlcy9UaWxidXJnLmR0QmFzZTIvRmlsZXMubm9pbmRleC9wZGYvNS8yMDE0X091dGxpZXIgcmVtb3ZhbCwgc3VtIHNjb3JlcywgYW5kIHRoZSBpbmZsYXRpb24gb2YgdGhlIHR5cGUgaSBlcnJvciByYXRlIGluIGluZGVwZW5kZW50IHNhbXBsZXMgdCB0ZXN0cyBUaGUgcG93ZXIgb2YgYWx0ZXJuYXRpdmVzIGFuLnBkZgATAAEvAAAVAAIADv//AAAACAANABoAJADnAAAAAAAAAgEAAAAAAAAABQAAAAAAAAAAAAAAAAAABG0=}}
@article{Bakker_2012,
  author = {Bakker, Marjan and van Dijk, Annette and Wicherts, Jelte M.},
  date-added = {2020-09-24 16:57:44 +0200},
  date-modified = {2020-09-24 16:57:44 +0200},
  doi = {10.1177/1745691612459060},
  journal = {Perspectives on Psychological Science},
  month = nov,
  number = {6},
  pages = {543--554},
  publisher = {{SAGE} Publications},
  title = {The Rules of the Game Called Psychological Science},
  volume = {7},
  year = {2012},
  Bdsk-Url-1 = {https://doi.org/10.1177/1745691612459060},
  Bdsk-File-1 = {YnBsaXN0MDDSAQIDBFxyZWxhdGl2ZVBhdGhZYWxpYXNEYXRhXxCnLi4vLi4vTGlicmFyeS9BcHBsaWNhdGlvbiBTdXBwb3J0L0RFVk9OdGhpbmsgMy9JbmJveC5kdEJhc2UyL0ZpbGVzLm5vaW5kZXgvcGRmLzAvMjAxMl9UaGUgUnVsZXMgb2YgdGhlIEdhbWUgQ2FsbGVkIFBzeWNob2xvZ2ljYWwgU2NpZW5jZV9CYWtrZXIsIHZhbiBEaWprLCBXaWNoZXJ0cy5wZGZPEQLmAAAAAALmAAIAAAxNYWNpbnRvc2ggSEQAAAAAAAAAAAAAAAAAAAAAAAAAQkQAAf////8fMjAxMl9UaGUgUnVsZXMgb2YgI0ZGRkZGRkZGLnBkZgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/////wAAAAAAAAAAAAAAAAACAAgAAAogY3UAAAAAAAAAAAAAAAAAATAAAAIAsS86VXNlcnM6YW1hYmRvbDpMaWJyYXJ5OkFwcGxpY2F0aW9uIFN1cHBvcnQ6REVWT050aGluayAzOkluYm94LmR0QmFzZTI6RmlsZXMubm9pbmRleDpwZGY6MDoyMDEyX1RoZSBSdWxlcyBvZiB0aGUgR2FtZSBDYWxsZWQgUHN5Y2hvbG9naWNhbCBTY2llbmNlX0Jha2tlciwgdmFuIERpamssIFdpY2hlcnRzLnBkZgAADgCuAFYAMgAwADEAMgBfAFQAaABlACAAUgB1AGwAZQBzACAAbwBmACAAdABoAGUAIABHAGEAbQBlACAAQwBhAGwAbABlAGQAIABQAHMAeQBjAGgAbwBsAG8AZwBpAGMAYQBsACAAUwBjAGkAZQBuAGMAZQBfAEIAYQBrAGsAZQByACwAIAB2AGEAbgAgAEQAaQBqAGsALAAgAFcAaQBjAGgAZQByAHQAcwAuAHAAZABmAA8AGgAMAE0AYQBjAGkAbgB0AG8AcwBoACAASABEABIAr1VzZXJzL2FtYWJkb2wvTGlicmFyeS9BcHBsaWNhdGlvbiBTdXBwb3J0L0RFVk9OdGhpbmsgMy9JbmJveC5kdEJhc2UyL0ZpbGVzLm5vaW5kZXgvcGRmLzAvMjAxMl9UaGUgUnVsZXMgb2YgdGhlIEdhbWUgQ2FsbGVkIFBzeWNob2xvZ2ljYWwgU2NpZW5jZV9CYWtrZXIsIHZhbiBEaWprLCBXaWNoZXJ0cy5wZGYAABMAAS8AABUAAgAO//8AAAAIAA0AGgAkAM4AAAAAAAACAQAAAAAAAAAFAAAAAAAAAAAAAAAAAAADuA==}}