% book.bib (generated from lisa-coes/lisa-book)
@misc{__,
howpublished = {https://www.stata.com/meeting/italy14/abstracts/materials/it14\_haghish.pdf}
}
@misc{_Figueiredo_2021,
title = {{{Collective Memories}} and {{Present}}-{{Day Intergroup Relations}}: {{Introduction}} to the {{Special Thematic Section}}},
shorttitle = {Collective Memories and Present-Day Intergroup Relations},
author = {Figueiredo and Martinovic and Rees and Licata},
year = {2021},
month = jun,
doi = {10.5964/jspp.v5i2.895},
howpublished = {https://jspp.psychopen.eu/index.php/jspp/article/download/4995/4995.html?inline=1}
}
@misc{_impact_2021,
title = {The Impact of a Multicultural Exchange between Indigenous and Non-Indigenous History Teachers for Students' Attitudes: Preliminary Evidence from a Pilot Study in {{Chile}}},
journal = {Multicultural Education Review},
volume = {12},
number = {3},
year = {2021},
month = jun,
howpublished = {https://www.tandfonline.com/doi/abs/10.1080/2005615X.2020.1808927}
}
@misc{_Increasing_,
title = {Increasing the {{Credibility}} of {{Political Science Research}}: {{A Proposal}} for {{Journal Reforms}}},
howpublished = {https://www-webofscience-com.uchile.idm.oclc.org/wos/woscc/full-record/WOS:000359291900014}
}
@misc{_Retraction_,
title = {Retraction {{Watch}}},
journal = {Retraction Watch},
abstract = {Tracking retractions as a window into the scientific process},
howpublished = {https://retractionwatch.com/},
language = {en-US}
}
@misc{_Web_,
title = {Web of {{Science}}},
howpublished = {https://www-webofscience-com.uchile.idm.oclc.org/wos/}
}
@article{abdill_tracking_2019,
title = {Tracking the Popularity and Outcomes of All {{bioRxiv}} Preprints},
author = {Abdill, Richard J. and Blekhman, Ran},
year = {2019},
journal = {bioRxiv},
pages = {515643},
doi = {10.1101/515643},
abstract = {Researchers in the life sciences are posting work to preprint servers at an unprecedented and increasing rate, sharing papers online before (or instead of) publication in peer-reviewed journals. Though the increasing acceptance of preprints is driving policy changes for journals and funders, there is little information about their usage. Here, we collected and analyzed data on all 37,648 preprints uploaded to bioRxiv.org, the largest biology-focused preprint server, in its first five years. We find preprints are being downloaded more than ever before (1.1 million tallied in October 2018 alone) and that the rate of preprints being posted has increased to a recent high of 2,100 per month. We also find that two-thirds of preprints posted before 2017 were later published in peer-reviewed journals, and find a relationship between journal impact factor and preprint downloads. Lastly, we developed Rxivist.org, a web application providing multiple ways of interacting with preprint metadata.}
}
@book{abrilruiz_Manzanas_2019,
title = {{Manzanas podridas: Malas pr\'acticas de investigaci\'on y ciencia descuidada}},
shorttitle = {{Manzanas podridas}},
author = {Abril Ruiz, Angel},
year = {2019},
isbn = {978-1-07-075536-6},
language = {Spanish},
annotation = {OCLC: 1120499121}
}
@article{aczel_consensusbased_2020,
title = {A Consensus-Based Transparency Checklist},
author = {Aczel, Balazs and Szaszi, Barnabas and Sarafoglou, Alexandra and Kekecs, Zoltan and Kucharsk{\'y}, {\v S}imon and Benjamin, Daniel and Chambers, Christopher D. and Fisher, Agneta and Gelman, Andrew and Gernsbacher, Morton A. and Ioannidis, John P. and Johnson, Eric and Jonas, Kai and Kousta, Stavroula and Lilienfeld, Scott O. and Lindsay, D. Stephen and Morey, Candice C. and Munaf{\`o}, Marcus and Newell, Benjamin R. and Pashler, Harold and Shanks, David R. and Simons, Daniel J. and Wicherts, Jelte M. and Albarracin, Dolores and Anderson, Nicole D. and Antonakis, John and Arkes, Hal R. and Back, Mitja D. and Banks, George C. and Beevers, Christopher and Bennett, Andrew A. and Bleidorn, Wiebke and Boyer, Ty W. and Cacciari, Cristina and Carter, Alice S. and Cesario, Joseph and Clifton, Charles and Conroy, Ron{\'a}n M. and Cortese, Mike and Cosci, Fiammetta and Cowan, Nelson and Crawford, Jarret and Crone, Eveline A. and Curtin, John and Engle, Randall and Farrell, Simon and Fearon, Pasco and Fichman, Mark and Frankenhuis, Willem and Freund, Alexandra M. and Gaskell, M. Gareth and {Giner-Sorolla}, Roger and Green, Don P. and Greene, Robert L. and Harlow, Lisa L. and {de la Guardia}, Fernando Hoces and Isaacowitz, Derek and Kolodner, Janet and Lieberman, Debra and Logan, Gordon D. and Mendes, Wendy B. and Moersdorf, Lea and Nyhan, Brendan and Pollack, Jeffrey and Sullivan, Christopher and Vazire, Simine and Wagenmakers, Eric-Jan},
year = {2020},
month = jan,
journal = {Nature Human Behaviour},
volume = {4},
number = {1},
pages = {4--6},
publisher = {{Nature Publishing Group}},
issn = {2397-3374},
doi = {10.1038/s41562-019-0772-6},
abstract = {We present a consensus-based checklist to improve and document the transparency of research reports in social and behavioural research. An accompanying online application allows users to complete the form and generate a report that they can submit with their manuscript or post to a public repository.},
copyright = {2019 The Author(s)},
language = {en},
keywords = {forrt,herramienta}
}
@misc{agencianacionaldeinvestigacionydesarrollo_consulta_2020,
title = {Consulta {{P\'ublica}}: {{Pol\'itica Acceso Abierto}} a {{Informaci\'on Cient\'ifica}}},
author = {{Agencia Nacional de Investigaci{\'o}n y Desarrollo (ANID)}},
year = {2020}
}
@article{agnoli_Questionable_2017,
title = {Questionable Research Practices among Italian Research Psychologists},
author = {Agnoli, Franca and Wicherts, Jelte M. and Veldkamp, Coosje L. S. and Albiero, Paolo and Cubelli, Roberto},
year = {2017},
month = mar,
journal = {PLOS ONE},
volume = {12},
number = {3},
pages = {e0172792},
publisher = {{Public Library of Science}},
issn = {1932-6203},
doi = {10.1371/journal.pone.0172792},
abstract = {A survey in the United States revealed that an alarmingly large percentage of university psychologists admitted having used questionable research practices that can contaminate the research literature with false positive and biased findings. We conducted a replication of this study among Italian research psychologists to investigate whether these findings generalize to other countries. All the original materials were translated into Italian, and members of the Italian Association of Psychology were invited to participate via an online survey. The percentages of Italian psychologists who admitted to having used ten questionable research practices were similar to the results obtained in the United States although there were small but significant differences in self-admission rates for some QRPs. Nearly all researchers (88\%) admitted using at least one of the practices, and researchers generally considered a practice possibly defensible if they admitted using it, but Italian researchers were much less likely than US researchers to consider a practice defensible. Participants' estimates of the percentage of researchers who have used these practices were greater than the self-admission rates, and participants estimated that researchers would be unlikely to admit it. In written responses, participants argued that some of these practices are not questionable and they have used some practices because reviewers and journals demand it. The similarity of results obtained in the United States, this study, and a related study conducted in Germany suggest that adoption of these practices is an international phenomenon and is likely due to systemic features of the international research and publication processes.},
language = {en},
keywords = {Behavior,Experimental psychology,Italian people,Psychologists,Psychology,Psychometrics,Questionnaires,United States}
}
@article{allen_Open_2019,
title = {Open Science Challenges, Benefits and Tips in Early Career and Beyond},
author = {Allen, Christopher and Mehler, David M. A.},
year = {2019},
month = may,
journal = {PLOS Biology},
volume = {17},
number = {5},
pages = {e3000246},
publisher = {{Public Library of Science}},
issn = {1545-7885},
doi = {10.1371/journal.pbio.3000246},
abstract = {The movement towards open science is a consequence of seemingly pervasive failures to replicate previous research. This transition comes with great benefits but also significant challenges that are likely to affect those who carry out the research, usually early career researchers (ECRs). Here, we describe key benefits, including reputational gains, increased chances of publication, and a broader increase in the reliability of research. The increased chances of publication are supported by exploratory analyses indicating null findings are substantially more likely to be published via open registered reports in comparison to more conventional methods. These benefits are balanced by challenges that we have encountered and that involve increased costs in terms of flexibility, time, and issues with the current incentive structure, all of which seem to affect ECRs acutely. Although there are major obstacles to the early adoption of open science, overall open science practices should benefit both the ECR and improve the quality of research. We review 3 benefits and 3 challenges and provide suggestions from the perspective of ECRs for moving towards open science practices, which we believe scientists and institutions at all levels would do well to consider.},
language = {en},
keywords = {Careers,Experimental design,Neuroimaging,Open data,Open science,Peer review,Reproducibility,Statistical data}
}
@article{allison_Reproducibility_2018,
title = {Reproducibility of Research: {{Issues}} and Proposed Remedies},
shorttitle = {Reproducibility of Research},
author = {Allison, David B. and Shiffrin, Richard M. and Stodden, Victoria},
year = {2018},
month = mar,
journal = {Proceedings of the National Academy of Sciences},
volume = {115},
number = {11},
pages = {2561--2562}
}
@book{alperin_indicadores_2014,
title = {{Indicadores de acceso abierto y comunicaciones acad\'emicas en Am\'erica Latina}},
shorttitle = {{Indicadores de AA}},
author = {Alperin, Juan Pablo and Babini, Dominique and Fischman, Gustavo},
year = {2014},
volume = {1},
publisher = {{Consejo Latinoamericano de Ciencias Sociales, CLACSO}},
address = {{Buenos Aires, Argentina}},
abstract = {El mundo hoy en d\'ia cuenta con frases como ``muerte de la distancia'' lo que sugiere que la distancia ya no es un factor limitante en la capacidad de las personas para interactuar y comunicarse. Otro aforismo es que el mundo est\'a ``aplanado'' en t\'erminos de oportunidades, que son facilitadas por el avance de las Tecnolog\'ias de la Comunicaci\'on y de la Informaci\'on (TIC) que han permitido la convergencia de los consorcios y recursos de conocimiento de todo el mundo. A medida que las sociedades se van transformando, los paisajes del conocimiento y su interacci\'on dentro y entre las sociedades tambi\'en est\'an cambiando.},
copyright = {Creative Commons Attribution 4.0 International, Open Access},
isbn = {978-987-722-042-1},
language = {es}
}
@article{an_Crisis_2018,
title = {The {{Crisis}} of {{Reproducibility}}, the {{Denominator Problem}} and the {{Scientific Role}} of {{Multi}}-Scale {{Modeling}}},
author = {An, Gary},
year = {2018},
month = dec,
journal = {Bulletin of Mathematical Biology},
volume = {80},
number = {12},
pages = {3071--3080},
issn = {1522-9602},
doi = {10.1007/s11538-018-0497-0},
abstract = {The ``Crisis of Reproducibility'' has received considerable attention both within the scientific community and without. While factors associated with scientific culture and practical practice are most often invoked, I propose that the Crisis of Reproducibility is ultimately a failure of generalization with a fundamental scientific basis in the methods used for biomedical research. The Denominator Problem describes how limitations intrinsic to the two primary approaches of biomedical research, clinical studies and preclinical experimental biology, lead to an inability to effectively characterize the full extent of biological heterogeneity, which compromises the task of generalizing acquired knowledge. Drawing on the example of the unifying role of theory in the physical sciences, I propose that multi-scale mathematical and dynamic computational models, when mapped to the modular structure of biological systems, can serve a unifying role as formal representations of what is conserved and similar from one biological context to another. This ability to explicitly describe the generation of heterogeneity from similarity addresses the Denominator Problem and provides a scientific response to the Crisis of Reproducibility.},
language = {en},
keywords = {crisis}
}
@article{andrea_Why_2018,
title = {Why Science's Crisis Should Not Become a Political Battling Ground},
author = {Saltelli, Andrea},
year = {2018},
month = dec,
journal = {Futures},
volume = {104},
pages = {85--90},
issn = {0016-3287},
doi = {10.1016/j.futures.2018.07.006},
abstract = {A science war is in full swing which has taken science's reproducibility crisis as a battleground. While conservatives and corporate interests use the crisis to weaken regulations, their opponent deny the existence of a science's crisis altogether. Thus, for the conservative National Association of Scholars NAS the crisis is real and due to the progressive assault on higher education with ideologies such as ``neo-Marxism, radical feminism, historicism, post-colonialism, deconstructionism, post-modernism, liberation theology''. In the opposite field, some commentators claim that there is no crisis in science and that saying the opposite is irresponsible. These positions are to be seen in the context of the ongoing battle against regulation, of which the new rules proposed at the US Environmental Protection Agency (EPA) are but the last chapter. In this optic, Naomi Oreskes writes on Nature that what constitutes the crisis is the conservatives' attack on science. This evident right-left divide in the reading of the crisis is unhelpful and dangerous to the survival of science itself. An alternative reading ignored by the contendents would suggest that structural contradictions have emerged in modern science, and that addressing these should be the focus of our attention.},
language = {en},
keywords = {crisis,Evidence-based policy,History and philosophy of science,Post-normal science,Science and technology studies,Science’s crisis,Science’s reproducibility,Science’s war,Scientism}
}
@article{angell_Publish_1986,
title = {Publish or {{Perish}}: {{A Proposal}}},
shorttitle = {Publish or {{Perish}}},
author = {Angell, Marcia},
year = {1986},
month = feb,
journal = {Annals of Internal Medicine},
volume = {104},
number = {2},
pages = {261--262},
publisher = {{American College of Physicians}},
issn = {0003-4819},
doi = {10.7326/0003-4819-104-2-261},
keywords = {institutional}
}
@misc{anid_propuesta_2020,
title = {Propuesta de {{Pol\'itica}} de Acceso Abierto a la Informaci\'on Cient\'ifica y a Datos de Investigaci\'on Financiados con Fondos P\'ublicos de la {{ANID}}},
author = {ANID},
year = {2020},
publisher = {{ANID}}
}
@article{anvari_replicability_2018,
title = {The Replicability Crisis and Public Trust in Psychological Science},
author = {Anvari, Farid and Lakens, Dani{\"e}l},
year = {2018},
month = sep,
journal = {Comprehensive Results in Social Psychology},
volume = {3},
number = {3},
pages = {266--286},
publisher = {{Routledge}},
issn = {2374-3603},
doi = {10.1080/23743603.2019.1684822},
abstract = {Replication failures of past findings in several scientific disciplines, including psychology, medicine, and experimental economics, have created a ``crisis of confidence'' among scientists. Psychological science has been at the forefront of tackling these issues, with discussions about replication failures and scientific self-criticisms of questionable research practices (QRPs) increasingly taking place in public forums. How this replicability crisis impacts the public's trust is a question yet to be answered by research. Whereas some researchers believe that the public's trust will be positively impacted or maintained, others believe trust will be diminished. Because it is our field of expertise, we focus on trust in psychological science. We performed a study testing how public trust in past and future psychological research would be impacted by being informed about (i) replication failures (replications group), (ii) replication failures and criticisms of QRPs (QRPs group), and (iii) replication failures, criticisms of QRPs, and proposed reforms (reforms group). Results from a mostly European sample (N = 1129) showed that, compared to a control group, people in the replications, QRPs, and reforms groups self-reported less trust in past research. Regarding trust in future research, the replications and QRPs groups did not significantly differ from the control group. Surprisingly, the reforms group had less trust in future research than the control group. Nevertheless, people in the replications, QRPs, and reforms groups did not significantly differ from the control group in how much they believed future research in psychological science should be supported by public funding. Potential explanations are discussed.},
keywords = {crisis of confidence,open science,Replicability crisis,reproducibility crisis,trust in science},
annotation = {\_eprint: https://doi.org/10.1080/23743603.2019.1684822}
}
@article{armeni_widescale_2021,
title = {Towards Wide-Scale Adoption of Open Science Practices: {{The}} Role of Open Science Communities},
shorttitle = {Towards Wide-Scale Adoption of Open Science Practices},
author = {Armeni, Kristijan and Brinkman, Loek and Carlsson, Rickard and Eerland, Anita and Fijten, Rianne and Fondberg, Robin and Heininga, Vera E and Heunis, Stephan and Koh, Wei Qi and Masselink, Maurits and Moran, Niall and Baoill, Andrew {\'O} and Sarafoglou, Alexandra and Schettino, Antonio and Schwamm, Hardy and Sjoerds, Zsuzsika and Teperek, Marta and {van den Akker}, Olmo R and {van't Veer}, Anna and {Zurita-Milla}, Raul},
year = {2021},
month = jul,
journal = {Science and Public Policy},
number = {scab039},
issn = {0302-3427},
doi = {10.1093/scipol/scab039},
abstract = {Despite the increasing availability of Open Science (OS) infrastructure and the rise in policies to change behaviour, OS practices are not yet the norm. While pioneering researchers are developing OS practices, the majority sticks to status quo. To transition to common practice, we must engage a critical proportion of the academic community. In this transition, OS Communities (OSCs) play a key role. OSCs are bottom-up learning groups of scholars that discuss OS within and across disciplines. They make OS knowledge more accessible and facilitate communication among scholars and policymakers. Over the past two years, eleven OSCs were founded at several Dutch university cities. In other countries, similar OSCs are starting up. In this article, we discuss the pivotal role OSCs play in the large-scale transition to OS. We emphasize that, despite the grassroot character of OSCs, support from universities is critical for OSCs to be viable, effective, and sustainable.}
}
@inproceedings{babini_universidades_2014,
title = {{Universidades y acceso abierto: hora de tomar protagonismo}},
booktitle = {{Foro Revista Iberoamericana de Ciencia, Tecnolog\'ia y Sociedad}},
author = {Babini, Dominique},
year = {2014},
pages = {1--3},
abstract = {Las universidades est\'an en condiciones tener mayor protagonismo en la construcci\'on de un acceso abierto global cooperativo no comercial, sustentable e inclusivo. Pueden: desarrollar sus propios portales con las revistas que publica cada universidad, crear repositorios digitales institucionales que reflejen la propia producci\'on cient\'ifica y acad\'emica de cada instituci\'on disponible gratis en texto completo, participar activamente en los sistemas nacionales de repositorios de sus pa\'ises, aportar una revisi\'on cr\'itica de las actuales modalidades de evaluaci\'on de la investigaci\'on.},
language = {es}
}
@article{baker_500_2016,
title = {1,500 Scientists Lift the Lid on Reproducibility},
author = {Baker, Monya},
year = {2016},
month = may,
journal = {Nature},
volume = {533},
number = {7604},
pages = {452--454},
publisher = {{Nature Publishing Group}},
issn = {1476-4687},
doi = {10.1038/533452a},
abstract = {Survey sheds light on the `crisis' rocking research.},
copyright = {2016 Nature Publishing Group},
language = {en},
keywords = {crisis}
}
@misc{bakker_Ensuring_2018,
title = {Ensuring the Quality and Specificity of Preregistrations},
author = {Bakker, Marjan and Veldkamp, Coosje Lisabet Sterre and van Assen, Marcel A. L. M. and Crompvoets, Elise Anne Victoire and Ong, How Hwee and Nosek, Brian A. and Soderberg, Courtney K. and Mellor, David Thomas and Wicherts, Jelte},
year = {2018},
month = sep,
institution = {{PsyArXiv}},
doi = {10.31234/osf.io/cdgyh},
abstract = {Researchers face many, often seemingly arbitrary choices in formulating hypotheses, designing protocols, collecting data, analyzing data, and reporting results. Opportunistic use of `researcher degrees of freedom' aimed at obtaining statistical significance increases the likelihood of obtaining and publishing false positive results and overestimated effect sizes. Preregistration is a mechanism for reducing such degrees of freedom by specifying designs and analysis plans before observing the research outcomes. The effectiveness of preregistration may depend, in part, on whether the process facilitates sufficiently specific articulation of such plans. In this preregistered study, we compared two formats of preregistration available on the OSF: Standard Pre-Data Collection Registration and Prereg Challenge registration (now called ``OSF Preregistration'', http://osf.io/prereg/). The Prereg Challenge format was a structured workflow with detailed instructions, and an independent review to confirm completeness; the ``Standard'' format was unstructured with minimal direct guidance to give researchers flexibility for what to pre-specify. Results of comparing random samples of 53 preregistrations from each format indicate that the structured format restricted the opportunistic use of researcher degrees of freedom better (Cliff's Delta = 0.49) than the unstructured format, but neither eliminated all researcher degrees of freedom. We also observed very low concordance among coders about the number of hypotheses (14\%), indicating that they are often not clearly stated. We conclude that effective preregistration is challenging, and registration formats that provide effective guidance may improve the quality of research.},
keywords = {Meta-science,preregistration,Quantitative Methods,Questionable research practices,researcher degrees of freedom,Social and Behavioral Sciences,Statistical Methods}
}
@misc{bakker_Questionable_2020,
title = {Questionable and Open Research Practices: Attitudes and Perceptions among Quantitative Communication Researchers},
shorttitle = {Questionable and Open Research Practices},
author = {Bakker, Bert N. and Jaidka, Kokil and D{\"o}rr, Timothy and Fasching, Neil and Lelkes, Yphtach},
year = {2020},
month = nov,
institution = {{PsyArXiv}},
doi = {10.31234/osf.io/7uyn5},
abstract = {Recent contributions have questioned the credibility of quantitative communication research. While questionable research practices are believed to be widespread, evidence for this claim is primarily derived from other disciplines. Before change in communication research can happen, it is important to document the extent to which QRPs are used and whether researchers are open to the changes proposed by the so-called open science agenda. We conducted a large survey among authors of papers published in the top-20 journals in communication science in the last ten years (N=1039). A non-trivial percent of researchers report using one or more QRPs. While QRPs are generally considered unacceptable, researchers perceive QRPs to be common among their colleagues. At the same time, we find optimism about the use of open science practices in communication research. We end with a series of recommendations outlining what journals, institutions and researchers can do moving forward.},
keywords = {other,Psychology,Social and Behavioral Sciences}
}
@article{banzato_soberania_2019,
title = {{Soberan\'ia del conocimiento para superar inequidades: pol\'iticas de Acceso Abierto para revistas cient\'ificas en Am\'erica Latina}},
author = {Banzato, Guillermo},
year = {2019},
journal = {Mecila Working Paper Series},
volume = {18},
pages = {1--18},
abstract = {Desde el comienzo de la era digital, determinadas pol\'iticas de gesti\'on de la ciencia han incrementado las inequidades en las condiciones de producci\'on del conocimiento y en las posibilidades de di\'alogo entre los colectivos de investigadores. A fines del siglo XX y principios del XXI se inici\'o una reacci\'on en las m\'as prestigiosas bibliotecas y comunidades cient\'ificas de Am\'erica del Norte y Europa Occidental, y Am\'erica Latina comenz\'o el desarrollo de sistemas de visibilidad propios, al tiempo que sucesivas declaraciones fueron definiendo al Acceso Abierto como estrategia para superar tales inequidades. En esta direcci\'on, se han desarrollado revistas en Acceso Abierto cuya sustentabilidad est\'a siendo puesta a prueba. Este trabajo presenta un breve estado de situaci\'on actualizado sobre algunos problemas que enfrentan los autores, evaluadores y editores latinoamericanos en la gesti\'on y publicaci\'on de los resultados de las investigaciones. Asimismo, en \'el se argumenta en pro del Acceso Abierto como herramienta primordial para garantizar la soberan\'ia del conocimiento en el Sur Global, y se sostiene que la propuesta colaborativa para la construcci\'on conjunta de un sistema sustentable de edici\'on cient\'ifica en Acceso Abierto puede ayudar a superar las inequidades en la producci\'on y difusi\'on del conocimiento latinoamericano},
language = {es}
}
@article{barba_Terminologies_2018,
title = {Terminologies for {{Reproducible Research}}},
author = {Barba, Lorena A.},
year = {2018},
month = feb,
journal = {arXiv:1802.03311 [cs]},
eprint = {1802.03311},
eprinttype = {arxiv},
primaryclass = {cs},
abstract = {Reproducible research---by its many names---has come to be regarded as a key concern across disciplines and stakeholder groups. Funding agencies and journals, professional societies and even mass media are paying attention, often focusing on the so-called "crisis" of reproducibility. One big problem keeps coming up among those seeking to tackle the issue: different groups are using terminologies in utter contradiction with each other. Looking at a broad sample of publications in different fields, we can classify their terminology via decision tree: they either, A---make no distinction between the words reproduce and replicate, or B---use them distinctly. If B, then they are commonly divided in two camps. In a spectrum of concerns that starts at a minimum standard of "same data+same methods=same results," to "new data and/or new methods in an independent study=same findings," group 1 calls the minimum standard reproduce, while group 2 calls it replicate. This direct swap of the two terms aggravates an already weighty issue. By attempting to inventory the terminologies across disciplines, I hope that some patterns will emerge to help us resolve the contradictions.},
archiveprefix = {arXiv},
keywords = {Computer Science - Digital Libraries}
}
@article{becerrilgarcia_end_2019,
title = {The {{End}} of a {{Centralized Open Access Project}} and the {{Beginning}} of a {{Community}}-{{Based Sustainable Infrastructure}} for {{Latin America}}},
author = {Becerril Garc{\'i}a, Arianna and Aguado L{\'o}pez, Eduardo},
year = {2019},
journal = {OpenEdition Press},
pages = {41--55},
doi = {10.4000/books.oep.9003},
abstract = {The Latin American region has an ecosystem where the nature of publication is conceived as the act of making public, of sharing, not as the publishing industry. International, national and institutional contexts have led to redefine a project\textemdash Redalyc.org\textemdash that began in 2003 and that has already fulfilled its original mission: give visibility to knowledge coming from Latin America and promote qualitative scientific journals. Nevertheless, it has to be transformed from a Latin American platform based in Mexico into a community- based regional infrastructure that continues assessing journals' quality and providing access to full-text, thus allowing visibility for journals and free access to knowledge. It is a framework that generates technology in favor of the empowerment and professionalization of journal editors, making sustainable the editorial task in open access so that Redalyc may sustain itself collectively. This work describes Redalyc's first model, presents the problematic in process and the new business model Redalyc is designing and adopting to operate.}
}
@misc{beigel_america_2021,
type = {{Scientific Blog}},
title = {{Am\'erica Latina podr\'ia convertirse en l\'ider mundial de la ciencia abierta no comercial}},
author = {Beigel, Fernanda},
year = {2021},
journal = {The Conversation},
abstract = {Para hacer frente a los retos del pr\'oximo siglo \textendash desde las pandemias hasta el cambio clim\'atico, pasando por la automatizaci\'on y el big data\textendash, la ciencia debe estar abierta a todas las personas del mundo. La ciudadan\'ia deben tener el mismo acceso a la informaci\'on que los investigadores, y estos necesitan acceder a repositorios de conocimiento de alta calidad e interconectados para avanzar en nuestra comprensi\'on del mundo que nos rodea.},
copyright = {Creative Commons 3.0},
language = {es}
}
@article{beigel_relaciones_2018,
title = {{Las relaciones de poder en la ciencia mundial}},
shorttitle = {{NUSO}},
author = {Beigel, Fernanda},
year = {2018},
journal = {Nueva Sociedad},
volume = {274},
pages = {13--28},
issn = {0251-3552},
abstract = {Los rankings universitarios se crearon principalmente para intervenir en los flujos internacionales de estudiantes, pero se convirtieron progresivamente en una fuente directa para reforzar el prestigio de un peque\~no grupo de universidades, de sus principales revistas y editoriales oligop\'olicas. Su aplicaci\'on tiende a volver cada vez m\'as perif\'erica a la ciencia desarrollada en los espacios alejados del circuito mainstream o de corriente principal. Por eso es necesario crear nuevas herramientas de medici\'onde la producci\'on cient\'ifica de la periferia que contemplen las interacciones de sus universidades en sus distintas direcciones, y no solo con los circuitos dominantes.},
language = {es}
}
@article{benjamin-chung_Internal_2020,
title = {Internal Replication of Computational Workflows in Scientific Research},
author = {{Benjamin-Chung}, Jade and Colford, Jr., John M. and Mertens, Andrew and Hubbard, Alan E. and Arnold, Benjamin F.},
year = {2020},
month = jun,
journal = {Gates Open Research},
volume = {4},
pages = {17},
issn = {2572-4754},
doi = {10.12688/gatesopenres.13108.2},
abstract = {Failures to reproduce research findings across scientific disciplines from psychology to physics have garnered increasing attention in recent years. External replication of published findings by outside investigators has emerged as a method to detect errors and bias in the published literature. However, some studies influence policy and practice before external replication efforts can confirm or challenge the original contributions. Uncovering and resolving errors before publication would increase the efficiency of the scientific process by increasing the accuracy of published evidence. Here we summarize the rationale and best practices for internal replication, a process in which multiple independent data analysts replicate an analysis and correct errors prior to publication. We explain how internal replication should reduce errors and bias that arise during data analyses and argue that it will be most effective when coupled with pre-specified hypotheses and analysis plans and performed with data analysts masked to experimental group assignments. By improving the reproducibility of published evidence, internal replication should contribute to more rapid scientific advances.},
language = {en}
}
@article{benning_Registration_2019a,
title = {The {{Registration Continuum}} in {{Clinical Science}}: {{A Guide Toward Transparent Practices}}},
shorttitle = {The {{Registration Continuum}} in {{Clinical Science}}},
author = {Benning, Stephen D. and Bachrach, Rachel L. and Smith, Edward A. and Freeman, Andrew J. and Wright, Aidan G. C.},
year = {2019},
month = aug,
journal = {Journal of Abnormal Psychology},
volume = {128},
number = {6},
pages = {528--540},
publisher = {{Amer Psychological Assoc}},
address = {{Washington}},
issn = {0021-843X},
doi = {10.1037/abn0000451},
abstract = {Clinical scientists can use a continuum of registration efforts that vary in their disclosure and timing relative to data collection and analysis. Broadly speaking, registration benefits investigators by offering stronger, more powerful tests of theory with particular methods in tandem with better control of long-run false positive error rates. Registration helps clinical researchers in thinking through tensions between bandwidth and fidelity that surround recruiting participants, defining clinical phenotypes, handling comorbidity, treating missing data, and analyzing rich and complex data. In particular, registration helps record and justify the reasons behind specific study design decisions, though it also provides the opportunity to register entire decision trees with specific endpoints. Creating ever more faithful registrations and standard operating procedures may offer alternative methods of judging a clinical investigator's scientific skill and eminence because study registration increases the transparency of clinical researchers' work.},
language = {English},
keywords = {coregistration,credibility,disorders,flexibility,framework,postregistration,preregistration,psychopathology,registered-reports,symptoms,transparency},
annotation = {WOS:000478024300006}
}
@article{bergh_there_2017,
title = {Is There a Credibility Crisis in Strategic Management Research? {{Evidence}} on the Reproducibility of Study Findings},
shorttitle = {Is There a Credibility Crisis in Strategic Management Research?},
author = {Bergh, Donald D and Sharp, Barton M and Aguinis, Herman and Li, Ming},
year = {2017},
month = aug,
journal = {Strategic Organization},
volume = {15},
number = {3},
pages = {423--436},
publisher = {{SAGE Publications}},
issn = {1476-1270},
doi = {10.1177/1476127017701076},
abstract = {Recent studies report an inability to replicate previously published research, leading some to suggest that scientific knowledge is facing a credibility crisis. In this essay, we provide evidence on whether strategic management research may itself be vulnerable to these concerns. We conducted a study whereby we attempted to reproduce the empirical findings of 88 articles appearing in the Strategic Management Journal using data reported in the articles themselves. About 70\% of the studies did not disclose enough data to permit independent tests of reproducibility of their findings. Of those that could be retested, almost one-third reported hypotheses as statistically significant which were no longer so and far more significant results were found to be non-significant in the reproductions than in the opposite direction. Collectively, incomplete reporting practices, disclosure errors, and possible opportunism limit the reproducibility of most studies. Until disclosure standards and requirements change to include more complete reporting and facilitate tests of reproducibility, the strategic management field appears vulnerable to a credibility crisis.},
language = {en},
keywords = {crisis,knowledge credibility,replication,reproducibility}
}
@article{bergkvist_Preregistration_2020a,
title = {Preregistration as a Way to Limit Questionable Research Practice in Advertising Research},
author = {Bergkvist, Lars},
year = {2020},
month = oct,
journal = {International Journal of Advertising},
volume = {39},
number = {7},
pages = {1172--1180},
publisher = {{Routledge Journals, Taylor \& Francis Ltd}},
address = {{Abingdon}},
issn = {0265-0487},
doi = {10.1080/02650487.2020.1753441},
abstract = {This paper discusses two phenomena that threaten the credibility of scientific research and suggests an approach to limiting the extent of their use in advertising research. HARKing (hypothesizing after the results are known) refers to when hypotheses are formulated or modified after the results of a study are known. P-hacking refers to various practices (e.g., adding respondents, introducing control variables) that increase the likelihood of obtaining statistically significant results from a study. Both of these practices increase the risk of false positives (Type I errors) in research results and it is in the interest of the advertising research field that they are limited. Voluntary preregistration, where researchers commit to and register their research design and analytical approach before conducting the study, is put forward as a means to limiting both HARKing and p-hacking.},
language = {English},
keywords = {HARKing,journals,methodology,P-hacking,preregistration,publication bias,questionable research practice,replication},
annotation = {WOS:000559843700001}
}
@article{berlin_declaracion_2003,
title = {La {{Declaraci\'on}} de {{Berl\'in}} Sobre Acceso Abierto},
author = {Berl{\'i}n},
year = {2003},
series = {Sociedad {{Max Planck}}},
volume = {1},
number = {2},
pages = {152--154}
}
@misc{bethesda_declaracion_2003,
title = {Declaraci\'on de {{Bethesda}} Sobre Publicaci\'on de Acceso Abierto},
author = {Bethesda},
year = {2003}
}
@article{bishop_Problems_2016,
title = {Problems in Using P-Curve Analysis and Text-Mining to Detect Rate of p-Hacking and Evidential Value},
author = {Bishop, Dorothy V. M. and Thompson, Paul A.},
year = {2016},
month = feb,
journal = {PeerJ},
volume = {4},
pages = {e1715},
publisher = {{PeerJ Inc.}},
address = {{London}},
issn = {2167-8359},
doi = {10.7717/peerj.1715},
abstract = {Background. The p-curve is a plot of the distribution of p-values reported in a set of scientific studies. Comparisons between ranges of p-values have been used to evaluate fields of research in terms of the extent to which studies have genuine evidential value, and the extent to which they suffer from bias in the selection of variables and analyses for publication, p-hacking. Methods. p-hacking can take various forms. Here we used R code to simulate the use of ghost variables, where an experimenter gathers data on several dependent variables but reports only those with statistically significant effects. We also examined a text-mined dataset used by Head et al. (2015) and assessed its suitability for investigating p-hacking. Results. We show that when there is ghost p-hacking, the shape of the p-curve depends on whether dependent variables are intercorrelated. For uncorrelated variables, simulated p-hacked data do not give the "p-hacking bump" just below .05 that is regarded as evidence of p-hacking, though there is a negative skew when simulated variables are inter-correlated. The way p-curves vary according to features of underlying data poses problems when automated text mining is used to detect p-values in heterogeneous sets of published papers. Conclusions. The absence of a bump in the p-curve is not indicative of lack of p-hacking. Furthermore, while studies with evidential value will usually generate a right-skewed p-curve, we cannot treat a right-skewed p-curve as an indicator of the extent of evidential value, unless we have a model specific to the type of p-values entered into the analysis. We conclude that it is not feasible to use the p-curve to estimate the extent of p-hacking and evidential value unless there is considerable control over the type of data entered into the analysis. In particular, p-hacking with ghost variables is likely to be missed.},
language = {English},
keywords = {Correlation,Ghost variables,p-curve,p-hacking,Power,prevalence,publication,Reproducibility,Simulation,Text-mining},
annotation = {WOS:000370984200010}
}
@article{bishop_Rein_2019,
title = {Rein in the Four Horsemen of Irreproducibility},
author = {Bishop, Dorothy},
year = {2019},
month = apr,
journal = {Nature},
volume = {568},
number = {7753},
pages = {435},
publisher = {{Nature Publishing Group}},
doi = {10.1038/d41586-019-01307-2},
abstract = {Dorothy Bishop describes how threats to reproducibility, recognized but unaddressed for decades, might finally be brought under control.},
copyright = {2021 Nature},
language = {en},
keywords = {forrt}
}
@book{bjork_developing_2014,
title = {Developing an {{Effective}} {{Market}} for {{Open Access}} {{Article Processing Charges}}},
author = {Bj{\"o}rk, Bo-Christer and Solomon, David},
year = {2014},
publisher = {{Wellcome Trust}}
}
@article{bjork_gold_2017,
title = {Gold, {{Green}} and {{Black Open Access}}},
author = {Bj{\"o}rk, Bo-Christer},
year = {2017},
journal = {Learned Publishing},
volume = {30},
number = {2},
pages = {173--175},
doi = {10.1002/leap.1096},
abstract = {Universal open access (OA) to scholarly research publications is deceptively simple as a concept. Any scientific publications, whether found via a Google keyword search, or by trying to access a citation would be just one click away. But the path to get there from the current subscription-dominated journal publishing model has proved to be complex and filled with obstacles. Since the terms gold and green OA were coined almost 15 years ago, much of the debate inside the OA movement has been focused on the relative merits of these two paths (Harnad et al., 2004)},
copyright = {Creative Commons Attribution 4.0 International, Open Access},
language = {English}
}
@misc{blanca_informatica_2019,
type = {{Course}},
title = {{Inform\'atica para las ciencias de la vida: Unix y Python}},
author = {Blanca, Jos{\'e}},
year = {2019},
journal = {Bioinformatics at COMAV},
abstract = {The COMAV institute is devoted to the preservation of the horticultural plant variability and to the plant breeding on horticultural species. To accomplish these goals we have developed some software that it might be of some use to other researchers like ngs\_backbone or sff\_extract.},
copyright = {Creative Commons 3.0},
language = {es}
}
@article{blincoe_Research_2020,
title = {Research {{Preregistration}} as a {{Teaching}} and {{Learning Tool}} in {{Undergraduate Psychology Courses}}},
author = {Blincoe, Sarai and Buchert, Stephanie},
year = {2020},
month = mar,
journal = {Psychology Learning and Teaching},
volume = {19},
number = {1},
pages = {107--115},
publisher = {{Sage Publications Ltd}},
address = {{London}},
issn = {1475-7257},
doi = {10.1177/1475725719875844},
abstract = {The preregistration of research plans and hypotheses may prevent publication bias and questionable research practices. We incorporated a modified version of the preregistration process into an undergraduate capstone research course. Students completed a standard preregistration form during the planning stages of their research projects as well as surveys about their knowledge of preregistration. Based on survey results, our senior-level psychology students lacked knowledge of importance of the preregistration movement in the sciences but could anticipate some of its benefits. Our review of the completed preregistration assignment suggested that students struggle with data analysis decision-making but generally perceive preregistration as a helpful planning tool. We discuss the value of a preregistration assignment for generating discussions of research practice and ethics.},
language = {English},
keywords = {Preregistration,questionable research practices,undergraduate preregistration assignment},
annotation = {WOS:000488433900001}
}
@misc{boai_diez_2012,
title = {{Diez a\~nos desde la Budapest Open Access Initiative: hacia lo abierto por defecto}},
author = {BOAI, Budapest Open Access Initiative},
year = {2012},
collaborator = {Melero, Remedios and Babini, Dominique},
language = {es}
}
@misc{boai_iniciativa_2002,
title = {{Iniciativa de Budapest para el Acceso Abierto}},
author = {BOAI, Budapest Open Access Initiative},
year = {2002},
language = {es}
}
@article{bohannon_who_2016,
title = {Who's Downloading Pirated Papers? {{Everyone}}},
author = {Bohannon, John},
year = {2016},
journal = {Science},
publisher = {{American Association for the Advancement of Science}},
volume = {352},
number = {6285},
pages = {508--512},
issn = {1095-9203},
doi = {10.1126/science.352.6285.508},
copyright = {Creative Commons Attribution 4.0 International, Open Access}
}
@article{bowers_How_2016,
title = {How to Improve Your Relationship with Your Future Self},
author = {Bowers, Jake and Voors, Maarten},
year = {2016},
month = dec,
journal = {Revista de ciencia pol\'itica (Santiago)},
volume = {36},
number = {3},
pages = {829--848},
issn = {0718-090X},
doi = {10.4067/S0718-090X2016000300011}
}
@article{breznau_does_2021,
title = {Does {{Sociology Need Open Science}}?},
author = {Breznau, Nate},
year = {2021},
month = mar,
journal = {Societies},
volume = {11},
number = {1},
pages = {9},
publisher = {{Multidisciplinary Digital Publishing Institute}},
doi = {10.3390/soc11010009},
abstract = {Reliability, transparency, and ethical crises pushed many social science disciplines toward dramatic changes, in particular psychology and more recently political science. This paper discusses why sociology should also change. It reviews sociology as a discipline through the lens of current practices, definitions of sociology, positions of sociological associations, and a brief consideration of the arguments of three highly influential yet epistemologically diverse sociologists: Weber, Merton, and Habermas. It is a general overview for students and sociologists to quickly familiarize themselves with the state of sociology or explore the idea of open science and its relevance to their discipline.},
copyright = {http://creativecommons.org/licenses/by/3.0/},
language = {en},
keywords = {crisis of science,Habermas,Merton,open science,p-hacking,publication bias,replication,research ethics,revisado,science community,sociology legitimation,transparency,Weber}
}
@misc{breznau_observing_2021,
title = {Observing {{Many Researchers Using}} the {{Same Data}} and {{Hypothesis Reveals}} a {{Hidden Universe}} of {{Uncertainty}}},
author = {Breznau, Nate and Rinke, Eike Mark and Wuttke, Alexander and Adem, Muna and Adriaans, Jule and {Alvarez-Benjumea}, Amalia and Andersen, Henrik Kenneth and Auer, Daniel and Azevedo, Flavio and Bahnsen, Oke and Balzer, Dave and Bauer, Gerrit and Bauer, Paul C. and Baumann, Markus and Baute, Sharon and Benoit, Verena and Bernauer, Julian and Berning, Carl and Berthold, Anna and Bethke, Felix and Biegert, Thomas and Blinzler, Katharina and Blumenberg, Johannes and Bobzien, Licia and Bohman, Andrea and Bol, Thijs and Bostic, Amie and Brzozowska, Zuzanna and Burgdorf, Katharina and Burger, Kaspar and Busch, Kathrin and Castillo, Juan Carlos and Chan, Nathan and Christmann, Pablo and Connelly, Roxanne and Czymara, Christian S. and Damian, Elena and Ecker, Alejandro and Edelmann, Achim and Eger, Maureen A. and Ellerbrock, Simon and Forke, Anna and Forster, Andrea and Gaasendam, Chris and Gavras, Konstantin and Gayle, Vernon and Gessler, Theresa and Gnambs, Timo and Godefroidt, Am{\'e}lie and Gr{\"o}mping, Max and Gro{\ss}, Martin and Gruber, Stefan and Gummer, Tobias and Hadjar, Andreas and Heisig, Jan Paul and Hellmeier, Sebastian and Heyne, Stefanie and Hirsch, Magdalena and Hjerm, Mikael and Hochman, Oshrat and H{\"o}vermann, Andreas and Hunger, Sophia and Hunkler, Christian and Huth, Nora and Ignacz, Zsofia and Jacobs, Laura and Jacobsen, Jannes and Jaeger, Bastian and Jungkunz, Sebastian and Jungmann, Nils and Kauff, Mathias and Kleinert, Manuel and Klinger, Julia and Kolb, Jan-Philipp and Ko{\l}czy{\'n}ska, Marta and Kuk, John Seungmin and Kuni{\ss}en, Katharina and Sinatra, Dafina Kurti and Greinert, Alexander and Lersch, Philipp M. and L{\"o}bel, Lea-Maria and Lutscher, Philipp and Mader, Matthias and Madia, Joan and Malancu, Natalia and Maldonado, Luis and Marahrens, Helge and Martin, Nicole and Martinez, Paul and Mayerl, Jochen and Mayorga, Oscar Jose and McManus, Patricia and Wagner, Kyle and Meeusen, Cecil and Meierrieks, Daniel and Mellon, Jonathan and Merhout, Friedolin and Merk, Samuel and Meyer, Daniel and Micheli, Leticia and Mijs, Jonathan J. B. and Moya, Crist{\'o}bal and Neunhoeffer, Marcel and N{\"u}st, Daniel and Nyg{\aa}rd, Olav and Ochsenfeld, Fabian and Otte, Gunnar and Pechenkina, Anna and Prosser, Christopher and Raes, Louis and Ralston, Kevin and Ramos, Miguel and Roets, Arne and Rogers, Jonathan and Ropers, Guido and Samuel, Robin and Sand, Gregor and Schachter, Ariela and Schaeffer, Merlin and Schieferdecker, David and Schlueter, Elmar and Schmidt, Katja M. and Schmidt, Regine and {Schmidt-Catran}, Alexander and Schmiedeberg, Claudia and Schneider, J{\"u}rgen and Schoonvelde, Martijn and {Schulte-Cloos}, Julia and Schumann, Sandy and Schunck, Reinhard and Schupp, J{\"u}rgen and Seuring, Julian and Silber, Henning and Sleegers, Willem and Sonntag, Nico and Staudt, Alexander and Steiber, Nadia and Steiner, Nils and Sternberg, Sebastian and Stiers, Dieter and Stojmenovska, Dragana and Storz, Nora and Striessnig, Erich and Stroppe, Anne-Kathrin and Teltemann, Janna and Tibajev, Andrey and Tung, Brian B. and Vagni, Giacomo and Assche, Jasper Van and van der Linden, Meta and van der Noll, Jolanda and Hootegem, Arno Van and Vogtenhuber, Stefan and Voicu, Bogdan and Wagemans, Fieke and Wehl, Nadja and Werner, Hannah and Wiernik, Brenton M. and Winter, Fabian and Wolf, Christof and Yamada, Yuki and Zhang, Nan and Ziller, Conrad and Zins, Stefan and {\.Z}{\'o}{\l}tak, Tomasz and Nguyen, Hung H. V.},
year = {2021},
month = mar,
institution = {{MetaArXiv}},
doi = {10.31222/osf.io/cd5j9},
abstract = {How does noise generated by researcher decisions undermine the credibility of science? We test this by observing all decisions made among 73 research teams as they independently conduct studies on the same hypothesis with identical starting data. We find excessive variation of outcomes. When combined, the 107 observed research decisions taken across teams explained at most 2.6\% of the total variance in effect sizes and 10\% of the deviance in subjective conclusions. Expertise, prior beliefs and attitudes of the researchers explain even less. Each model deployed to test the hypothesis was unique, which highlights a vast universe of research design variability that is normally hidden from view and suggests humility when presenting and interpreting scientific findings.},
keywords = {Analytical Flexibility,Crowdsourced Replication Initiative,Crowdsourcing,Economics,Garden of Forking Paths,Immigration,Many Analysts,Meta-Science,Noise,Other Social and Behavioral Sciences,Political Science,Psychology,Researcher Degrees of Freedom,Researcher Variability,Social and Behavioral Sciences,Social Policy,Sociology}
}
@misc{breznau_Open_,
type = {Billet},
title = {Open Science in Sociology. {{What}}, Why and Now.},
author = {Breznau, Nate},
journal = {Crowdid},
abstract = {WHAT By now you've heard the term ``open science''. Although it has no global definition, its advocates tend toward certain agreements. Most definitions focus on the practical aspects of accessibility. ``\ldots the practice of science in such a way that others can collaborate and contribute, where research data, lab notes and other research processes are freely \ldots{} Continue reading Open science in sociology. What, why and now.},
language = {en-US}
}
@techreport{brodeur_Methods_2018,
type = {{{IZA Discussion Paper}}},
title = {Methods {{Matter}}: {{P}}-{{Hacking}} and {{Causal Inference}} in {{Economics}}},
shorttitle = {Methods {{Matter}}},
author = {Brodeur, Abel and Cook, Nikolai and Heyes, Anthony},
year = {2018},
month = aug,
number = {11796},
institution = {{Institute of Labor Economics (IZA)}},
abstract = {The economics 'credibility revolution' has promoted the identification of causal relationships using difference-in-differences (DID), instrumental variables (IV), randomized control trials (RCT) and regression discontinuity design (RDD) methods. The extent to which a reader should trust claims about the statistical significance of results proves very sensitive to method. Applying multiple methods to 13,440 hypothesis tests reported in 25 top economics journals in 2015, we show that selective publication and p-hacking is a substantial problem in research employing DID and (in particular) IV. RCT and RDD are much less problematic. Almost 25\% of claims of marginally significant results in IV papers are misleading.},
keywords = {causal inference,p-curves,p-hacking,practices,publication bias,research methods}
}
@article{brodeur_Star_2016,
title = {Star {{Wars}}: {{The Empirics Strike Back}}},
shorttitle = {Star {{Wars}}},
author = {Brodeur, Abel and L{\'e}, Mathias and Sangnier, Marc and Zylberberg, Yanos},
year = {2016},
month = jan,
journal = {American Economic Journal: Applied Economics},
volume = {8},
number = {1},
pages = {1--32},
issn = {1945-7782},
doi = {10.1257/app.20150044},
abstract = {Using 50,000 tests published in the AER, JPE, and QJE, we identify a residual in the distribution of tests that cannot be explained solely by journals favoring rejection of the null hypothesis. We observe a two-humped camel shape with missing p-values between 0.25 and 0.10 that can be retrieved just after the 0.05 threshold and represent 10-20 percent of marginally rejected tests. Our interpretation is that researchers inflate the value of just-rejected tests by choosing "significant" specifications. We propose a method to measure this residual and describe how it varies by article and author characteristics. (JEL A11, C13)},
language = {en},
keywords = {Market for Economists; Estimation: General,Role of Economics,Role of Economists}
}
@misc{budapestopenaccessinitiative_diez_2012,
title = {{Diez a\~nos desde la Budapest Open Access Initiative: hacia lo abierto por defecto}},
author = {Budapest Open Access Initiative},
year = {2012},
month = sep,
journal = {BOAI},
language = {es}
}
@misc{budapestopenaccessinitiative_iniciativa_2002,
title = {{Iniciativa de Budapest para el Acceso Abierto}},
author = {Budapest Open Access Initiative},
year = {2002},
month = feb,
journal = {Budapest Open Access Initiative},
language = {es}
}
@article{burlig_Improving_2018,
title = {Improving Transparency in Observational Social Science Research: {{A}} Pre-Analysis Plan Approach},
shorttitle = {Improving Transparency in Observational Social Science Research},
author = {Burlig, Fiona},
year = {2018},
month = jul,
journal = {Economics Letters},
volume = {168},
pages = {56--60},
issn = {0165-1765},
doi = {10.1016/j.econlet.2018.03.036},
abstract = {Social science research has undergone a credibility revolution, but these gains are at risk due to problematic research practices. Existing research on transparency has centered around randomized controlled trials, which constitute only a small fraction of research in economics. In this paper, I highlight three scenarios in which study preregistration can be credibly applied in non-experimental settings: cases where researchers collect their own data; prospective studies; and research using restricted-access data.},
language = {en},
keywords = {Confidential data,Observational research,Pre-registration,Transparency}
}
@misc{businessmanagementink_protecting_2016,
title = {Protecting {{Students}}' {{Intellectual Property}}},
author = {Business \& Management INK},
year = {2016},
journal = {Social Science Space}
}
@article{buttner_Are_2020,
title = {Are Questionable Research Practices Facilitating New Discoveries in Sport and Exercise Medicine? {{The}} Proportion of Supported Hypotheses Is Implausibly High},
shorttitle = {Are Questionable Research Practices Facilitating New Discoveries in Sport and Exercise Medicine?},
author = {Buttner, Fionn and Toomey, Elaine and McClean, Shane and Roe, Mark and Delahunt, Eamonn},
year = {2020},
month = nov,
journal = {British Journal of Sports Medicine},
volume = {54},
number = {22},
pages = {1365--1371},
publisher = {{BMJ Publishing Group}},
address = {{London}},
issn = {0306-3674},
doi = {10.1136/bjsports-2019-101863},
abstract = {Questionable research practices (QRPs) are intentional and unintentional practices that can occur when designing, conducting, analysing, and reporting research, producing biased study results. Sport and exercise medicine (SEM) research is vulnerable to the same QRPs that pervade the biomedical and psychological sciences, producing false-positive results and inflated effect sizes. Approximately 90\% of biomedical research reports supported study hypotheses, provoking suspicion about the field-wide presence of systematic biases to facilitate study findings that confirm researchers' expectations. In this education review, we introduce three common QRPs (ie, HARKing, P-hacking and Cherry-picking), perform a cross-sectional study to assess the proportion of original SEM research that reports supported study hypotheses, and draw attention to existing solutions and resources to overcome QRPs that manifest in exploratory research. We hypothesised that {$\geq$} 85\% of original SEM research studies would report supported study hypotheses. Two independent assessors systematically identified, screened, included, and extracted study data from original research articles published between 1 January 2019 and 31 May 2019 in the British Journal of Sports Medicine, Sports Medicine, the American Journal of Sports Medicine, and the Journal of Orthopaedic \& Sports Physical Therapy. We extracted data relating to whether studies reported that the primary hypothesis was supported or rejected by the results. Study hypotheses, methodologies, and analysis plans were preregistered at the Open Science Framework. One hundred and twenty-nine original research studies reported at least one study hypothesis, of which 106 (82.2\%) reported hypotheses that were supported by study results. Of 106 studies reporting that primary hypotheses were supported by study results, 75 (70.8\%) studies reported that the primary hypothesis was fully supported by study results. The primary study hypothesis was partially supported by study results in 28 (26.4\%) studies. We detail open science practices and resources that aim to safeguard against QRPs that belie the credibility and replicability of original research findings.},
language = {English},
keywords = {education,harking,incentives,methodological,publication decisions,publish,registered-reports,replicability,research,science,sport,statistics,tests,truth},
annotation = {WOS:000584953300013}
}
@article{button_Power_2013,
title = {Power Failure: Why Small Sample Size Undermines the Reliability of Neuroscience},
shorttitle = {Power Failure},
author = {Button, Katherine S. and Ioannidis, John P. A. and Mokrysz, Claire and Nosek, Brian A. and Flint, Jonathan and Robinson, Emma S. J. and Munaf{\`o}, Marcus R.},
year = {2013},
month = may,
journal = {Nature Reviews Neuroscience},
volume = {14},
number = {5},
pages = {365--376},
publisher = {{Nature Publishing Group}},
issn = {1471-0048},
doi = {10.1038/nrn3475},
abstract = {Low statistical power undermines the purpose of scientific research; it reduces the chance of detecting a true effect. Perhaps less intuitively, low power also reduces the likelihood that a statistically significant result reflects a true effect. Empirically, we estimate the median statistical power of studies in the neurosciences is between {$\sim$}8\% and {$\sim$}31\%. We discuss the consequences of such low statistical power, which include overestimates of effect size and low reproducibility of results. There are ethical dimensions to the problem of low power; unreliable research is inefficient and wasteful. Improving reproducibility in neuroscience is a key priority and requires attention to well-established, but often ignored, methodological principles. We discuss how problems associated with low power can be addressed by adopting current best-practice and make clear recommendations for how to achieve this.},
copyright = {2013 Nature Publishing Group, a division of Macmillan Publishers Limited. All Rights Reserved.},
language = {en},
keywords = {practices}
}
@article{byington_Solutions_2017,
title = {Solutions to the {{Credibility Crisis}} in {{Management Science}}},
author = {Byington, Eliza and Felps, Will},
year = {2017},
month = mar,
journal = {Academy of Management Learning \& Education},
volume = {16},
pages = {142--162},
doi = {10.5465/amle.2015.0035},
abstract = {This article argues much academic misconduct can be explained as the result of social dilemmas occurring at two levels of Management science. First, the career benefits associated with engaging in Noncredible Research Practices (NCRPs) (e.g. data manipulation, fabricating results, data hoarding, undisclosed HARKing) result in many academics choosing self-interest over collective welfare. These perverse incentives derive from journal gatekeepers who are pressed into a similar social dilemma. Namely, an individual journal's status (i.e. its ``impact factor'') is likely to suffer from unilaterally implementing practices that help ensure the credibility of Management science claims (e.g. dedicating journal space to strict replications, crowd-sourcing replications, data submission requirements, in-house analysis checks, registered reports, Open Practice badges). Fortunately, research on social dilemmas and collective action offers solutions. For example, journal editors could pledge to publish a certain number of credibility boosting articles contingent on a proportion of their ``peer'' journals doing the same. Details for successful implementation of conditional pledges, other social dilemma solutions \textendash{} including actions for Management academics who support changes in journal practices (e.g. reviewer boycotts / buycotts), and insights on credibility supportive journal practices from other fields are provided.},
keywords = {crisis}
}
@article{caldwell_Moving_2020,
title = {Moving {{Sport}} and {{Exercise Science Forward}}: {{A Call}} for the {{Adoption}} of {{More Transparent Research Practices}}},
shorttitle = {Moving {{Sport}} and {{Exercise Science Forward}}},
author = {Caldwell, Aaron R. and Vigotsky, Andrew D. and Tenan, Matthew S. and Radel, R{\'e}mi and Mellor, David T. and Kreutzer, Andreas and Lahart, Ian M. and Mills, John P. and Boisgontier, Matthieu P. and Boardley, Ian and Bouza, Brooke and Cheval, Boris and Chow, Zad Rafi and Contreras, Bret and Dieter, Brad and Halperin, Israel and Haun, Cody and Knudson, Duane and Lahti, Johan and Miller, Matthew and Morin, Jean-Benoit and Naughton, Mitchell and Neva, Jason and Nuckols, Greg and Peters, Sue and Roberts, Brandon and {Rosa-Caldwell}, Megan and Schmidt, Julia and Schoenfeld, Brad J. and Severin, Richard and Skarabot, Jakob and Steele, James and Twomey, Rosie and Zenko, Zachary and Lohse, Keith R. and Nunan, David and {Consortium for Transparency in Exercise Science (COTES) Collaborators}},
year = {2020},
month = mar,
journal = {Sports Medicine},
volume = {50},
number = {3},
pages = {449--459},
issn = {1179-2035},
doi = {10.1007/s40279-019-01227-1},
abstract = {The primary means of disseminating sport and exercise science research is currently through journal articles. However, not all studies, especially those with null findings, make it to formal publication. This publication bias towards positive findings may contribute to questionable research practices. Preregistration is a solution to prevent the publication of distorted evidence resulting from this system. This process asks authors to register their hypotheses and methods before data collection on a publicly available repository or by submitting a Registered Report. In the Registered Report format, authors submit a stage 1 manuscript to a participating journal that includes an introduction, methods, and any pilot data indicating the exploratory or confirmatory nature of the study. After a stage 1 peer review, the manuscript can then be offered in-principle acceptance, rejected, or sent back for revisions to improve the quality of the study. If accepted, the project is guaranteed publication, assuming the authors follow the data collection and analysis protocol. After data collection, authors re-submit a stage 2 manuscript that includes the results and discussion, and the study is evaluated on clarity and conformity with the planned analysis. In its final form, Registered Reports appear almost identical to a typical publication, but give readers confidence that the hypotheses and main analyses are less susceptible to bias from questionable research practices. From this perspective, we argue that inclusion of Registered Reports by researchers and journals will improve the transparency, replicability, and trust in sport and exercise science research. The preprint version of this work is available on SportR{$\chi$}iv: https://osf.io/preprints/sportrxiv/fxe7a/.},
language = {en},
keywords = {transparency}
}
@article{caldwell_Moving_2020a,
title = {Moving {{Sport}} and {{Exercise Science Forward}}: {{A Call}} for the {{Adoption}} of {{More Transparent Research Practices}}},
shorttitle = {Moving {{Sport}} and {{Exercise Science Forward}}},
author = {Caldwell, Aaron R. and Vigotsky, Andrew D. and Tenan, Matthew S. and Radel, Remi and Mellor, David T. and Kreutzer, Andreas and Lahart, Ian M. and Mills, John P. and Boisgontier, Matthieu P. and Boardley, Ian and Bouza, Brooke and Cheval, Boris and Chow, Zad Rafi and Contreras, Bret and Dieter, Brad and Halperin, Israel and Haun, Cody and Knudson, Duane and Lahti, Johan and Lohse, Keith and Miller, Matthew and Morin, Jean-Benoit and Naughton, Mitchell and Neva, Jason and Nuckols, Greg and Nunan, David and Peters, Sue and Roberts, Brandon and {Rosa-Caldwell}, Megan and Schmidt, Julia and Schoenfeld, Brad J. and Severin, Richard and Skarabot, Jakob and Steele, James and Twomey, Rosie and Zenko, Zachary},
year = {2020},
month = mar,
journal = {Sports Medicine},
volume = {50},
number = {3},
pages = {449--459},
publisher = {{Adis Int Ltd}},
address = {{Northcote}},
issn = {0112-1642},
doi = {10.1007/s40279-019-01227-1},
abstract = {The primary means of disseminating sport and exercise science research is currently through journal articles. However, not all studies, especially those with null findings, make it to formal publication. This publication bias towards positive findings may contribute to questionable research practices. Preregistration is a solution to prevent the publication of distorted evidence resulting from this system. This process asks authors to register their hypotheses and methods before data collection on a publicly available repository or by submitting a Registered Report. In the Registered Report format, authors submit a stage 1 manuscript to a participating journal that includes an introduction, methods, and any pilot data indicating the exploratory or confirmatory nature of the study. After a stage 1 peer review, the manuscript can then be offered in-principle acceptance, rejected, or sent back for revisions to improve the quality of the study. If accepted, the project is guaranteed publication, assuming the authors follow the data collection and analysis protocol. After data collection, authors re-submit a stage 2 manuscript that includes the results and discussion, and the study is evaluated on clarity and conformity with the planned analysis. In its final form, Registered Reports appear almost identical to a typical publication, but give readers confidence that the hypotheses and main analyses are less susceptible to bias from questionable research practices. From this perspective, we argue that inclusion of Registered Reports by researchers and journals will improve the transparency, replicability, and trust in sport and exercise science research. The preprint version of this work is available on SportR{$\chi$}iv: https://osf.io/preprints/sportrxiv/fxe7a/.},
language = {English},
keywords = {associations,health,increase,model,power,publication bias,registered-reports,true},
annotation = {WOS:000511041300002}
}
@article{camerer_Evaluating_2018,
title = {Evaluating the Replicability of Social Science Experiments in {{Nature}} and {{Science}} between 2010 and 2015},
author = {Camerer, Colin F. and Dreber, Anna and Holzmeister, Felix and Ho, Teck-Hua and Huber, J{\"u}rgen and Johannesson, Magnus and Kirchler, Michael and Nave, Gideon and Nosek, Brian A. and Pfeiffer, Thomas and Altmejd, Adam and Buttrick, Nick and Chan, Taizan and Chen, Yiling and Forsell, Eskil and Gampa, Anup and Heikensten, Emma and Hummer, Lily and Imai, Taisuke and Isaksson, Siri and Manfredi, Dylan and Rose, Julia and Wagenmakers, Eric-Jan and Wu, Hang},
year = {2018},
month = sep,
journal = {Nature Human Behaviour},
volume = {2},
number = {9},
pages = {637--644},
publisher = {{Nature Publishing Group}},
issn = {2397-3374},
doi = {10.1038/s41562-018-0399-z},
abstract = {Being able to replicate scientific findings is crucial for scientific progress. We replicate 21 systematically selected experimental studies in the social sciences published in Nature and Science between 2010 and 2015. The replications follow analysis plans reviewed by the original authors and pre-registered prior to the replications. The replications are high powered, with sample sizes on average about five times higher than in the original studies. We find a significant effect in the same direction as the original study for 13 (62\%) studies, and the effect size of the replications is on average about 50\% of the original effect size. Replicability varies between 12 (57\%) and 14 (67\%) studies for complementary replicability indicators. Consistent with these results, the estimated true-positive rate is 67\% in a Bayesian analysis. The relative effect size of true positives is estimated to be 71\%, suggesting that both false positives and inflated effect sizes of true positives contribute to imperfect reproducibility. Furthermore, we find that peer beliefs of replicability are strongly related to replicability, suggesting that the research community could predict which results would replicate and that failures to replicate were not the result of chance alone.},
copyright = {2018 The Author(s)},
language = {en},
keywords = {crisis}
}
@article{campbell_Enhancing_2014,
title = {Enhancing Transparency of the Research Process to Increase Accuracy of Findings: {{A}} Guide for Relationship Researchers},
shorttitle = {Enhancing Transparency of the Research Process to Increase Accuracy of Findings},
author = {Campbell, Lorne and Loving, Timothy J. and Lebel, Etienne P.},
year = {2014},
journal = {Personal Relationships},
volume = {21},
number = {4},
pages = {531--545},
issn = {1475-6811},
doi = {10.1111/pere.12053},
abstract = {The purpose of this paper is to extend to the field of relationship science, recent discussions and suggested changes in open research practices. We demonstrate different ways that greater transparency of the research process in our field will accelerate scientific progress by increasing accuracy of reported research findings. Importantly, we make concrete recommendations for how relationship researchers can transition to greater disclosure of research practices in a manner that is sensitive to the unique design features of methodologies employed by relationship scientists. We discuss how to implement these recommendations for four different research designs regularly used in relationship research and practical limitations regarding implementing our recommendations and provide potential solutions to these problems.},
language = {en},
annotation = {\_eprint: https://onlinelibrary.wiley.com/doi/pdf/10.1111/pere.12053}
}
@article{card_Role_2011,
title = {The {{Role}} of {{Theory}} in {{Field Experiments}}},
author = {Card, David and DellaVigna, Stefano and Malmendier, Ulrike},
year = {2011},
month = sep,
journal = {Journal of Economic Perspectives},
volume = {25},
number = {3},
pages = {39--62},
issn = {0895-3309},
doi = {10.1257/jep.25.3.39},
abstract = {studies that estimate structural parameters in a completely specified model. We also classify laboratory experiments published in these journals over the same period and find that economic theory has played a more central role in the laboratory than in the field. Finally, we discuss in detail three sets of field experiments\textemdash on gift exchange, on charitable giving, and on negative income tax\textemdash that illustrate both the benefits and the potential costs of a tighter link between experimental design and theoretical underpinnings.},
language = {en},
keywords = {Field Experiments}
}
@article{carey_Fraud_2011,
title = {Fraud {{Case Seen}} as a {{Red Flag}} for {{Psychology Research}}},
author = {Carey, Benedict},
year = {2011},
month = nov,
journal = {The New York Times},
issn = {0362-4331},
abstract = {A Dutch scholar was found to have falsified findings in dozens of papers, in a field that critics say is vulnerable to such abuses.},
chapter = {Health},
language = {en-US},
keywords = {Falsification of Data,Frauds and Swindling,Psychology and Psychologists,Research,Stapel; Diederik}
}
@article{carrier_Facing_2017,
title = {Facing the {{Credibility Crisis}} of {{Science}}: {{On}} the {{Ambivalent Role}} of {{Pluralism}} in {{Establishing Relevance}} and {{Reliability}}},
shorttitle = {Facing the {{Credibility Crisis}} of {{Science}}},
author = {Carrier, Martin},
year = {2017},
month = may,
journal = {Perspectives on Science},
volume = {25},
number = {4},
pages = {439--464},
issn = {1063-6145},
doi = {10.1162/POSC_a_00249},
abstract = {Science at the interface with society is regarded with mistrust among parts of the public. Scientific judgments on matters of practical concern are not infrequently suspected of being incompetent and biased. I discuss two proposals for remedying this deficiency. The first aims at strengthening the independence of science and suggests increasing the distance to political and economic powers. The drawback is that this runs the risk of locking science in an academic ivory tower. The second proposal favors ``counter-politicization'' in that research is strongly focused on projects ``in the public interest,'' that is, on projects whose expected results will benefit all those concerned by these results. The disadvantage is that the future use of research findings cannot be delineated reliably in advance. I argue that the underlying problem is the perceived lack of relevance and reliability and that pluralism is an important step toward its solution. Pluralism serves to stimulate a more inclusive research agenda and strengthens the well-testedness of scientific approaches. However, pluralism also prevents the emergence of clear-cut practical suggestions. Accordingly, pluralism is part of the solution to the credibility crisis of science, but also part of the problem. In order for science to be suitable as a guide for practice, the leeway of scientific options needs to be narrowed \textendash{} in spite of uncertainty in epistemic respect. This reduction can be achieved by appeal to criteria that do not focus on the epistemic credentials of the suggestions but on their appropriateness in practical respect.},
keywords = {crisis}
}
@article{chambers_Registered_2013,
title = {Registered {{Reports}}: {{A}} New Publishing Initiative at~{{Cortex}}},
shorttitle = {Registered {{Reports}}},
author = {Chambers, Christopher D.},
year = {2013},
month = mar,
journal = {Cortex},
volume = {49},
number = {3},
pages = {609--610},
issn = {0010-9452},
doi = {10.1016/j.cortex.2012.12.016},
language = {en},
keywords = {forrt,reports}
}
@misc{chambers_Registered_2014,
title = {Registered {{Reports}}: {{A}} Step Change in Scientific Publishing},
author = {Chambers, Christopher D.},
year = {2014},
journal = {Reviewers' Update},
abstract = {Professor Chris Chambers, Registered Reports Editor of the Elsevier journal Cortex and one of the concept's founders, on how the initiative combats publication bias},
howpublished = {https://www.elsevier.com/connect/reviewers-update/registered-reports-a-step-change-in-scientific-publishing},
language = {en},
keywords = {forrt,reports}
}
@article{chambers_Registered_2015,
title = {Registered Reports: Realigning Incentives in Scientific Publishing},
shorttitle = {Registered Reports},
author = {Chambers, Christopher D. and Dienes, Zoltan and McIntosh, Robert D. and Rotshtein, Pia and Willmes, Klaus},
year = {2015},
month = may,
journal = {Cortex},
volume = {66},
pages = {A1--A2},
issn = {1973-8102},
doi = {10.1016/j.cortex.2015.03.022},
language = {eng},
pmid = {25892410},
keywords = {Biomedical Research,Editorial Policies,forrt,Humans,Motivation,Peer Review; Research,Publication Bias,Publishing,reports,Reproducibility of Results}
}
@article{chambers_Ten_2015,
title = {Ten {{Reasons Why Journals Must Review Manuscripts Before Results Are Known}}},
author = {Chambers, Christopher D.},
year = {2015},
month = jan,
journal = {Addiction},
volume = {110},
number = {1},
pages = {10--11},
publisher = {{Wiley}},
address = {{Hoboken}},
issn = {0965-2140},
doi = {10.1111/add.12728},
language = {English},
keywords = {False positives,incentives,publication bias,questionable research practices,registered reports,registered-reports,reproducibility,study pre-registration,truth},
annotation = {WOS:000346699700004}
}
@techreport{chin_Improving_2019,
type = {{{SSRN Scholarly Paper}}},
title = {Improving {{Expert Evidence}}: {{The Role}} of {{Open Science}} and {{Transparency}}},
shorttitle = {Improving {{Expert Evidence}}},
author = {Chin, Jason and Growns, Bethany and Mellor, David},
year = {2019},
month = feb,
number = {ID 3345225},
address = {{Rochester, NY}},
institution = {{Social Science Research Network}},
doi = {10.2139/ssrn.3345225},
abstract = {Both science and expert evidence law are undergoing significant changes. In this article, the authors compare these two movements \textendash{} the open science movement and the evidence-based evidence movement. The open science movement encompasses the recent discovery of many irreproducible findings in science and the subsequent move towards more transparent methods. The evidence-based evidence movement is the discovery that many forms of expert evidence are unreliable, and that they have contributed to wrongful convictions. The authors identify similarities between these movements, which suggest how courts and legal actors may learn from the open science movement to produce more accurate results. Expert witnesses should comport themselves as rigorous open scientists to produce evidence that is more susceptible to evaluation. Parties should be subjected to more specific and rigorous disclosure requirements because research has shown that even leading scientists find it easy to discount and suppress findings that do not support their hypotheses. And trial judges, as gatekeepers, should not defer to the generally accepted practices that have proven insufficient in the mainstream sciences.},
language = {en}
}
@article{chin_Questionable_2021,
title = {Questionable {{Research Practices}} and {{Open Science}} in {{Quantitative Criminology}}},
author = {Chin, Jason M. and Pickett, Justin T. and Vazire, Simine and Holcombe, Alex O.},
year = {2021},
journal = {Journal of Quantitative Criminology},
issn = {0748-4518},
doi = {10.1007/s10940-021-09525-6},
abstract = {Questionable research practices (QRPs) lead to incorrect research results and contribute to irreproducibility in science. Researchers and institutions have proposed open science practices (OSPs) to improve the detectability of QRPs and the credibility of science. We examine the prevalence of QRPs and OSPs in criminology, and researchers' opinions of those practices. We administered an anonymous survey to authors of articles published in criminology journals. Respondents self-reported their own use of 10 QRPs and 5 OSPs. They also estimated the prevalence of use by others, and reported their attitudes toward the practices. QRPs and OSPs are both common in quantitative criminology, about as common as they are in other fields. Criminologists who responded to our survey support using QRPs in some circumstances, but are even more supportive of using OSPs. We did not detect a significant relationship between methodological training and either QRP or OSP use. Support for QRPs is negatively and significantly associated with support for OSPs. Perceived prevalence estimates for some practices resembled a uniform distribution, suggesting criminologists have little knowledge of the proportion of researchers that engage in certain questionable practices. Most quantitative criminologists in our sample have used QRPs, and many have used multiple QRPs. Moreover, there was substantial support for QRPs, raising questions about the validity and reproducibility of published criminological research. We found promising levels of OSP use, albeit at levels lagging what researchers endorse. The findings thus suggest that additional reforms are needed to decrease QRP use and increase the use of OSPs.},
language = {en}
}
@article{chopik_Relationship_2020,
title = {Relationship Science and the Credibility Revolution: {{An}} Introduction to the First Part of the Special Issue},
shorttitle = {Relationship Science and the Credibility Revolution},
author = {Chopik, William J. and Chartier, Christopher R. and Campbell, Lorne and Donnellan, M. Brent},
year = {2020},
month = mar,
journal = {Personal Relationships},
volume = {27},
number = {1},
pages = {132--137},
publisher = {{Wiley}},
address = {{Hoboken}},
issn = {1350-4126},
doi = {10.1111/pere.12312},
abstract = {In the past 10 years, the field of relationship science \textendash{} like many other fields \textendash{} has been exposed to dramatic changes in how scientists approach the research process. Relationship science has been at the forefront of many recent changes in the field, whether it be high profile replication attempts or broader discussions about how to increase rigor and reproducibility. A major goal of this special issue was to provide an opportunity for relationship scientists to engage with these issues and reforms. The first four articles in this special issue represent a sampling of different approaches relationship researchers have used to enhance the credibility of their work.},
language = {English},
keywords = {credibility revolution,history,incentives,increase,personal relationships,registered reports,registered-reports,replication,special issue,truth},
annotation = {WOS:000518878700007}
}
@article{christensen_Transparency_2018,
title = {Transparency, {{Reproducibility}}, and the {{Credibility}} of {{Economics Research}}},
author = {Christensen, Garret and Miguel, Edward},
year = {2018},
month = sep,
journal = {Journal of Economic Literature},
volume = {56},
number = {3},
pages = {920--980},
issn = {0022-0515},
doi = {10.1257/jel.20171350},
abstract = {There is growing interest in enhancing research transparency and reproducibility in economics and other scientific fields. We survey existing work on these topics within economics, and discuss the evidence suggesting that publication bias, inability to replicate, and specification searching remain widespread in the discipline. We next discuss recent progress in this area, including through improved research design, study registration and pre-analysis plans, disclosure standards, and open sharing of data and materials, drawing on experiences in both economics and other social sciences. We discuss areas where consensus is emerging on new practices, as well as approaches that remain controversial, and speculate about the most effective ways to make economics research more credible in the future.},
language = {en},
keywords = {Market for Economists; Methodological Issues: General; Higher Education,Research Institutions,Role of Economics,Role of Economists}
}
@book{christensen_Transparent_2019,
title = {Transparent and Reproducible Social Science Research: How to Do Open Science},
shorttitle = {Transparent and Reproducible Social Science Research},
author = {Christensen, Garret S. and Freese, Jeremy and Miguel, Edward},
year = {2019},
publisher = {{University of California Press}},
address = {{Oakland, California}},
abstract = {"Social science practitioners have recently witnessed numerous episodes of influential research that fell apart upon close scrutiny. These instances have spurred suspicions that other published results may contain errors or may at least be less robust than they appear. In response, an influential movement has emerged across the social sciences for greater research transparency, openness, and reproducibility. Transparent and Reproducible Social Science Research crystallizes the new insights, practices, and methods of this rising interdisciplinary field"--Provided by publisher},
isbn = {978-0-520-96923-0},
lccn = {Q180.55.S7},
keywords = {Reproducible research,Research,Social sciences,transparency}
}
@article{chuard_Evidence_2019,
title = {Evidence That Nonsignificant Results Are Sometimes Preferred: {{Reverse P}}-Hacking or Selective Reporting?},
shorttitle = {Evidence That Nonsignificant Results Are Sometimes Preferred},
author = {Chuard, Pierre J. C. and Vrtilek, Milan and Head, Megan L. and Jennions, Michael D.},
year = {2019},
month = jan,
journal = {PLOS Biology},
volume = {17},
number = {1},
pages = {e3000127},
publisher = {{Public Library of Science}},
address = {{San Francisco}},
issn = {1544-9173},
doi = {10.1371/journal.pbio.3000127},
abstract = {There is increased concern about poor scientific practices arising from an excessive focus on P-values. Two particularly worrisome practices are selective reporting of significant results and 'P-hacking'. The latter is the manipulation of data collection, usage, or analyses to obtain statistically significant outcomes. Here, we introduce the novel, to our knowledge, concepts of selective reporting of nonsignificant results and 'reverse P-hacking' whereby researchers ensure that tests produce a nonsignificant result. We test whether these practices occur in experiments in which researchers randomly assign subjects to treatment and control groups to minimise differences in confounding variables that might affect the focal outcome. By chance alone, 5\% of tests for a group difference in confounding variables should yield a significant result (P {$<$} 0.05). If researchers less often report significant findings and/or reverse P-hack to avoid significant outcomes that undermine the ethos that experimental and control groups only differ with respect to actively manipulated variables, we expect significant results from tests for group differences to be under-represented in the literature. We surveyed the behavioural ecology literature and found significantly more nonsignificant P-values reported for tests of group differences in potentially confounding variables than the expected 95\% (P = 0.005; N = 250 studies). This novel, to our knowledge, publication bias could result from selective reporting of nonsignificant results and/or from reverse P-hacking. We encourage others to test for a bias toward publishing nonsignificant results in the equivalent context in their own research discipline.},
language = {English},
keywords = {ecology,randomized controlled-trials,values},
annotation = {WOS:000457596000029}
}
@article{chubin_Open_1985,
title = {Open {{Science}} and {{Closed Science}}: {{Tradeoffs}} in a {{Democracy}}},
shorttitle = {Open {{Science}} and {{Closed Science}}},
author = {Chubin, Daryl E.},
year = {1985},
month = apr,
journal = {Science, Technology, \& Human Values},
volume = {10},
number = {2},
pages = {73--80},
publisher = {{SAGE Publications Inc}},
issn = {0162-2439},
doi = {10.1177/016224398501000211},
language = {en}
}