@article{Maden_Kotas_2016,
  title = {Evaluating Approaches to Quality Assessment in Library and Information Science (LIS) Systematic Reviews: A Methodology Review},
  volume = {11},
  url = {https://journals.library.ualberta.ca/eblip/index.php/EBLIP/article/view/26988},
  DOI = {10.18438/B8F630},
  abstractNote = {Objective – Systematic reviews are becoming increasingly popular within the Library and Information Science (LIS) domain. This paper has three aims: to review approaches to quality assessment in published LIS systematic reviews in order to assess whether and how LIS reviewers report on quality assessment a priori in systematic reviews, to model the different quality assessment aids used by LIS reviewers, and to explore if and how LIS reviewers report on and incorporate the quality of included studies into the systematic review analysis and conclusions.
    Methods – The authors undertook a methodological study of published LIS systematic reviews using a known cohort of published systematic reviews of LIS-related research. Studies were included if they were reported as a “systematic review” in the title, abstract, or methods section. Meta-analyses that did not incorporate a systematic review and studies in which the systematic review was not a main objective were excluded. Two reviewers independently assessed the studies. Data were extracted on the type of synthesis, whether quality assessment was planned and undertaken, the number of reviewers involved in assessing quality, the types of tools or criteria used to assess the quality of the included studies, how quality assessment was assessed and reported in the systematic review, and whether the quality of the included studies was considered in the analysis and conclusions of the review. In order to determine the quality of the reporting and incorporation of quality assessment in LIS systematic reviews, each study was assessed against criteria relating to quality assessment in the PRISMA reporting guidelines for systematic reviews and meta-analyses (Moher, Liberati, Tetzlaff, Altman, & The PRISMA Group, 2009) and the AMSTAR tool (Shea et al., 2007).
    Results – Forty studies met the inclusion criteria. The results demonstrate great variation in the breadth, depth, and transparency of the quality assessment process in LIS systematic reviews. Nearly one third of the LIS systematic reviews included in this study did not report on quality assessment in the methods, and less than one quarter adequately incorporated quality assessment in the analysis, conclusions, and recommendations. Only nine of the 26 systematic reviews that undertook some form of quality assessment incorporated considerations of how the quality of the included studies impacted on the validity of the review findings in the analysis, conclusion, and recommendations. The large number of different quality assessment tools identified reflects not only the disparate nature of the LIS evidence base (Brettle, 2009) but also a lack of consensus around criteria on which to assess the quality of LIS research.
    Conclusion – Greater clarity, definition, and understanding of the methodology and concept of “quality” in the systematic review process are required not only by LIS reviewers but also by editors of journals in accepting such studies for publication. Further research and guidance is needed on identifying the best tools and approaches to incorporate considerations of quality in LIS systematic reviews. LIS reviewers need to improve the robustness and transparency with which quality assessment is undertaken and reported in systematic reviews. Above all, LIS reviewers need to be explicit in coming to a conclusion on how the quality of the included studies may impact on their review findings.},
  number = {2},
  journal = {Evidence Based Library and Information Practice},
  author = {Maden, Michelle and Kotas, Eleanor},
  year = {2016},
  month = {Jun.},
  pages = {149--176}
}