@article {2625,
  title = {What Information Works Best?: A Comparison of Routing Methods},
  journal = {Applied Psychological Measurement},
  volume = {42},
  number = {6},
  year = {2018},
  pages = {499-515},
  abstract = {Many item selection methods have been proposed for computerized adaptive testing (CAT) applications, but not all of them have been used in computerized adaptive multistage testing (ca-MST). This study uses five item selection methods as routing methods within the ca-MST framework: maximum Fisher information (MFI), maximum likelihood weighted information (MLWI), maximum posterior weighted information (MPWI), Kullback{\textendash}Leibler (KL), and posterior Kullback{\textendash}Leibler (KLP). The main purpose of this study is to examine the performance of these methods when they are used for routing in ca-MST applications. The five information methods were tested under four ca-MST panel designs and two test lengths (30 items and 60 items), using the parameters of a real item bank. Results were evaluated with overall findings (mean bias, root mean square error, correlation between true and estimated thetas, and module exposure rates) and conditional findings (conditional absolute bias, standard error of measurement, and root mean square error). Test length affected the outcomes much more than the other study conditions: under 30-item conditions, 1-3 designs outperformed the other panel designs, and under 60-item conditions, 1-3-3 designs were better. Each routing method performed well under particular conditions; there was no clear best method across the studied conditions. Recommendations for choosing a routing method under particular conditions are provided for researchers and practitioners, along with the limitations of these results.},
  doi = {10.1177/0146621617752990},
  url = {https://doi.org/10.1177/0146621617752990},
  author = {Halil Ibrahim Sari and Anthony Raborn}
}

@conference {2101,
  title = {Walking the Tightrope: Using Better Content Control to Improve CAT},
  booktitle = {Annual Conference of the International Association for Computerized Adaptive Testing},
  year = {2011},
  month = {10/2011},
  abstract = {
All testing involves a balance between measurement precision and content considerations, and CAT item-selection algorithms have evolved to accommodate content considerations. The presentation reviews this evolution, including: original (``pure'') adaptive exams; constrained CAT; the weighted-deviations method; the shadow-test approach; testlets in place of fully adaptive tests; cases where administration of one item may preclude the administration of other items; and item relationships.
},
  keywords = {CAT, CAT evolution, test content},
  author = {Kathleen A. Gialluca}
}

@article {76,
  title = {When cognitive diagnosis meets computerized adaptive testing: CD-CAT},
  journal = {Psychometrika},
  volume = {74},
  number = {4},
  year = {2009},
  pages = {619-632},
  author = {Cheng, Y.}
}

@article {128,
  title = {The Wald{\textendash}Wolfowitz Theorem Is Violated in Sequential Mastery Testing},
  journal = {Sequential Analysis},
  volume = {27},
  number = {3},
  year = {2008},
  pages = {293-303},
  author = {Finkelman, M.}
}

@inbook {108,
  title = {The work ahead: A psychometric infrastructure for computerized adaptive tests},
  booktitle = {Computer-based testing: Building the foundation for future assessments},
  year = {2002},
  pages = {1-35},
  publisher = {Lawrence Erlbaum Associates, Inc.},
  organization = {Lawrence Erlbaum Associates, Inc.},
  address = {Mahwah, NJ, USA},
  abstract = {Considers the past and future of computerized adaptive tests and computer-based tests, and looks at the issues and challenges confronting a testing program as it implements and operates a computer-based test. Recommendations for testing programs from the National Council on Measurement in Education Ad Hoc Committee on Computerized Adaptive Test Disclosure are appended.},
  keywords = {Adaptive Testing, Computer Assisted Testing, Educational Measurement, Psychometrics},
  author = {F. Drasgow},
  editor = {M. P. Potenza and J. J. Fremer and W. C. Ward}
}

@booklet {1350,
  title = {WISCAT: Een computergestuurd toetspakket voor rekenen en wiskunde [A computerized test package for arithmetic and mathematics]},
  year = {1999},
  address = {Cito: Arnhem, The Netherlands},
  author = {Cito}
}

@conference {1045,
  title = {What lies ahead? Computer technology and its implications for personnel testing},
  booktitle = {NATO Workshop on Computer-based Assessment of Military Personnel},
  year = {1991},
  note = {Keynote address},
  address = {Brussels, Belgium},
  author = {J. R. McBride}
}

@conference {995,
  title = {What can we do with computerized adaptive testing and what we cannot do?},
  booktitle = {Paper presented at the annual meeting of the Regional Language Center Seminar},
  year = {1990},
  note = {ERIC No. ED 322 782},
  author = {Laurier, M.}
}

@article {530,
  title = {Wilcox{\textquoteright} closed sequential testing procedure in stratified item domains},
  journal = {Methodika},
  volume = {1},
  number = {1},
  year = {1987},
  pages = {3-12},
  author = {de Gruijter, D. N.}
}

@booklet {1471,
  title = {A word knowledge item pool for adaptive ability measurement (Research Report 74-2)},
  year = {1974},
  address = {Minneapolis, MN: Department of Psychology, Computerized Adaptive Testing Laboratory},
  author = {J. R. McBride and Weiss, D. J.}
}