@article{307,
  title = {The D-optimality item selection criterion in the early stage of CAT: A study with the graded response model},
  journal = {Journal of Educational and Behavioral Statistics},
  volume = {33},
  number = {1},
  year = {2008},
  pages = {88-110},
  abstract = {During the early stage of computerized adaptive testing (CAT), item selection criteria based on Fisher{\textquoteright}s information often produce less stable latent trait estimates than the Kullback-Leibler global information criterion. Robustness against early stage instability has been reported for the D-optimality criterion in a polytomous CAT with the Nominal Response Model and is shown herein to be reproducible for the Graded Response Model. For comparative purposes, the A-optimality and the global information criteria are also applied. Their item selection is investigated as a function of test progression and item bank composition. The results indicate how the selection of specific item parameters underlies the criteria performances evaluated via accuracy and precision of estimation. In addition, the criteria item exposure rates are compared, without the use of any exposure controlling measure. On the account of stability, precision, accuracy, numerical simplicity, and less evidently, item exposure rate, the D-optimality criterion can be recommended for CAT.},
  keywords = {computerized adaptive testing, D optimality, item selection},
  author = {Passos, V. L. and Berger, M. P. F. and Tan, F. E. S.}
}

@article{306,
  title = {Test design optimization in CAT early stage with the nominal response model},
  journal = {Applied Psychological Measurement},
  volume = {31},
  number = {3},
  year = {2007},
  pages = {213-232},
  publisher = {Sage Publications: US},
  abstract = {The early stage of computerized adaptive testing (CAT) refers to the phase of the trait estimation during the administration of only a few items. This phase can be characterized by bias and instability of estimation. In this study, an item selection criterion is introduced in an attempt to lessen this instability: the D-optimality criterion. A polytomous unconstrained CAT simulation is carried out to evaluate this criterion{\textquoteright}s performance under different test premises. The simulation shows that the extent of early stage instability depends primarily on the quality of the item pool information and its size and secondarily on the item selection criteria. The efficiency of the D-optimality criterion is similar to the efficiency of other known item selection criteria. Yet, it often yields estimates that, at the beginning of CAT, display a more robust performance against instability. (PsycINFO Database Record (c) 2007 APA, all rights reserved)},
  keywords = {computerized adaptive testing, nominal response model, robust performance, test design optimization},
  isbn = {0146-6216 (Print)},
  author = {Passos, V. L. and Berger, M. P. F. and Tan, F. E. S.}
}

@article{754,
  title = {Some new item selection criteria for adaptive testing},
  journal = {Journal of Educational and Behavioral Statistics},
  volume = {22},
  year = {1997},
  pages = {203-226},
  author = {Veerkamp, W. J. J. and Berger, M. P. F.}
}
@article{473,
  title = {A general approach to algorithmic design of fixed-form tests, adaptive tests, and testlets},
  journal = {Applied Psychological Measurement},
  volume = {18},
  year = {1994},
  pages = {141-153},
  author = {Berger, M. P. F.}
}

@booklet{1545,
  title = {Some new item selection criteria for adaptive testing (Research Rep 94-6)},
  year = {1994},
  address = {Enschede, The Netherlands: University of Twente, Department of Educational Measurement and Data Analysis},
  author = {Veerkamp, W. J. and Berger, M. P. F.}
}