@article{2047,
  title = {Comparison Between Dichotomous and Polytomous Scoring of Innovative Items in a Large-Scale Computerized Adaptive Test},
  journal = {Educational and Psychological Measurement},
  volume = {72},
  year = {2012},
  pages = {493-509},
  abstract = {This study explored the impact of partial credit scoring of one type of innovative item (multiple-response items) in a computerized adaptive version of a large-scale licensure test, in both pretest and operational test settings. The impact of partial credit scoring on the estimation of ability parameters and on classification decisions in operational test settings was explored in one real data analysis and two simulation studies, applying two different polytomous scoring algorithms: automated polytomous scoring and rater-generated polytomous scoring. In the real data analyses, the ability estimates from dichotomous and polytomous scoring were highly correlated, and the classification consistency between the scoring algorithms was nearly perfect. The information distribution in the operational item bank changed only slightly. In the two simulation studies comparing each polytomous scoring algorithm with dichotomous scoring, the ability estimates resulting from polytomous scoring had slightly higher measurement precision than those resulting from dichotomous scoring. The practical impact on classification decisions was minor because of the extremely small number of items that could be scored polytomously in the current study.},
  doi = {10.1177/0013164411422903},
  author = {Jiao, H. and Liu, J. and Haynie, K. and Woo, A. and Gorham, J.}
}

@booklet{1414,
  title = {An investigation of two combination procedures of SPRT for three-category decisions in computerized classification test},
  year = {2004},
  note = {PDF file, 649 KB},
  address = {Paper presented at the annual meeting of the American Educational Research Association, San Diego, CA},
  author = {Jiao, H. and Wang, S. and Lau, A.}
}

@proceedings{214,
  title = {An investigation of two combination procedures of SPRT for three-category classification decisions in computerized classification test},
  journal = {annual meeting of the American Educational Research Association},
  year = {2004},
  month = {04/2004},
  note = {Annual meeting of the American Educational Research Association, San Antonio},
  address = {San Antonio, Texas},
  keywords = {computerized adaptive testing, computerized classification testing, sequential probability ratio testing},
  author = {Jiao, H. and Wang, S. and Lau, C. A.}
}

@booklet{1413,
  title = {The effects of model misfit in computerized classification test},
  year = {2003},
  note = {PDF file, 432 KB},
  address = {Paper presented at the annual meeting of the National Council on Measurement in Education, Chicago, IL},
  author = {Jiao, H. and Lau, A. C.}
}

@article{213,
  title = {The effects of model specification error in item response theory-based computerized classification test using sequential probability ratio test},
  journal = {Dissertation Abstracts International Section A: Humanities \& Social Sciences},
  volume = {64},
  number = {2-A},
  year = {2003},
  pages = {478},
  abstract = {This study investigated the effects of model specification error on classification accuracy, error rates, and average test length in item response theory (IRT) based computerized classification tests (CCTs) using the sequential probability ratio test (SPRT) to make binary decisions from examinees{\textquoteright} dichotomous responses. The study consisted of three sub-studies. In each sub-study, one of the three unidimensional dichotomous IRT models, the 1-parameter logistic (1PL), the 2-parameter logistic (2PL), or the 3-parameter logistic (3PL) model, was set as the true model, and the other two models were treated as the misfit models. Item pool composition, test length, and stratum depth were manipulated to simulate different test conditions. To ensure the validity of the study results, CCTs based on the true model with the true and the recalibrated item parameters were compared first, to study the effect of estimation error in item parameters. Then the true-model-based and the misfit-model-based CCTs were compared to accomplish the research goal. The results indicated that estimation error in item parameters did not affect classification results based on CCTs using the SPRT. The effect of model specification error depended on the true model, the misfit model, and the item pool composition. When the 1PL or the 2PL IRT model was the true model, the use of another IRT model had little impact on the CCT results. When the 3PL IRT model was the true model, the use of the 1PL model raised the false positive error rates. The influence of using the 2PL instead of the 3PL model depended on the item pool composition. When the item discrimination parameters varied greatly from one, the use of the 2PL IRT model raised the false negative error rates above the nominal level.
In the simulated test conditions with test length and item exposure constraints, using a misfit model in CCTs most often affected the average test length; its effects on error rates and classification accuracy were negligible. It was concluded that in CCTs using the SPRT, IRT model selection and evaluation are indispensable. (PsycINFO Database Record (c) 2004 APA, all rights reserved)},
  author = {Jiao, H.}
}