% Bibliography: computerized adaptive testing (CAT) and item response theory (IRT).
% Normalised: one field per line; `and`-separated author lists in "Last, First" form;
% `--` page ranges; ISSNs moved out of `isbn`; proceedings/chapter metadata moved out
% of `address` into booktitle/editor/pages; original citation keys preserved.

@inproceedings{1908,
  author    = {Thissen, D.},
  title     = {The {MEDPRO} project: An {SBIR} project for a comprehensive {IRT} and {CAT} software system: {IRT} software},
  booktitle = {Proceedings of the 2009 {GMAC} Conference on Computerized Adaptive Testing},
  editor    = {Weiss, D. J.},
  year      = {2009},
  note      = {PDF File, 817 K},
  abstract  = {IRTPRO (Item Response Theory for Patient-Reported Outcomes) is an entirely new application for item calibration and test scoring using IRT. IRTPRO implements algorithms for maximum likelihood estimation of item parameters (item calibration) for several unidimensional and multidimensional item response theory (IRT) models for dichotomous and polytomous item responses. In addition, the software provides computation of goodness-of-fit indices, statistics for the diagnosis of local dependence and for the detection of differential item functioning (DIF), and IRT scaled scores. This paper illustrates the use, and some capabilities, of the software.},
}

@article{29,
  author   = {Bjorner, J. B. and Chang, C-H. and Thissen, D. and Reeve, B. B.},
  title    = {Developing tailored instruments: item banking and computerized adaptive assessment},
  journal  = {Quality of Life Research},
  volume   = {16},
  number   = {Suppl 1},
  pages    = {95--108},
  year     = {2007},
  issn     = {0962-9343 (Print)},
  note     = {Bjorner, Jakob BueChang, Chih-HungThissen, DavidReeve, Bryce B1R43NS047763-01/NS/United States NINDSAG015815/AG/United States NIAResearch Support, N.I.H., ExtramuralNetherlandsQuality of life research : an international journal of quality of life aspects of treatment, care and rehabilitationQual Life Res. 2007;16 Suppl 1:95-108. Epub 2007 Feb 15.},
  abstract = {Item banks and Computerized Adaptive Testing (CAT) have the potential to greatly improve the assessment of health outcomes. This review describes the unique features of item banks and CAT and discusses how to develop item banks. In CAT, a computer selects the items from an item bank that are most relevant for and informative about the particular respondent; thus optimizing test relevance and precision. Item response theory (IRT) provides the foundation for selecting the items that are most informative for the particular respondent and for scoring responses on a common metric. The development of an item bank is a multi-stage process that requires a clear definition of the construct to be measured, good items, a careful psychometric analysis of the items, and a clear specification of the final CAT. The psychometric analysis needs to evaluate the assumptions of the IRT model such as unidimensionality and local independence; that the items function the same way in different subgroups of the population; and that there is an adequate fit between the data and the chosen item response models. Also, interpretation guidelines need to be established to help the clinical application of the assessment. Although medical research can draw upon expertise from educational testing in the development of item banks and CAT, the medical field also encounters unique opportunities and challenges.},
  keywords = {*Health Status, *Health Status Indicators, *Mental Health, *Outcome Assessment (Health Care), *Quality of Life, *Questionnaires, *Software, Algorithms, Factor Analysis, Statistical, Humans, Models, Statistical, Psychometrics},
}

@inproceedings{1762,
  author    = {Edwards, M. C. and Thissen, D.},
  title     = {Exploring potential designs for multi-form structure computerized adaptive tests with uniform item exposure},
  booktitle = {Proceedings of the 2007 {GMAC} Conference on Computerized Adaptive Testing},
  editor    = {Weiss, D. J.},
  year      = {2007},
  note      = {PDF file, 295 KB},
}

@article{386,
  author   = {Thissen, D. and Reeve, B. B. and Bjorner, J. B. and Chang, C-H.},
  title    = {Methodological issues for building item banks and computerized adaptive scales},
  journal  = {Quality of Life Research},
  volume   = {16},
  number   = {S1},
  pages    = {109--119},
  year     = {2007},
  issn     = {0962-9343 (Print); 1573-2649 (Online)},
  abstract = {This paper reviews important methodological considerations for developing item banks and computerized adaptive scales (commonly called computerized adaptive tests in the educational measurement literature, yielding the acronym CAT), including issues of the reference population, dimensionality, dichotomous versus polytomous response scales, differential item functioning (DIF) and conditional scoring, mode effects, the impact of local dependence, and innovative approaches to assessment using CATs in health outcomes research.},
}

@article{328,
  author   = {Reeve, B. B. and Hays, R. D. and Bjorner, J. B. and Cook, K. F. and Crane, P. K. and Teresi, J. A. and Thissen, D. and Revicki, D. A. and Weiss, D. J. and Hambleton, R. K. and Liu, H. and Gershon, R. C. and Reise, S. P. and Lai, J. S. and Cella, D.},
  title    = {Psychometric evaluation and calibration of health-related quality of life item banks: plans for the {Patient-Reported Outcomes Measurement Information System} ({PROMIS})},
  journal  = {Medical Care},
  volume   = {45},
  number   = {5 Suppl 1},
  pages    = {S22--S31},
  month    = may,
  year     = {2007},
  issn     = {0025-7079 (Print)},
  note     = {Reeve, Bryce BHays, Ron DBjorner, Jakob BCook, Karon FCrane, Paul KTeresi, Jeanne AThissen, DavidRevicki, Dennis AWeiss, David JHambleton, Ronald KLiu, HonghuGershon, RichardReise, Steven PLai, Jin-sheiCella, DavidPROMIS Cooperative GroupAG015815/AG/United States NIAResearch Support, N.I.H., ExtramuralUnited StatesMedical careMed Care. 2007 May;45(5 Suppl 1):S22-31.},
  abstract = {BACKGROUND: The construction and evaluation of item banks to measure unidimensional constructs of health-related quality of life (HRQOL) is a fundamental objective of the Patient-Reported Outcomes Measurement Information System (PROMIS) project. OBJECTIVES: Item banks will be used as the foundation for developing short-form instruments and enabling computerized adaptive testing. The PROMIS Steering Committee selected 5 HRQOL domains for initial focus: physical functioning, fatigue, pain, emotional distress, and social role participation. This report provides an overview of the methods used in the PROMIS item analyses and proposed calibration of item banks. ANALYSES: Analyses include evaluation of data quality (eg, logic and range checking, spread of response distribution within an item), descriptive statistics (eg, frequencies, means), item response theory model assumptions (unidimensionality, local independence, monotonicity), model fit, differential item functioning, and item calibration for banking. RECOMMENDATIONS: Summarized are key analytic issues; recommendations are provided for future evaluations of item banks in HRQOL assessment.},
  keywords = {*Health Status, *Information Systems, *Quality of Life, *Self Disclosure, Adolescent, Adult, Aged, Calibration, Databases as Topic, Evaluation Studies as Topic, Female, Humans, Male, Middle Aged, Outcome Assessment (Health Care)/*methods, Psychometrics, Questionnaires/standards, United States},
}

@article{275,
  author   = {McLeod, L. and Lewis, C. and Thissen, D.},
  title    = {A {Bayesian} method for the detection of item preknowledge in computerized adaptive testing},
  journal  = {Applied Psychological Measurement},
  volume   = {27},
  number   = {2},
  pages    = {121--137},
  year     = {2003},
  abstract = {With the increased use of continuous testing in computerized adaptive testing, new concerns about test security have evolved, such as how to ensure that items in an item pool are safeguarded from theft. In this article, procedures to detect test takers using item preknowledge are explored. When test takers use item preknowledge, their item responses deviate from the underlying item response theory (IRT) model, and estimated abilities may be inflated. This deviation may be detected through the use of person-fit indices. A Bayesian posterior log odds ratio index is proposed for detecting the use of item preknowledge. In this approach to person fit, the estimated probability that each test taker has preknowledge of items is updated after each item response. These probabilities are based on the IRT parameters, a model specifying the probability that each item has been memorized, and the test taker{\textquoteright}s item responses. Simulations based on an operational computerized adaptive test (CAT) pool are used to demonstrate the use of the odds ratio index. (PsycINFO Database Record (c) 2005 APA )},
  keywords = {Adaptive Testing, Cheating, Computer Assisted Testing, Individual Differences, computerized adaptive testing, Item Analysis (Statistical), Item Response Theory, Mathematical Modeling},
}

@article{641,
  author        = {McLeod, L. D. and Lewis, C. and Thissen, D.},
  title         = {A {Bayesian} method for the detection of item preknowledge in computerized adaptive testing},
  journal       = {Applied Psychological Measurement},
  volume        = {27},
  number        = {2},
  pages         = {121--137},
  year          = {2003},
  internal-note = {Duplicate of entry 275; retained because the key may be cited — consider consolidating.},
}

@inproceedings{1197,
  author    = {Thissen, D.},
  title     = {Developing tailored instruments: Item banking and computerized adaptive assessment},
  booktitle = {Paper presented at the conference {\textquotedblleft}Advances in Health Outcomes Measurement{\textquotedblright}},
  address   = {Bethesda, Maryland},
  year      = {2002},
  note      = {June 23--25; PDF file, 170 KB},
}

@inbook{385,
  author    = {Thissen, D. and Nelson, L. A. and Swygert, K. A.},
  title     = {Item response theory applied to combinations of multiple-choice and constructed-response items--approximation methods for scale scores},
  booktitle = {Test scoring},
  chapter   = {8},
  pages     = {289--315},
  publisher = {Lawrence Erlbaum Associates},
  address   = {Mahwah, NJ},
  year      = {2001},
  note      = {Test scoring. (pp. 293-341). Mahwah, NJ: Lawrence Erlbaum Associates, Publishers. xii, 422 pp},
  abstract  = {(From the chapter) The authors develop approximate methods that replace the scoring tables with weighted linear combinations of the component scores. Topics discussed include: a linear approximation for the extension to combinations of scores; the generalization of two or more scores; potential applications of linear approximations to item response theory in computerized adaptive tests; and evaluation of the pattern-of-summed-scores, and Gaussian approximation, estimates of proficiency. (PsycINFO Database Record (c) 2005 APA )},
  keywords  = {Adaptive Testing, Item Response Theory, Multiple Choice (Testing Method), Scoring (Testing), Statistical Estimation, Statistical Weighting, Test Items, Test Scores},
}

@book{1710,
  author    = {Wainer, H. and Dorans, N. and Eignor, D. R. and Flaugher, R. and Green, B. F. and Mislevy, R. and Steinberg, L. and Thissen, D.},
  title     = {Computerized adaptive testing: A primer},
  edition   = {Second},
  publisher = {Lawrence Erlbaum Associates},
  address   = {Hillsdale, NJ},
  year      = {2000},
}

@inproceedings{1196,
  author    = {Thissen, D.},
  title     = {Some item response theory to provide scale scores based on linear combinations of testlet scores, for computerized adaptive tests},
  booktitle = {Paper presented at the annual meeting of the Psychometric Society},
  address   = {Urbana, IL},
  year      = {1998},
}

@article{703,
  author  = {Sireci, S. G. and Wainer, H. and Thissen, D.},
  title   = {On the reliability of testlet-based tests},
  journal = {Journal of Educational Measurement},
  volume  = {28},
  pages   = {237--247},
  year    = {1991},
}

@book{1727,
  editor    = {Wainer, H. and Dorans, N. J. and Flaugher, R. and Green, B. F. and Mislevy, R. J. and Steinberg, L. and Thissen, D.},
  title     = {Computerized adaptive testing: A primer},
  publisher = {Erlbaum},
  address   = {Hillsdale, NJ},
  year      = {1990},
}

@incollection{1937,
  author    = {Wainer, H. and Dorans, N. J. and Green, B. F. and Mislevy, R. J. and Steinberg, L. and Thissen, D.},
  title     = {Future challenges},
  booktitle = {Computerized adaptive testing: A primer},
  editor    = {Wainer, H.},
  pages     = {233--272},
  publisher = {Erlbaum},
  address   = {Hillsdale, NJ},
  year      = {1990},
}

@incollection{1907,
  author    = {Thissen, D.},
  title     = {Reliability and measurement precision},
  booktitle = {Computerized adaptive testing: A primer},
  editor    = {Wainer, H. and Dorans, N. J. and Flaugher, R. and Green, B. F. and Mislevy, R. J. and Steinberg, L. and Thissen, D.},
  pages     = {161--186},
  publisher = {Erlbaum},
  address   = {Hillsdale, NJ},
  year      = {1990},
}

@incollection{1909,
  author    = {Thissen, D. and Mislevy, R. J.},
  title     = {Testing algorithms},
  booktitle = {Computerized adaptive testing: A primer},
  editor    = {Wainer, H.},
  pages     = {103--135},
  publisher = {Erlbaum},
  address   = {Hillsdale, NJ},
  year      = {1990},
}

@incollection{1896,
  author    = {Steinberg, L. and Thissen, D. and Wainer, H.},
  title     = {Validity},
  booktitle = {Computerized adaptive testing: A primer},
  editor    = {Wainer, H.},
  pages     = {187--231},
  publisher = {Erlbaum},
  address   = {Hillsdale, NJ},
  year      = {1990},
}

@article{723,
  author  = {Thissen, D. and Steinberg, L. and Mooney, J. A.},
  title   = {Trace lines for testlets: A use of multiple-categorical-response models},
  journal = {Journal of Educational Measurement},
  volume  = {26},
  pages   = {247--260},
  year    = {1989},
}