{"id":475,"slug":"bio-nlp-umass--medthinkvqa","name":"MedThinkVQA","author":"bio-nlp-umass","description":"\n\t\n\t\t\n\t\tMedThinkVQA\n\t\n\nMedThinkVQA is an expert-annotated benchmark for multi-image diagnostic reasoning in radiology. Unlike prior medical VQA benchmarks that typically contain at most one image per case, MedThinkVQA requires models to extract evidence from each image, integrate cross-view information, and perform differential-diagnosis reasoning.\nLinks\n\nGitHub: https://github.com/benluwang/MedThinkVQA\nLeaderboard: https://benluwang.github.io/MedThinkVQA/\nSubmission Guide:… See the full description on the dataset page: https://huggingface.co/datasets/bio-nlp-umass/MedThinkVQA.","tags":"[\"Task_categories:question-Answering\",\"Task_categories:text-Generation\",\"Language:en\",\"Size_categories:1K<n<10K\",\"Format:parquet\",\"Modality:image\"]","license":null,"framework":null,"parameters":null,"downloads":85843,"likes":8,"verified":0,"created_at":"2026-04-24 15:38:55","updated_at":"2026-05-06 04:06:06","source_url":"https://huggingface.co/datasets/bio-nlp-umass/MedThinkVQA","source_platform":"huggingface","hf_repo_id":"bio-nlp-umass/MedThinkVQA","ollama_name":"","category":"dataset","latest_version":"v1.0.0","version_count":1,"signature_count":1,"risk_level":null,"risk_score":null,"versions":[{"id":474,"model_id":475,"version":"v1.0.0","manifest_hash":"12070f2800f46a04ab345ad29ade1431d33c67732722d7b72e0e953157c59d10","file_count":0,"total_size":0,"r2_manifest_key":"manifests/datasets/bio-nlp-umass--medthinkvqa/v1.0.0.json","created_at":"2026-04-24 15:38:55"}],"files":[],"signatures":[{"id":999,"version_id":474,"signer_did":"did:quantamrkt:registry:shield-v1","algorithm":"ML-DSA-65","signature_hex":"f5b5c8697aabf3259bd6f6d72ef855d99082197fc295de7826d2756b1c5d657e","attestation_type":"registry","signed_at":"2026-04-24 15:38:55"}],"hndl":null}