Column summary for the listing below (numeric columns show value ranges; string and list columns show length ranges; date-typed strings show date ranges):

| Column | Type | Min | Max |
|-----------------|------------------------|---------------------|---------------------|
| datasetId | large_string (lengths) | 6 | 110 |
| author | large_string (lengths) | 3 | 34 |
| last_modified | large_string (dates) | 2021-05-20 00:57:22 | 2025-05-07 08:14:41 |
| downloads | int64 | 0 | 3.97M |
| likes | int64 | 0 | 7.74k |
| tags | large_list (lengths) | 1 | 2.03k |
| task_categories | large_list (lengths) | 0 | 16 |
| createdAt | large_string (dates) | 2022-03-02 23:29:22 | 2025-05-07 08:13:27 |
| trending_score | float64 | 1 | 39 |
| card | large_string (lengths) | 31 | 1M |
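Where this listing is itself available as a file, the column summary above maps directly onto a dataframe filter. A minimal sketch, assuming the rows are exported as a Parquet file (the file name here is hypothetical):

```python
import pandas as pd

# Hypothetical export of the listing below; adjust the path as needed.
df = pd.read_parquet("datasets_listing.parquet")

# Ten most-downloaded entries that declare at least one task category.
popular = (
    df[df["task_categories"].apply(len) > 0]
    .sort_values("downloads", ascending=False)
    .head(10)
)
print(popular[["datasetId", "author", "downloads", "likes"]])
```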
jdchang/qsharp-bt-mixture
jdchang
2025-05-05T19:55:28Z
0
0
[ "region:us" ]
[]
2025-05-05T19:54:46Z
null
--- dataset_info: features: - name: message_id dtype: string - name: problem dtype: string - name: solution dtype: string - name: answer dtype: string - name: reward sequence: bool - name: roll_in_ids sequence: sequence: int32 - name: roll_outs_ids sequence: sequence: int32 - name: processed_answer sequence: string splits: - name: train num_bytes: 2433860777 num_examples: 27194 download_size: 688707061 dataset_size: 2433860777 configs: - config_name: default data_files: - split: train path: data/train-* ---
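The card above declares nested `sequence` features (`roll_in_ids` and `roll_outs_ids` are sequences of int32 sequences). A minimal sketch of inspecting one row with the `datasets` library; streaming is used only to avoid the ~0.7 GB download:

```python
from datasets import load_dataset

# Stream a single row to inspect the nested token-ID sequences.
ds = load_dataset("jdchang/qsharp-bt-mixture", split="train", streaming=True)
row = next(iter(ds))

print(row["problem"][:200])
print(len(row["roll_in_ids"]), "roll-ins; the first has", len(row["roll_in_ids"][0]), "token IDs")
print(row["reward"])  # one boolean per rollout
```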
french-datasets/DrBenchmark_CLISTER
french-datasets
2025-05-05T19:55:08Z
0
0
[ "language:fra", "region:us" ]
[]
2025-05-05T19:54:38Z
null
--- language: - fra viewer: false --- This repository is empty; it was created to improve the discoverability (search indexing) of the dataset [DrBenchmark/CLISTER](https://huggingface.co/datasets/DrBenchmark/CLISTER).
ieuniversity/group_9_submission
ieuniversity
2025-05-05T19:53:02Z
0
0
[ "size_categories:10K<n<100K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T13:15:21Z
null
--- dataset_info: features: - name: ID dtype: string - name: CLASE dtype: string splits: - name: train num_bytes: 895725 num_examples: 25808 download_size: 501422 dataset_size: 895725 configs: - config_name: default data_files: - split: train path: data/train-* ---
buryat-translation/buryat_russian_parallel_corpus
buryat-translation
2025-05-05T19:52:28Z
0
0
[ "region:us" ]
[]
2025-05-05T19:52:22Z
null
--- dataset_info: features: - name: bxr dtype: string - name: ru dtype: string - name: corpus dtype: string splits: - name: train num_bytes: 26915257 num_examples: 118224 - name: test num_bytes: 1113951 num_examples: 2000 download_size: 12550245 dataset_size: 28029208 configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* ---
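Both splits declared in the card load by name; a minimal sketch:

```python
from datasets import load_dataset

corpus = load_dataset("buryat-translation/buryat_russian_parallel_corpus")

pair = corpus["train"][0]
print(pair["bxr"], "->", pair["ru"], f"(source corpus: {pair['corpus']})")
print(len(corpus["train"]), "train pairs /", len(corpus["test"]), "test pairs")
```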
zhengbang0707/REFUEL_it2_mask2_v2_llama3_30k
zhengbang0707
2025-05-05T19:51:52Z
0
0
[ "region:us" ]
[]
2025-05-05T19:49:16Z
null
--- configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* - split: val path: data/val-* dataset_info: features: - name: chosen list: - name: content dtype: string - name: role dtype: string - name: reject list: - name: content dtype: string - name: role dtype: string - name: chosen_token sequence: int64 - name: reject_token sequence: int64 - name: chosen_mask sequence: int64 - name: reject_mask sequence: int64 - name: chosen_reward_list sequence: float64 - name: reject_reward_list sequence: float64 - name: chosen_reward_list_new sequence: float64 - name: reject_reward_list_new sequence: float64 - name: chosen_reward dtype: float64 - name: reject_reward dtype: float64 splits: - name: train num_bytes: 2206549179 num_examples: 30000 - name: test num_bytes: 36786161 num_examples: 500 - name: val num_bytes: 36656292 num_examples: 500 download_size: 156648282 dataset_size: 2279991632 --- # Dataset Card for "REFUEL_it2_mask2_v2_llama3_30k" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
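`chosen` and `reject` in this card are lists of `{role, content}` chat turns, alongside pre-tokenized `*_token`/`*_mask` columns. A minimal sketch of reading one preference pair, using the small `val` split:

```python
from datasets import load_dataset

ds = load_dataset("zhengbang0707/REFUEL_it2_mask2_v2_llama3_30k", split="val")
ex = ds[0]

for turn in ex["chosen"]:  # list of {role, content} dicts
    print(f"[{turn['role']}] {turn['content'][:80]}")
print("chosen_reward:", ex["chosen_reward"], "| reject_reward:", ex["reject_reward"])
```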
TheRealPilot638/Llama-3.2-1B-dvts_16_no_chunking_H200
TheRealPilot638
2025-05-05T19:50:15Z
0
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:tabular", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-03T19:29:07Z
null
--- dataset_info: - config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-0--agg_strategy--last features: - name: problem dtype: string - name: solution dtype: string - name: answer dtype: string - name: subject dtype: string - name: level dtype: int64 - name: unique_id dtype: string - name: completions sequence: string - name: pred dtype: string - name: completion_tokens dtype: int64 - name: scores sequence: sequence: float64 - name: agg_scores sequence: float64 - name: pred_weighted@1 dtype: string - name: pred_maj@1 dtype: string - name: pred_naive@1 dtype: string - name: pred_weighted@2 dtype: string - name: pred_maj@2 dtype: string - name: pred_naive@2 dtype: string - name: pred_weighted@4 dtype: string - name: pred_maj@4 dtype: string - name: pred_naive@4 dtype: string - name: pred_weighted@8 dtype: string - name: pred_maj@8 dtype: string - name: pred_naive@8 dtype: string - name: pred_weighted@16 dtype: string - name: pred_maj@16 dtype: string - name: pred_naive@16 dtype: string splits: - name: train num_bytes: 13571415 num_examples: 500 download_size: 2376352 dataset_size: 13571415 - config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-0--agg_strategy--last--evals features: - name: n dtype: int64 - name: acc_naive dtype: float64 - name: acc_weighted dtype: float64 - name: acc_maj dtype: float64 splits: - name: train num_bytes: 32 num_examples: 1 download_size: 1961 dataset_size: 32 - config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-1--agg_strategy--last features: - name: problem dtype: string - name: solution dtype: string - name: answer dtype: string - name: subject dtype: string - name: level dtype: int64 - name: unique_id dtype: string - name: completions sequence: string - name: pred dtype: string - name: completion_tokens dtype: int64 - name: scores sequence: sequence: float64 - name: agg_scores sequence: float64 - name: pred_weighted@1 dtype: string - name: pred_maj@1 dtype: string - name: pred_naive@1 dtype: string - name: pred_weighted@2 dtype: string - name: pred_maj@2 dtype: string - name: pred_naive@2 dtype: string - name: pred_weighted@4 dtype: string - name: pred_maj@4 dtype: string - name: pred_naive@4 dtype: string - name: pred_weighted@8 dtype: string - name: pred_maj@8 dtype: string - name: pred_naive@8 dtype: string - name: pred_weighted@16 dtype: string - name: pred_maj@16 dtype: string - name: pred_naive@16 dtype: string splits: - name: train num_bytes: 13587591 num_examples: 500 download_size: 2365587 dataset_size: 13587591 - config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-1--agg_strategy--last--evals features: - name: n dtype: int64 - name: acc_naive dtype: float64 - name: acc_weighted dtype: float64 - name: acc_maj dtype: float64 splits: - name: train num_bytes: 32 num_examples: 1 download_size: 1961 dataset_size: 32 - config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-2--agg_strategy--last features: - name: problem dtype: string - name: solution dtype: string - name: answer dtype: string - name: subject dtype: string - name: level dtype: int64 - name: unique_id dtype: string - name: completions sequence: string - name: pred dtype: string - name: completion_tokens dtype: int64 - name: scores sequence: sequence: float64 - name: agg_scores sequence: float64 - name: pred_weighted@1 dtype: string - name: pred_maj@1 dtype: string - name: pred_naive@1 dtype: string - name: 
pred_weighted@2 dtype: string - name: pred_maj@2 dtype: string - name: pred_naive@2 dtype: string - name: pred_weighted@4 dtype: string - name: pred_maj@4 dtype: string - name: pred_naive@4 dtype: string - name: pred_weighted@8 dtype: string - name: pred_maj@8 dtype: string - name: pred_naive@8 dtype: string - name: pred_weighted@16 dtype: string - name: pred_maj@16 dtype: string - name: pred_naive@16 dtype: string splits: - name: train num_bytes: 13635411 num_examples: 500 download_size: 2386042 dataset_size: 13635411 - config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-2--agg_strategy--last--evals features: - name: n dtype: int64 - name: acc_naive dtype: float64 - name: acc_weighted dtype: float64 - name: acc_maj dtype: float64 splits: - name: train num_bytes: 32 num_examples: 1 download_size: 1961 dataset_size: 32 - config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-3--agg_strategy--last features: - name: problem dtype: string - name: solution dtype: string - name: answer dtype: string - name: subject dtype: string - name: level dtype: int64 - name: unique_id dtype: string - name: completions sequence: string - name: pred dtype: string - name: completion_tokens dtype: int64 - name: scores sequence: sequence: float64 - name: agg_scores sequence: float64 - name: pred_weighted@1 dtype: string - name: pred_maj@1 dtype: string - name: pred_naive@1 dtype: string - name: pred_weighted@2 dtype: string - name: pred_maj@2 dtype: string - name: pred_naive@2 dtype: string - name: pred_weighted@4 dtype: string - name: pred_maj@4 dtype: string - name: pred_naive@4 dtype: string - name: pred_weighted@8 dtype: string - name: pred_maj@8 dtype: string - name: pred_naive@8 dtype: string - name: pred_weighted@16 dtype: string - name: pred_maj@16 dtype: string - name: pred_naive@16 dtype: string splits: - name: train num_bytes: 13546999 num_examples: 500 download_size: 2355221 dataset_size: 13546999 - config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-3--agg_strategy--last--evals features: - name: n dtype: int64 - name: acc_naive dtype: float64 - name: acc_weighted dtype: float64 - name: acc_maj dtype: float64 splits: - name: train num_bytes: 32 num_examples: 1 download_size: 1961 dataset_size: 32 configs: - config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-0--agg_strategy--last data_files: - split: train path: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-0--agg_strategy--last/train-* - config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-0--agg_strategy--last--evals data_files: - split: train path: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-0--agg_strategy--last--evals/train-* - config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-1--agg_strategy--last data_files: - split: train path: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-1--agg_strategy--last/train-* - config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-1--agg_strategy--last--evals data_files: - split: train path: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-1--agg_strategy--last--evals/train-* - config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-2--agg_strategy--last data_files: - split: train path: 
HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-2--agg_strategy--last/train-* - config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-2--agg_strategy--last--evals data_files: - split: train path: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-2--agg_strategy--last--evals/train-* - config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-3--agg_strategy--last data_files: - split: train path: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-3--agg_strategy--last/train-* - config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-3--agg_strategy--last--evals data_files: - split: train path: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40--look-1--seed-3--agg_strategy--last--evals/train-* ---
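With multiple configs like the above, `load_dataset` takes the config name as its second argument; the `--evals` configs hold a one-row accuracy summary per seed. A minimal sketch:

```python
from datasets import load_dataset

name = (
    "HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-16--m-4--iters-40"
    "--look-1--seed-0--agg_strategy--last--evals"
)
evals = load_dataset("TheRealPilot638/Llama-3.2-1B-dvts_16_no_chunking_H200", name, split="train")
print(evals[0])  # {'n': ..., 'acc_naive': ..., 'acc_weighted': ..., 'acc_maj': ...}
```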
osama24sy/llama3.1-8b-it-10k-qwen-singleturn-onesolution-r256-24-v0.3
osama24sy
2025-05-05T19:47:45Z
0
0
[ "region:us" ]
[]
2025-05-05T19:47:42Z
null
--- dataset_info: features: - name: index dtype: int64 - name: numbers sequence: int64 - name: operations sequence: sequence: string - name: response dtype: string - name: token_count dtype: int64 splits: - name: train num_bytes: 346585 num_examples: 150 download_size: 131720 dataset_size: 346585 configs: - config_name: default data_files: - split: train path: data/train-* ---
AKCIT-Audio/LIGHT_transcriptions
AKCIT-Audio
2025-05-05T19:45:15Z
34
0
[ "size_categories:100K<n<1M", "format:parquet", "modality:tabular", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-04-30T16:09:27Z
null
--- dataset_info: features: - name: Dialog dtype: int64 - name: Turn dtype: int64 - name: Speaker dtype: string - name: Sentence dtype: string - name: Translated_Sentence dtype: string splits: - name: train num_bytes: 19929590 num_examples: 103935 download_size: 11314549 dataset_size: 19929590 configs: - config_name: default data_files: - split: train path: data/train-* ---
jdchang/qsharp-bt-32b
jdchang
2025-05-05T19:37:18Z
0
0
[ "region:us" ]
[]
2025-05-05T19:36:33Z
null
--- dataset_info: features: - name: message_id dtype: string - name: problem dtype: string - name: solution dtype: string - name: answer dtype: string - name: reward sequence: bool - name: roll_in_ids sequence: sequence: int32 - name: roll_outs_ids sequence: sequence: int32 - name: processed_answer sequence: string splits: - name: train num_bytes: 2980637821 num_examples: 31907 download_size: 841633940 dataset_size: 2980637821 configs: - config_name: default data_files: - split: train path: data/train-* ---
autoprogrammer/gsm-lf
autoprogrammer
2025-05-05T19:31:58Z
0
0
[ "region:us" ]
[]
2025-05-05T19:31:56Z
null
--- dataset_info: features: - name: prompt dtype: string - name: completion dtype: string - name: conversations list: - name: from dtype: string - name: value dtype: string - name: question dtype: string - name: answer dtype: string splits: - name: train num_bytes: 9930428 num_examples: 8792 download_size: 5321478 dataset_size: 9930428 configs: - config_name: default data_files: - split: train path: data/train-* ---
MBZUAI-IFM/24game_final
MBZUAI-IFM
2025-05-05T19:29:48Z
0
0
[ "region:us" ]
[]
2025-05-05T19:29:46Z
null
--- dataset_info: features: - name: conversations list: - name: from dtype: string - name: value dtype: string - name: metadata dtype: string - name: dataset_source dtype: string splits: - name: train num_bytes: 869894 num_examples: 259 download_size: 398334 dataset_size: 869894 configs: - config_name: default data_files: - split: train path: data/train-* ---
reasoning-proj/exp_rob_dLlama_3_1_Nemotron_Nano_8B_v1_madversarial_continue_t30
reasoning-proj
2025-05-05T19:28:26Z
0
0
[ "region:us" ]
[]
2025-05-05T19:28:23Z
null
--- dataset_info: features: - name: question dtype: string - name: answer_content dtype: string - name: reference_answer dtype: string - name: id dtype: string - name: metadata struct: - name: question_license dtype: string - name: question_source dtype: string - name: model_name dtype: string - name: mutated_answer_content dtype: string splits: - name: train num_bytes: 12391992 num_examples: 498 download_size: 4909229 dataset_size: 12391992 configs: - config_name: default data_files: - split: train path: data/train-* ---
xbilek25/static_validation_1.0_absorb_0.1
xbilek25
2025-05-05T19:28:12Z
0
0
[ "region:us" ]
[]
2025-05-05T19:26:03Z
null
--- dataset_info: features: - name: client_id dtype: string - name: path dtype: string - name: audio dtype: audio: sampling_rate: 16000 - name: sentence dtype: string - name: up_votes dtype: int64 - name: down_votes dtype: int64 - name: age dtype: string - name: gender dtype: string - name: accent dtype: string - name: locale dtype: string - name: segment dtype: string - name: variant dtype: string splits: - name: train num_bytes: 72296766.0 num_examples: 360 download_size: 65837984 dataset_size: 72296766.0 configs: - config_name: default data_files: - split: train path: data/train-* ---
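The `audio` feature above decodes to a waveform plus its sampling rate (16 kHz per the card). A minimal sketch; decoding assumes an audio backend such as `soundfile` is installed, and the returned type may differ in newer `datasets` releases:

```python
from datasets import load_dataset

ds = load_dataset("xbilek25/static_validation_1.0_absorb_0.1", split="train")

clip = ds[0]["audio"]  # dict with 'array' and 'sampling_rate' (datasets < 4; newer versions return a decoder object)
print(clip["sampling_rate"], clip["array"].shape)
print(ds[0]["sentence"])
```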
kaiwenw/distill-r1-qwen-1.5b-hmmt-feb-24-4096-with-labels-prm-indices_107520_115200
kaiwenw
2025-05-05T19:24:14Z
0
0
[ "region:us" ]
[]
2025-05-05T19:23:50Z
null
--- dataset_info: features: - name: message_id dtype: string - name: problem dtype: string - name: answer dtype: string - name: processed_answer dtype: string - name: responses dtype: string - name: reward dtype: bool - name: prompt_len dtype: int64 - name: response_len dtype: int64 - name: classifier_scores sequence: float64 splits: - name: train num_bytes: 1119348010 num_examples: 7680 download_size: 668600033 dataset_size: 1119348010 configs: - config_name: default data_files: - split: train path: data/train-* ---
ajagota71/ajagota71_pythia-70m-detox-epoch-100_800_samples_detoxified
ajagota71
2025-05-05T19:24:11Z
0
0
[ "region:us" ]
[]
2025-05-05T19:24:09Z
null
--- dataset_info: features: - name: prompt dtype: string - name: output dtype: string - name: model_name dtype: string - name: temperature dtype: float64 - name: top_p dtype: float64 - name: generation_timestamp dtype: string splits: - name: train num_bytes: 208384 num_examples: 800 download_size: 95857 dataset_size: 208384 configs: - config_name: default data_files: - split: train path: data/train-* ---
autoprogrammer/ESFT-translation-lf
autoprogrammer
2025-05-05T19:22:57Z
0
0
[ "region:us" ]
[]
2025-05-05T19:22:55Z
null
--- dataset_info: features: - name: prompt dtype: string - name: completion dtype: string - name: conversations list: - name: from dtype: string - name: value dtype: string splits: - name: train num_bytes: 7959891 num_examples: 11639 download_size: 3773176 dataset_size: 7959891 configs: - config_name: default data_files: - split: train path: data/train-* ---
autoprogrammer/alpaca_farm-lf
autoprogrammer
2025-05-05T19:22:45Z
0
0
[ "region:us" ]
[]
2025-05-05T19:22:43Z
null
--- dataset_info: features: - name: prompt dtype: string - name: completion dtype: string - name: conversations list: - name: from dtype: string - name: value dtype: string splits: - name: train num_bytes: 7454250 num_examples: 10000 download_size: 4500244 dataset_size: 7454250 configs: - config_name: default data_files: - split: train path: data/train-* ---
reasoning-proj/exp_rob_dLlama_3_1_Nemotron_Nano_8B_v1_madversarial_insert_w_t30
reasoning-proj
2025-05-05T19:21:53Z
0
0
[ "region:us" ]
[]
2025-05-05T19:21:48Z
null
--- dataset_info: features: - name: question dtype: string - name: answer_content dtype: string - name: reference_answer dtype: string - name: id dtype: string - name: metadata struct: - name: question_license dtype: string - name: question_source dtype: string - name: model_name dtype: string - name: mutated_answer_content dtype: string splits: - name: train num_bytes: 12562179 num_examples: 498 download_size: 5016483 dataset_size: 12562179 configs: - config_name: default data_files: - split: train path: data/train-* ---
kaiwenw/distill-r1-qwen-1.5b-hmmt-feb-24-4096-with-labels-prm-indices_76800_84480
kaiwenw
2025-05-05T19:20:54Z
0
0
[ "region:us" ]
[]
2025-05-05T19:20:31Z
null
--- dataset_info: features: - name: message_id dtype: string - name: problem dtype: string - name: answer dtype: string - name: processed_answer dtype: string - name: responses dtype: string - name: reward dtype: bool - name: prompt_len dtype: int64 - name: response_len dtype: int64 - name: classifier_scores sequence: float64 splits: - name: train num_bytes: 1119030828 num_examples: 7680 download_size: 669765637 dataset_size: 1119030828 configs: - config_name: default data_files: - split: train path: data/train-* ---
HungVu2003/opt-350m_beta_0.5_alpha_0.0_num-company_2_dataset_0_for_gen_11_v2
HungVu2003
2025-05-05T19:20:33Z
0
0
[ "region:us" ]
[]
2025-05-05T19:20:32Z
null
--- dataset_info: features: - name: question dtype: string splits: - name: train num_bytes: 986510 num_examples: 12500 download_size: 636460 dataset_size: 986510 configs: - config_name: default data_files: - split: train path: data/train-* ---
kaiwenw/distill-r1-qwen-1.5b-hmmt-feb-24-4096-with-labels-prm-indices_53760_61440
kaiwenw
2025-05-05T19:20:11Z
0
0
[ "region:us" ]
[]
2025-05-05T19:19:43Z
null
--- dataset_info: features: - name: message_id dtype: string - name: problem dtype: string - name: answer dtype: string - name: processed_answer dtype: string - name: responses dtype: string - name: reward dtype: bool - name: prompt_len dtype: int64 - name: response_len dtype: int64 - name: classifier_scores sequence: float64 splits: - name: train num_bytes: 1116451858 num_examples: 7680 download_size: 668345460 dataset_size: 1116451858 configs: - config_name: default data_files: - split: train path: data/train-* ---
ajagota71/EleutherAI_pythia-70M_700_samples_original
ajagota71
2025-05-05T19:19:08Z
0
0
[ "region:us" ]
[]
2025-05-05T19:18:43Z
null
--- dataset_info: features: - name: prompt dtype: string - name: output dtype: string - name: model_name dtype: string - name: temperature dtype: float64 - name: top_p dtype: float64 - name: generation_timestamp dtype: string splits: - name: train num_bytes: 182490 num_examples: 700 download_size: 84954 dataset_size: 182490 configs: - config_name: default data_files: - split: train path: data/train-* ---
HungVu2003/opt-350m_beta_0.5_alpha_0.0_num-company_2_dataset_1_for_gen_10_v2
HungVu2003
2025-05-05T19:14:41Z
0
0
[ "region:us" ]
[]
2025-05-05T19:14:38Z
null
--- dataset_info: features: - name: question dtype: string splits: - name: train num_bytes: 3728613 num_examples: 12500 download_size: 1977457 dataset_size: 3728613 configs: - config_name: default data_files: - split: train path: data/train-* ---
HungVu2003/opt-350m_beta_0.5_alpha_0.0_num-company_2_dataset_1_for_gen_8_v2
HungVu2003
2025-05-05T19:02:27Z
0
0
[ "region:us" ]
[]
2025-05-05T19:02:26Z
null
--- dataset_info: features: - name: question dtype: string splits: - name: train num_bytes: 3701324 num_examples: 12500 download_size: 1974942 dataset_size: 3701324 configs: - config_name: default data_files: - split: train path: data/train-* ---
kaiwenw/distill-r1-qwen-1.5b-hmmt-feb-24-4096-with-old-prm-indices_107520_115200
kaiwenw
2025-05-05T18:56:17Z
0
0
[ "region:us" ]
[]
2025-05-05T18:56:05Z
null
--- dataset_info: features: - name: message_id dtype: string - name: problem dtype: string - name: answer dtype: string - name: processed_answer dtype: string - name: responses dtype: string - name: reward dtype: bool - name: prompt_len dtype: int64 - name: response_len dtype: int64 - name: classifier_scores sequence: float64 splits: - name: train num_bytes: 1119348010 num_examples: 7680 download_size: 264394896 dataset_size: 1119348010 configs: - config_name: default data_files: - split: train path: data/train-* ---
kaiwenw/distill-r1-qwen-1.5b-hmmt-feb-24-4096-with-old-prm-indices_69120_76800
kaiwenw
2025-05-05T18:55:49Z
0
0
[ "region:us" ]
[]
2025-05-05T18:55:36Z
null
--- dataset_info: features: - name: message_id dtype: string - name: problem dtype: string - name: answer dtype: string - name: processed_answer dtype: string - name: responses dtype: string - name: reward dtype: bool - name: prompt_len dtype: int64 - name: response_len dtype: int64 - name: classifier_scores sequence: float64 splits: - name: train num_bytes: 1111892365 num_examples: 7680 download_size: 263270008 dataset_size: 1111892365 configs: - config_name: default data_files: - split: train path: data/train-* ---
kaiwenw/distill-r1-qwen-1.5b-hmmt-feb-24-4096-with-old-prm-indices_61440_69120
kaiwenw
2025-05-05T18:55:46Z
0
0
[ "region:us" ]
[]
2025-05-05T18:55:34Z
null
--- dataset_info: features: - name: message_id dtype: string - name: problem dtype: string - name: answer dtype: string - name: processed_answer dtype: string - name: responses dtype: string - name: reward dtype: bool - name: prompt_len dtype: int64 - name: response_len dtype: int64 - name: classifier_scores sequence: float64 splits: - name: train num_bytes: 1117754896 num_examples: 7680 download_size: 264514132 dataset_size: 1117754896 configs: - config_name: default data_files: - split: train path: data/train-* ---
MBZUAI-IFM/AM_clean_en_final_10perc
MBZUAI-IFM
2025-05-05T18:54:47Z
0
0
[ "size_categories:100K<n<1M", "format:parquet", "modality:text", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T10:20:58Z
null
--- dataset_info: features: - name: conversations list: - name: from dtype: string - name: value dtype: string - name: dataset_source dtype: string - name: metadata dtype: string - name: has_forbidden dtype: bool splits: - name: train num_bytes: 2512045175.0 num_examples: 107490 download_size: 1137638779 dataset_size: 2512045175.0 configs: - config_name: default data_files: - split: train path: data/train-* ---
HungVu2003/opt-350m_beta_0.5_alpha_0.0_num-company_2_dataset_1_for_gen_6_v2
HungVu2003
2025-05-05T18:50:32Z
0
0
[ "region:us" ]
[]
2025-05-05T18:50:31Z
null
--- dataset_info: features: - name: question dtype: string splits: - name: train num_bytes: 3679977 num_examples: 12500 download_size: 1965856 dataset_size: 3679977 configs: - config_name: default data_files: - split: train path: data/train-* ---
weqweasdas/qw_rej_math
weqweasdas
2025-05-05T18:50:16Z
0
0
[ "region:us" ]
[]
2025-05-05T18:50:15Z
null
--- dataset_info: features: - name: idx dtype: int64 - name: question dtype: string - name: gt_cot dtype: string - name: gt dtype: string - name: level dtype: int64 - name: solution dtype: string - name: answer dtype: string - name: code sequence: string - name: pred sequence: string - name: report sequence: 'null' - name: score sequence: bool splits: - name: train num_bytes: 7197998 num_examples: 500 download_size: 1730882 dataset_size: 7197998 configs: - config_name: default data_files: - split: train path: data/train-* ---
MBZUAI-IFM/AM_clean_en_final_90perc
MBZUAI-IFM
2025-05-05T18:49:10Z
0
0
[ "size_categories:1M<n<10M", "format:parquet", "modality:text", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T10:49:26Z
null
--- dataset_info: features: - name: conversations list: - name: from dtype: string - name: value dtype: string - name: dataset_source dtype: string - name: metadata dtype: string - name: has_forbidden dtype: bool splits: - name: train num_bytes: 22500485772.0 num_examples: 967376 download_size: 10190024913 dataset_size: 22500485772.0 configs: - config_name: default data_files: - split: train path: data/train-* ---
Abeyankar/mcity_clean_2844_crowd
Abeyankar
2025-05-05T18:47:56Z
0
0
[ "task_categories:image-classification", "task_categories:object-detection", "language:en", "license:mit", "size_categories:1K<n<10K", "modality:image", "library:fiftyone", "region:us", "fiftyone", "fisheye8k", "image", "image-classification", "object-detection" ]
[ "image-classification", "object-detection" ]
2025-05-05T18:44:50Z
null
--- annotations_creators: [] language: en license: mit size_categories: - 1K<n<10K task_categories: - image-classification - object-detection task_ids: [] pretty_name: mcity_clean_newf2_2844 tags: - fiftyone - fisheye8k - image - image-classification - object-detection - object-detection description: Removed erroneous annotations, and changed labels using cvat dataset_summary: ' This is a [FiftyOne](https://github.com/voxel51/fiftyone) dataset with 2844 samples. ## Installation If you haven''t already, install FiftyOne: ```bash pip install -U fiftyone ``` ## Usage ```python import fiftyone as fo from fiftyone.utils.huggingface import load_from_hub # Load the dataset # Note: other available arguments include ''max_samples'', etc dataset = load_from_hub("Abeyankar/mcity_clean_2844_crowd") # Launch the App session = fo.launch_app(dataset) ``` ' --- # Dataset Card for mcity_clean_newf2_2844 <!-- Provide a quick summary of the dataset. --> This is a [FiftyOne](https://github.com/voxel51/fiftyone) dataset with 2844 samples. ## Installation If you haven't already, install FiftyOne: ```bash pip install -U fiftyone ``` ## Usage ```python import fiftyone as fo from fiftyone.utils.huggingface import load_from_hub # Load the dataset # Note: other available arguments include 'max_samples', etc dataset = load_from_hub("Abeyankar/mcity_clean_2844_crowd") # Launch the App session = fo.launch_app(dataset) ``` ## Dataset Details ### Dataset Description <!-- Provide a longer summary of what this dataset is. --> - **Curated by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Language(s) (NLP):** en - **License:** mit ### Dataset Sources [optional] <!-- Provide the basic links for the dataset. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the dataset is intended to be used. --> ### Direct Use <!-- This section describes suitable use cases for the dataset. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. --> [More Information Needed] ## Dataset Structure <!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. --> [More Information Needed] ## Dataset Creation ### Curation Rationale <!-- Motivation for the creation of this dataset. --> [More Information Needed] ### Source Data <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). --> #### Data Collection and Processing <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. --> [More Information Needed] #### Who are the source data producers? <!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. --> [More Information Needed] ### Annotations [optional] <!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. 
--> #### Annotation process <!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. --> [More Information Needed] #### Who are the annotators? <!-- This section describes the people or systems who created the annotations. --> [More Information Needed] #### Personal and Sensitive Information <!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations. ## Citation [optional] <!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the dataset or dataset card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Dataset Card Authors [optional] [More Information Needed] ## Dataset Card Contact [More Information Needed]
ICICLE-AI/ResourceEstimation_HLOGenCNN
ICICLE-AI
2025-05-05T18:45:21Z
11
0
[ "task_categories:graph-ml", "task_categories:tabular-regression", "source_datasets:custom", "language:en", "license:apache-2.0", "size_categories:1K<n<10K", "region:us", "HPC", "resource-prediction", "XLA", "compiler-features", "deep-learning", "graph-learning", "scheduling" ]
[ "graph-ml", "tabular-regression" ]
2025-04-04T17:34:45Z
null
--- dataset_name: "hlo-feature-dataset" pretty_name: "HLO Feature Dataset for Deep Learning Resource Estimation" dataset_type: "graph-and-tabular" license: "apache-2.0" task_categories: - graph-ml - tabular-regression language: "en" tags: - HPC - resource-prediction - XLA - compiler-features - deep-learning - graph-learning - scheduling size_categories: - 1K<n<10K source_datasets: - custom dataset_summary: > The HLO Feature Dataset contains High-Level Optimizer (HLO) graph features and metadata extracted from deep learning training workloads. It is designed for tasks such as runtime prediction, resource estimation, and graph-based machine learning in HPC environments. Each entry pairs model configuration metadata with compiler graph data stored in `.npz` format. Ideal for ML system optimization studies, GNN research, and AI workload scheduling. structured_data: features: - name: "batch" type: "integer" - name: "epochs" type: "integer" - name: "learn_rate" type: "float" - name: "gpu_core_count" type: "integer" - name: "gpu_memory_size" type: "integer" - name: "fit_time" type: "float" - name: "npz_path" type: "string" graph_data: node_features: "node_feat" edge_index: "edge_index" additional_keys: - "node_opcode" - "node_config_ids" - "node_splits" usage_example: | ```python from datasets import load_dataset import numpy as np dataset = load_dataset("your-username/hlo-feature-dataset") sample = dataset['train'][0] graph_data = np.load(sample['npz_path']) node_features = graph_data['node_feat'] edges = graph_data['edge_index'] --- # HLO Feature Dataset for Deep Learning Resource Estimation [![Dataset](https://img.shields.io/badge/HuggingFace-Dataset-blue)](https://huggingface.co/datasets/your-username/hlo-feature-dataset) ## Dataset Summary The **HLO Feature Dataset** is a collection of compiler-level graph features (HLO graphs) extracted from deep learning training workloads. Alongside detailed metadata (model configs, GPU stats), this dataset enables machine learning approaches for: - ⏱️ **Training Time Prediction** - 📉 **Resource Consumption Estimation** - ⚡ **HPC and GPU Scheduling Optimization** - 🧩 **Graph-based Neural Architecture Analysis** This dataset is ideal for experimenting with regression models (e.g., XGBoost) and Graph Neural Networks (GNNs) using compiler features. --- ## Supported Tasks - **⚙️ Runtime & Resource Prediction**: Predict training time (`fit_time`) based on HLO features. - **📊 ML for Systems Optimization**: Use tabular + graph data for AI workload management. - **🔗 Graph Representation Learning**: Apply GNNs on HLO graphs (`node_feat`, `edge_index`). --- ## Dataset Structure Each entry includes: - **Metadata**: From `dataset-new.csv` (model, optimizer, GPU specs, timing metrics, etc.) - **HLO Graph Features**: `.npz` files containing: - `node_opcode`, `node_feat`, `edge_index`, `node_config_ids`, `node_splits` --- ## Usage Example This example demonstrates how to load metadata, preprocess features, and train an XGBoost model to predict training time (`fit_time`), as shown in the Colab notebook. 
```python import pandas as pd import numpy as np from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error from xgboost import XGBRegressor # Load metadata CSV df = pd.read_csv('dataset-new.csv') # Example feature selection (non-numeric/categorical columns need separate handling) X = df[['batch', 'epochs', 'learn_rate', 'gpu_core_count', 'gpu_memory_size']] y = df['fit_time'] # Train-test split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) # Initialize XGBoost Regressor xgb_model = XGBRegressor(n_estimators=100, learning_rate=0.1, max_depth=6, random_state=42) xgb_model.fit(X_train, y_train) # Evaluate preds = xgb_model.predict(X_test) rmse = mean_squared_error(y_test, preds, squared=False) print(f"RMSE: {rmse}") ``` --- ### Example Notebooks #### 🚀 Baseline: XGBoost for Resource Estimation A sample baseline implementation using **XGBoost** is provided to demonstrate how to predict resource metrics such as `fit_time` using the dataset's metadata. 📥 **Download the notebook** from the repository: [Baseline_XGBoost_Resource_Estimation.ipynb](https://huggingface.co/datasets/ICICLE-AI/ResourceEstimation_HLOGenCNN/blob/main/Baseline_XGBoost_Resource_Estimation.ipynb) This notebook covers: - Loading and preprocessing metadata from `dataset-new.csv` - Training an XGBoost regressor to predict training time - Evaluating model performance (e.g., RMSE) > ⚡ **Note:** Make sure to adjust paths if cloning the dataset locally or integrating with the Hugging Face `datasets` API. --- ### Loading HLO Graph Features For graph-based ML tasks, load the `.npz` files: ```python npz_file = df.iloc[0]['npz_path'] graph_data = np.load(npz_file) node_features = graph_data['node_feat'] edges = graph_data['edge_index'] print("Node Feature Shape:", node_features.shape) print("Edge Index Shape:", edges.shape) ``` --- <!-- ## Citation If you use this dataset, please cite: ``` @misc{hlofeatures2025, title={HLO Feature Dataset for AI Resource Estimation}, author={Your Name}, year={2025}, url={https://huggingface.co/datasets/your-username/hlo-feature-dataset} } ``` --> --- ## License Apache-2.0 (per the dataset metadata). --- ## Contributions Open to contributions! Feel free to suggest improvements or share your models trained on this dataset.
HungVu2003/opt-350m_beta_0.5_alpha_0.0_num-company_2_dataset_1_for_gen_5_v2
HungVu2003
2025-05-05T18:44:43Z
0
0
[ "region:us" ]
[]
2025-05-05T18:44:42Z
null
--- dataset_info: features: - name: question dtype: string splits: - name: train num_bytes: 3690117 num_examples: 12500 download_size: 1971558 dataset_size: 3690117 configs: - config_name: default data_files: - split: train path: data/train-* ---
chcaa/fiction4sentiment
chcaa
2025-05-05T18:39:49Z
0
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:tabular", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T09:36:05Z
null
--- dataset_info: features: - name: text dtype: string - name: label dtype: float64 - name: category dtype: string - name: author dtype: string - name: id dtype: string - name: year dtype: float64 - name: org_lang dtype: string - name: annotator_1 dtype: float64 - name: annotator_2 dtype: float64 - name: annotator_3 dtype: float64 - name: tr_xlm_roberta dtype: float64 - name: vader dtype: float64 - name: __index_level_0__ dtype: string splits: - name: train num_bytes: 1051007 num_examples: 6300 download_size: 326724 dataset_size: 1051007 configs: - config_name: default data_files: - split: train path: data/train-* --- ## Dataset description A dataset of literary sentences human-annotated for valence (0-10), used for developing multilingual sentiment analysis. ### 🔬 Data | | No. texts | No. annotations | No. words | Period | |-------------|-----|------|--------|------------| | **Fairy tales** | 3 | 772 | 18,597 | 1837-1847 | | **Hymns** | 65 | 2,026 | 12,798 | 1798-1873 | | **Prose** | 1 | 1,923 | 30,279 | 1952 | | **Poetry** | 40 | 1,579 | 11,576 | 1965 | This is the **Fiction4 dataset** of literary texts, spanning 109 individual texts across 4 genres and two languages (**English** and **Danish**) in the 19th and 20th centuries. The corpus centers on 3 main authors: Sylvia Plath for poetry, Ernest Hemingway for prose, and H.C. Andersen for fairy tales. The hymns are a heterogeneous collection from official Danish church hymnbooks from 1798-1873. The corpus was annotated for valence on a sentence basis by at least 2 annotators per sentence. ## Columns - text: sentence from a literary piece - label: mean human-annotated valence score (0-10) - category: literary genre [prose, poetry, hymns, fairytales] - tr_xlm_roberta, vader: automatic sentiment scores for the sentences from a model-based and a dictionary-based method, respectively - id: parent story or collection of the text ## Citation If you want to use this data, please cite our work [available here](https://ceur-ws.org/Vol-3834/paper98.pdf): ``` @inproceedings{feldkamp_sentiment_2024, title = {Sentiment {Below} the {Surface}: {Omissive} and {Evocative} {Strategies} in {Literature} and {Beyond}}, shorttitle = {Sentiment {Below} the {Surface}}, booktitle = {Computational {Humanities} {Research} 2024}, publisher = {CEUR Workshop Proceedings}, author = {Feldkamp, Pascale and Overgaard, Ea Lindhardt and Nielbo, Kristoffer Laigaard and Bizzoni, Yuri}, year = {2024}, } ```
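Since the card ships both the human mean valence (`label`) and two automatic scorers (`tr_xlm_roberta`, `vader`), a quick rank correlation is easy to sketch:

```python
from datasets import load_dataset

df = load_dataset("chcaa/fiction4sentiment", split="train").to_pandas()

print(df.groupby("category")["label"].mean())  # mean human valence per genre
# Rank correlation between human scores and the two automatic scorers
print(df[["label", "tr_xlm_roberta", "vader"]].corr(method="spearman"))
```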
HungVu2003/opt-350m_beta_0.5_alpha_0.0_num-company_2_dataset_1_for_gen_2_v2
HungVu2003
2025-05-05T18:26:42Z
0
0
[ "region:us" ]
[]
2025-05-05T18:26:40Z
null
--- dataset_info: features: - name: question dtype: string splits: - name: train num_bytes: 3710700 num_examples: 12500 download_size: 1972121 dataset_size: 3710700 configs: - config_name: default data_files: - split: train path: data/train-* ---
HungVu2003/opt-350m_beta_0.5_alpha_0.0_num-company_2_dataset_0_for_gen_2_v2
HungVu2003
2025-05-05T18:26:38Z
0
0
[ "region:us" ]
[]
2025-05-05T18:26:37Z
null
--- dataset_info: features: - name: question dtype: string splits: - name: train num_bytes: 977521 num_examples: 12500 download_size: 629784 dataset_size: 977521 configs: - config_name: default data_files: - split: train path: data/train-* ---
reasoning-proj/exp_rob_dLlama_3_1_Nemotron_Nano_8B_v1_mbenign_complete_step_t10
reasoning-proj
2025-05-05T18:23:52Z
0
0
[ "region:us" ]
[]
2025-05-05T18:23:50Z
null
--- dataset_info: features: - name: question dtype: string - name: answer_content dtype: string - name: reference_answer dtype: string - name: id dtype: string - name: metadata struct: - name: question_license dtype: string - name: question_source dtype: string - name: model_name dtype: string - name: mutated_answer_content dtype: string splits: - name: train num_bytes: 1094403 num_examples: 50 download_size: 471856 dataset_size: 1094403 configs: - config_name: default data_files: - split: train path: data/train-* ---
HungVu2003/opt-350m_beta_0.5_alpha_0.0_num-company_2_dataset_0_for_gen_1_v2
HungVu2003
2025-05-05T18:20:46Z
0
0
[ "region:us" ]
[]
2025-05-05T18:20:44Z
null
--- dataset_info: features: - name: question dtype: string splits: - name: train num_bytes: 986113 num_examples: 12500 download_size: 635326 dataset_size: 986113 configs: - config_name: default data_files: - split: train path: data/train-* ---
neuraxcompany/Check_in-Dataset
neuraxcompany
2025-05-05T18:04:26Z
8
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-04-28T18:26:57Z
null
--- dataset_info: features: - name: Check-In/Input dtype: string - name: Classifcation/Output dtype: string splits: - name: train num_bytes: 18951 num_examples: 139 download_size: 6116 dataset_size: 18951 configs: - config_name: default data_files: - split: train path: data/train-* ---
omourier/Lego_rouge
omourier
2025-05-05T18:01:27Z
44
0
[ "task_categories:robotics", "modality:video", "region:us", "phosphobot", "so100", "phospho-dk" ]
[ "robotics" ]
2025-05-04T16:57:17Z
null
--- tags: - phosphobot - so100 - phospho-dk task_categories: - robotics --- # Lego_rouge **This dataset was generated using a [phospho starter pack](https://robots.phospho.ai).** This dataset contains a series of episodes recorded with a robot and multiple cameras. It can be directly used to train a policy using imitation learning. It's compatible with LeRobot and RLDS.
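The card says the episodes are LeRobot-compatible; a minimal sketch, assuming the `lerobot` package is installed (import path and attribute names vary across lerobot versions):

```python
# Assumes: pip install lerobot; API details may differ between versions.
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset

ds = LeRobotDataset("omourier/Lego_rouge")
print(ds.num_episodes, "episodes")

frame = ds[0]  # dict of camera images, robot state, and actions
print(list(frame.keys()))
```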
kaiwenw/distill-r1-qwen-1.5b-aime-25-4096-with-labels-prm-indices_84480_92160
kaiwenw
2025-05-05T17:54:02Z
0
0
[ "region:us" ]
[]
2025-05-05T17:53:33Z
null
--- dataset_info: features: - name: message_id dtype: string - name: problem dtype: string - name: answer dtype: string - name: processed_answer dtype: string - name: responses dtype: string - name: reward dtype: bool - name: prompt_len dtype: int64 - name: response_len dtype: int64 - name: classifier_scores sequence: float64 splits: - name: train num_bytes: 1032849438 num_examples: 7680 download_size: 612176029 dataset_size: 1032849438 configs: - config_name: default data_files: - split: train path: data/train-* ---
kaiwenw/distill-r1-qwen-1.5b-aime-25-4096-with-labels-prm-indices_69120_76800
kaiwenw
2025-05-05T17:53:27Z
0
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:tabular", "modality:text", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T17:53:03Z
null
--- dataset_info: features: - name: message_id dtype: string - name: problem dtype: string - name: answer dtype: string - name: processed_answer dtype: string - name: responses dtype: string - name: reward dtype: bool - name: prompt_len dtype: int64 - name: response_len dtype: int64 - name: classifier_scores sequence: float64 splits: - name: train num_bytes: 1029518855 num_examples: 7680 download_size: 610884582 dataset_size: 1029518855 configs: - config_name: default data_files: - split: train path: data/train-* ---
kaiwenw/distill-r1-qwen-1.5b-aime-25-4096-with-labels-prm-indices_99840_107520
kaiwenw
2025-05-05T17:53:03Z
0
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:tabular", "modality:text", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T17:52:43Z
null
--- dataset_info: features: - name: message_id dtype: string - name: problem dtype: string - name: answer dtype: string - name: processed_answer dtype: string - name: responses dtype: string - name: reward dtype: bool - name: prompt_len dtype: int64 - name: response_len dtype: int64 - name: classifier_scores sequence: float64 splits: - name: train num_bytes: 1026838073 num_examples: 7680 download_size: 608914979 dataset_size: 1026838073 configs: - config_name: default data_files: - split: train path: data/train-* ---
kaiwenw/distill-r1-qwen-1.5b-aime-25-4096-with-labels-prm-indices_7680_15360
kaiwenw
2025-05-05T17:52:06Z
0
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:tabular", "modality:text", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T17:51:44Z
null
--- dataset_info: features: - name: message_id dtype: string - name: problem dtype: string - name: answer dtype: string - name: processed_answer dtype: string - name: responses dtype: string - name: reward dtype: bool - name: prompt_len dtype: int64 - name: response_len dtype: int64 - name: classifier_scores sequence: float64 splits: - name: train num_bytes: 1026378721 num_examples: 7680 download_size: 609169924 dataset_size: 1026378721 configs: - config_name: default data_files: - split: train path: data/train-* ---
ntnu-smil/longclip_qa_keyword_normalized
ntnu-smil
2025-05-05T17:38:10Z
0
0
[ "size_categories:n<1K", "format:parquet", "modality:tabular", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T17:22:06Z
null
--- dataset_info: features: - name: question_id dtype: string - name: speaker_id dtype: string - name: relevance dtype: float64 - name: asr dtype: string - name: response1 dtype: string - name: response2 dtype: string - name: response3 dtype: string - name: response4 dtype: string - name: form_id dtype: string - name: response1_score dtype: float64 - name: response2_score dtype: float64 - name: response3_score dtype: float64 - name: response4_score dtype: float64 - name: similarity1 dtype: float64 - name: similarity2 dtype: float64 - name: similarity3 dtype: float64 - name: similarity4 dtype: float64 splits: - name: train num_bytes: 279612 num_examples: 227 - name: validation num_bytes: 45886 num_examples: 34 - name: test num_bytes: 49825 num_examples: 40 download_size: 220635 dataset_size: 375323 configs: - config_name: default data_files: - split: train path: data/train-* - split: validation path: data/validation-* - split: test path: data/test-* ---
sophiayk20/restarts-both-speakers
sophiayk20
2025-05-05T17:34:30Z
0
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T17:33:03Z
null
--- dataset_info: features: - name: id dtype: string - name: dialogue dtype: string - name: disfluent_dialogue dtype: string - name: summary dtype: string splits: - name: ATOS num_bytes: 2382843 num_examples: 1500 - name: OTOS num_bytes: 2444700 num_examples: 1500 download_size: 1192578 dataset_size: 4827543 configs: - config_name: default data_files: - split: ATOS path: data/ATOS-* - split: OTOS path: data/OTOS-* ---
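This repo uses custom split names (`ATOS`, `OTOS`) instead of the usual `train`, so a split must be requested explicitly. A minimal sketch:

```python
from datasets import load_dataset

atos = load_dataset("sophiayk20/restarts-both-speakers", split="ATOS")

ex = atos[0]
print(ex["summary"])
print(ex["disfluent_dialogue"][:200])  # the disfluent variant of the dialogue
```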
nischalon10/neetcode
nischalon10
2025-05-05T17:34:23Z
0
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T17:34:18Z
null
--- dataset_info: features: - name: id dtype: int64 - name: slug dtype: string - name: title dtype: string - name: difficulty dtype: string - name: content dtype: string - name: java dtype: string - name: c++ dtype: string - name: python dtype: string - name: javascript dtype: string - name: acRate dtype: string - name: similarQuestions dtype: string - name: topicTags dtype: string - name: canon_title dtype: string splits: - name: train num_bytes: 13360626.427966101 num_examples: 1990 - name: validation num_bytes: 1483768.0605932204 num_examples: 221 - name: test num_bytes: 1000368.5114406779 num_examples: 149 download_size: 7218674 dataset_size: 15844762.999999998 configs: - config_name: default data_files: - split: train path: data/train-* - split: validation path: data/validation-* - split: test path: data/test-* ---
graf/ultra-sft-selfgen
graf
2025-05-05T17:30:26Z
0
0
[ "size_categories:100K<n<1M", "format:parquet", "modality:text", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T17:29:49Z
null
--- dataset_info: features: - name: prompt dtype: string - name: chosen dtype: string - name: rejected dtype: string - name: chosen_feedback sequence: string - name: rejected_feedback sequence: string - name: chosen_reward sequence: float64 - name: rejected_reward sequence: float64 - name: id dtype: string splits: - name: train num_bytes: 969657547 num_examples: 122324 download_size: 457829318 dataset_size: 969657547 configs: - config_name: default data_files: - split: train path: data/train-* ---
HungVu2003/opt-350m_beta_0.0_alpha_0.2_num-company_2_dataset_1_for_gen_17_v2
HungVu2003
2025-05-05T17:26:44Z
0
0
[ "size_categories:10K<n<100K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T17:26:42Z
null
--- dataset_info: features: - name: question dtype: string splits: - name: train num_bytes: 3081126 num_examples: 13750 download_size: 938463 dataset_size: 3081126 configs: - config_name: default data_files: - split: train path: data/train-* ---
HungVu2003/opt-350m_beta_1.0_alpha_0.4_num-company_2_dataset_0_for_gen_7_v2
HungVu2003
2025-05-05T17:24:17Z
0
0
[ "size_categories:10K<n<100K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T17:24:15Z
null
--- dataset_info: features: - name: question dtype: string splits: - name: train num_bytes: 3518844 num_examples: 15000 download_size: 1596382 dataset_size: 3518844 configs: - config_name: default data_files: - split: train path: data/train-* ---
zheminh/SWE-bench_Lite_oracle_2
zheminh
2025-05-05T17:17:09Z
0
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T17:00:48Z
null
--- dataset_info: features: - name: instance_id dtype: string - name: text dtype: string - name: repo dtype: string - name: base_commit dtype: string - name: problem_statement dtype: string - name: hints_text dtype: string - name: created_at dtype: string - name: patch dtype: string - name: test_patch dtype: string - name: version dtype: string - name: FAIL_TO_PASS dtype: string - name: PASS_TO_PASS dtype: string - name: environment_setup_commit dtype: string splits: - name: test num_bytes: 21007265 num_examples: 300 download_size: 8820799 dataset_size: 21007265 configs: - config_name: default data_files: - split: test path: data/test-* ---
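A minimal sketch of reading the single `test` split; in SWE-bench-style datasets, `FAIL_TO_PASS`/`PASS_TO_PASS` are usually JSON-encoded lists of test names, which is assumed here:

```python
import json
from datasets import load_dataset

ds = load_dataset("zheminh/SWE-bench_Lite_oracle_2", split="test")
inst = ds[0]

print(inst["instance_id"], "|", inst["repo"], "@", inst["base_commit"][:8])
print(json.loads(inst["FAIL_TO_PASS"])[:3])  # assumes a JSON-encoded list
```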
inovruzova/azerbaijani-art-collection
inovruzova
2025-05-05T17:13:47Z
22
0
[ "task_categories:image-classification", "license:cc-by-4.0", "size_categories:n<1K", "format:imagefolder", "modality:image", "library:datasets", "library:mlcroissant", "region:us", "art" ]
[ "image-classification" ]
2025-05-03T11:47:23Z
null
--- license: cc-by-4.0 task_categories: - image-classification tags: - art size_categories: - n<1K --- Note: Data was collected by Afina Apayeva, Ariana Kenbayeva, Ilhama Novruzova, and Mehriban Aliyeva; only the art_metal category was obtained by scraping the Azerbaijan Carpet Museum's official website. --- **Dataset Source:** We photographed the artworks with smartphones. For some works we took pictures from 3 perspectives (left, right, and front); for most we took a single picture from the front to avoid data duplication. - Primary photos taken by team members at: - [Azerbaijan National Museum of Art](https://nationalartmuseum.az/?lang=en) - [Azerbaijan State Museum of Musical Culture](https://www.musicmuseum.az/en/index.php) - Additional images scraped for class balancing (*art_metal* category) with the consent of the [Azerbaijani National Carpet Museum](https://azcarpetmuseum.az/en) --- **Dataset Details:** - Total Images: `625` - Classes: `painting`, `musical_instruments`, `art_metal`, `sculpture`, `sketch`, `keramics`, `photography` - Size of Downloaded Dataset Files: `1.87 GB`
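A minimal sketch of loading the collection; as an `imagefolder` repo, class labels come from directory names (the `train` split name is an assumption):

```python
from datasets import load_dataset

ds = load_dataset("inovruzova/azerbaijani-art-collection", split="train")

img = ds[0]["image"]    # a PIL.Image
label = ds[0]["label"]  # integer class index
print(img.size, ds.features["label"].names[label])
```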
themachinefan/test_9b
themachinefan
2025-05-05T17:10:51Z
0
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T17:10:48Z
null
--- dataset_info: features: - name: lora_id dtype: string - name: base_model dtype: string - name: template dtype: string - name: undesired_text dtype: string - name: desired_text sequence: string - name: dataset_type dtype: string - name: dataset_info dtype: string splits: - name: train num_bytes: 462 num_examples: 1 download_size: 4982 dataset_size: 462 configs: - config_name: default data_files: - split: train path: data/train-* ---
ai2-adapt-dev/interactive_tool_use_gpt4o
ai2-adapt-dev
2025-05-05T17:09:09Z
272
1
[ "size_categories:100K<n<1M", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-04-17T18:47:59Z
null
--- dataset_info: features: - name: id dtype: string - name: messages list: - name: content dtype: string - name: function_calls dtype: string - name: functions dtype: string - name: role dtype: string - name: source dtype: string - name: interaction_type dtype: string splits: - name: train num_bytes: 304295007 num_examples: 104638 download_size: 80226148 dataset_size: 304295007 configs: - config_name: default data_files: - split: train path: data/train-* ---
Voxel51/mind2web_multimodal_test_website
Voxel51
2025-05-05T17:04:21Z
25
1
[ "task_categories:image-classification", "task_categories:object-detection", "language:en", "size_categories:1K<n<10K", "format:imagefolder", "modality:image", "library:datasets", "library:mlcroissant", "library:fiftyone", "arxiv:2401.01614", "region:us", "fiftyone", "visual-agents", "os-agents", "gui-grounding", "image", "image-classification", "object-detection" ]
[ "image-classification", "object-detection" ]
2025-04-30T19:51:00Z
null
--- annotations_creators: [] language: en size_categories: - 1K<n<10K task_categories: - image-classification - object-detection task_ids: [] pretty_name: mind2web_multimodal tags: - fiftyone - visual-agents - os-agents - gui-grounding - image - image-classification - object-detection dataset_summary: ' This is a [FiftyOne](https://github.com/voxel51/fiftyone) dataset with 1019 samples. ## Installation If you haven''t already, install FiftyOne: ```bash pip install -U fiftyone ``` ## Usage ```python import fiftyone as fo from fiftyone.utils.huggingface import load_from_hub # Load the dataset # Note: other available arguments include ''max_samples'', etc dataset = load_from_hub("Voxel51/mind2web_multimodal_test_website") # Launch the App session = fo.launch_app(dataset) ``` ' --- # Dataset Card for Multimodal Mind2Web "Cross-Website" Test Split **Note**: This dataset is the test split of the Cross-Website dataset introduced in the paper. ![image/png](m2w_tw.gif) This is a [FiftyOne](https://github.com/voxel51/fiftyone) dataset with 1019 samples. ## Installation If you haven't already, install FiftyOne: ```bash pip install -U fiftyone ``` ## Usage ```python import fiftyone as fo from fiftyone.utils.huggingface import load_from_hub # Load the dataset # Note: other available arguments include 'max_samples', etc dataset = load_from_hub("Voxel51/mind2web_multimodal_test_website") # Launch the App session = fo.launch_app(dataset) ``` # Dataset Details for "Cross-Website" Split in Multimodal Mind2Web ## Dataset Description **Curated by:** The Ohio State University NLP Group (OSU-NLP-Group) **Shared by:** OSU-NLP-Group on Hugging Face **Language(s) (NLP):** en **License:** OPEN-RAIL License (mentioned in the Impact Statements section) ## Dataset Sources **Repository:** https://github.com/OSU-NLP-Group/SeeAct and https://huggingface.co/datasets/osunlp/Multimodal-Mind2Web **Paper:** "GPT-4V(ision) is a Generalist Web Agent, if Grounded" by Boyuan Zheng, Boyu Gou, Jihyung Kil, Huan Sun, Yu Su **Demo:** https://osu-nlp-group.github.io/SeeAct ## Uses ### Direct Use - Evaluating web agents' ability to generalize to new websites within familiar domains - Testing website-level transfer capabilities of models - Benchmarking adaptability to new website interfaces with similar functionality - Assessing how models handle design variations within the same domain category ### Out-of-Scope Use - Developing web agents for harmful purposes (as stated in the paper's impact statement) - Automating actions that could violate website terms of service - Creating agents that access users' personal profiles or perform sensitive operations without consent ## Dataset Structure - Contains 142 tasks across 9 domains and 10 websites - Tasks average 7.2 actions each - Average 4,653 visual tokens per task (highest among the three splits) - Average 612 HTML elements per task (most complex pages among the splits) - Average 114,358 HTML tokens per task - Each example includes task descriptions, HTML structure, operations (CLICK, TYPE, SELECT), target elements with attributes, and action histories ### FiftyOne Dataset Structure **Basic Info:** 1,338 web UI screenshots with task-based annotations **Core Fields:** - `action_uid`: StringField - Unique action identifier - `annotation_id`: StringField - Annotation identifier - `target_action_index`: IntField - Index of target action in sequence - `ground_truth`: EmbeddedDocumentField(Detection) - Element to interact with: - `label`: Action type (TYPE, CLICK) - `bounding_box`: a list of 
relative bounding box coordinates in [0, 1] in the following format: `[<top-left-x>, <top-left-y>, <width>, <height>]`
- `target_action_reprs`: String representation of target action
- `website`: EmbeddedDocumentField(Classification) - Website name
- `domain`: EmbeddedDocumentField(Classification) - Website domain category
- `subdomain`: EmbeddedDocumentField(Classification) - Website subdomain category
- `task_description`: StringField - Natural language description of the task
- `full_sequence`: ListField(StringField) - Complete sequence of actions for the task
- `previous_actions`: ListField - Actions already performed in the sequence
- `current_action`: StringField - Action to be performed
- `alternative_candidates`: EmbeddedDocumentField(Detections) - Other possible elements

## Dataset Creation

### Curation Rationale

The Cross-Website split was specifically designed to evaluate an agent's ability to generalize to new websites within domains it has encountered during training, representing a medium-difficulty generalization scenario.

### Source Data

#### Data Collection and Processing

- Based on the original MIND2WEB dataset
- Each HTML document is aligned with its corresponding webpage screenshot image
- Underwent human verification to confirm element visibility and correct rendering for action prediction
- Specifically includes 10 new websites from the top-level domains represented in the training data

#### Who are the source data producers?

Web screenshots and HTML were collected from 10 websites across 9 domains that were represented in the training data, but the specific websites were held out.

### Annotations

#### Annotation process

Each task includes annotated action sequences showing the correct steps to complete the task. These were likely captured through a tool that records user actions on websites.

#### Who are the annotators?

Researchers from The Ohio State University NLP Group or hired annotators, though specific details aren't provided in the paper.

### Personal and Sensitive Information

The dataset focuses on non-login tasks to comply with user agreements and avoid privacy issues.

## Bias, Risks, and Limitations

- This split presents a medium-difficulty generalization scenario, testing adaptation to new interfaces within familiar domains
- In-context learning methods show advantages over supervised fine-tuning on this split
- The pages in this split are the most complex in terms of HTML elements and have the highest average visual tokens
- Website layouts and functionality may change over time, affecting the validity of the dataset
- Limited to only 10 websites across 9 domains, which may not capture the full diversity of websites within those domains

## Citation

### BibTeX:

```bibtex
@inproceedings{zheng2024seeact,
  title={GPT-4V(ision) is a Generalist Web Agent, if Grounded},
  author={Boyuan Zheng and Boyu Gou and Jihyung Kil and Huan Sun and Yu Su},
  booktitle={Forty-first International Conference on Machine Learning},
  year={2024},
  url={https://openreview.net/forum?id=piecKJ2DlB},
}

@inproceedings{deng2023mindweb,
  title={Mind2Web: Towards a Generalist Agent for the Web},
  author={Xiang Deng and Yu Gu and Boyuan Zheng and Shijie Chen and Samuel Stevens and Boshi Wang and Huan Sun and Yu Su},
  booktitle={Thirty-seventh Conference on Neural Information Processing Systems},
  year={2023},
  url={https://openreview.net/forum?id=kiYqbO3wqw}
}
```

### APA:

Zheng, B., Gou, B., Kil, J., Sun, H., & Su, Y. (2024). GPT-4V(ision) is a Generalist Web Agent, if Grounded. arXiv preprint arXiv:2401.01614.
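## Example: Exploring the Annotations

To make the field schema above concrete, here is a minimal exploration sketch. It only uses field names listed in this card; the FiftyOne calls (`match`, `count_values`) are standard API, but treat the snippet as an illustrative sketch rather than part of the official dataset tooling.

```python
import fiftyone as fo
from fiftyone import ViewField as F
from fiftyone.utils.huggingface import load_from_hub

dataset = load_from_hub("Voxel51/mind2web_multimodal_test_website")

# Keep only samples whose ground-truth action is a CLICK
clicks = dataset.match(F("ground_truth.label") == "CLICK")

# Count samples per website (the `website` field is a Classification)
print(clicks.count_values("website.label"))

# Inspect one sample's target element and its relative bounding box
sample = clicks.first()
print(sample.ground_truth.label, sample.ground_truth.bounding_box)

# Browse the filtered view in the App
session = fo.launch_app(clicks)
```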
## Dataset Card Contact

GitHub: https://github.com/OSU-NLP-Group/SeeAct
kaiwenw/distill-r1-qwen-1.5b-aime-25-4096-with-old-prm-indices_92160_99840
kaiwenw
2025-05-05T17:03:00Z
0
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:tabular", "modality:text", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T17:02:49Z
null
--- dataset_info: features: - name: message_id dtype: string - name: problem dtype: string - name: answer dtype: string - name: processed_answer dtype: string - name: responses dtype: string - name: reward dtype: bool - name: prompt_len dtype: int64 - name: response_len dtype: int64 - name: classifier_scores sequence: float64 splits: - name: train num_bytes: 1025447659 num_examples: 7680 download_size: 238327115 dataset_size: 1025447659 configs: - config_name: default data_files: - split: train path: data/train-* ---
kaiwenw/distill-r1-qwen-1.5b-aime-25-4096-with-old-prm-indices_0_7680
kaiwenw
2025-05-05T17:02:37Z
0
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:tabular", "modality:text", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T17:02:26Z
null
--- dataset_info: features: - name: message_id dtype: string - name: problem dtype: string - name: answer dtype: string - name: processed_answer dtype: string - name: responses dtype: string - name: reward dtype: bool - name: prompt_len dtype: int64 - name: response_len dtype: int64 - name: classifier_scores sequence: float64 splits: - name: train num_bytes: 1031600947 num_examples: 7680 download_size: 239593968 dataset_size: 1031600947 configs: - config_name: default data_files: - split: train path: data/train-* ---
kaiwenw/distill-r1-qwen-1.5b-aime-25-4096-with-old-prm-indices_23040_30720
kaiwenw
2025-05-05T17:02:24Z
0
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:tabular", "modality:text", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T17:02:13Z
null
--- dataset_info: features: - name: message_id dtype: string - name: problem dtype: string - name: answer dtype: string - name: processed_answer dtype: string - name: responses dtype: string - name: reward dtype: bool - name: prompt_len dtype: int64 - name: response_len dtype: int64 - name: classifier_scores sequence: float64 splits: - name: train num_bytes: 1021291032 num_examples: 7680 download_size: 237781547 dataset_size: 1021291032 configs: - config_name: default data_files: - split: train path: data/train-* ---
Voxel51/mind2web_multimodal_test_domain
Voxel51
2025-05-05T16:54:10Z
43
1
[ "task_categories:image-classification", "task_categories:object-detection", "language:en", "size_categories:1K<n<10K", "format:imagefolder", "modality:image", "library:datasets", "library:mlcroissant", "library:fiftyone", "arxiv:2401.01614", "region:us", "fiftyone", "visual-agents", "os-agents", "gui-grounding", "image", "image-classification", "object-detection" ]
[ "image-classification", "object-detection" ]
2025-04-30T20:20:47Z
null
--- annotations_creators: [] language: en size_categories: - 1K<n<10K task_categories: - image-classification - object-detection task_ids: [] pretty_name: mind2web_multimodal_test_domain tags: - fiftyone - visual-agents - os-agents - gui-grounding - image - image-classification - object-detection dataset_summary: ' This is a [FiftyOne](https://github.com/voxel51/fiftyone) dataset with 4050 samples. ## Installation If you haven''t already, install FiftyOne: ```bash pip install -U fiftyone ``` ## Usage ```python import fiftyone as fo from fiftyone.utils.huggingface import load_from_hub # Load the dataset # Note: other available arguments include ''max_samples'', etc dataset = load_from_hub("Voxel51/mind2web_multimodal_test_domain") # Launch the App session = fo.launch_app(dataset) ``` ' ---

# Dataset Card for "Cross-Domain" Test Split in Multimodal Mind2Web

**Note**: This dataset is the test split of the Cross-Domain dataset introduced in the paper.

![image/png](m2w_td.gif)

This is a [FiftyOne](https://github.com/voxel51/fiftyone) dataset with 4050 samples.

## Installation

If you haven't already, install FiftyOne:

```bash
pip install -U fiftyone
```

## Usage

```python
import fiftyone as fo
from fiftyone.utils.huggingface import load_from_hub

# Load the dataset
# Note: other available arguments include 'max_samples', etc
dataset = load_from_hub("Voxel51/mind2web_multimodal_test_domain")

# Launch the App
session = fo.launch_app(dataset)
```

## Dataset Description

**Curated by:** The Ohio State University NLP Group (OSU-NLP-Group)
**Shared by:** OSU-NLP-Group on Hugging Face
**Language(s) (NLP):** en
**License:** OPEN-RAIL License

## Dataset Sources

**Repository:** https://github.com/OSU-NLP-Group/SeeAct and https://huggingface.co/datasets/osunlp/Multimodal-Mind2Web
**Paper:** "GPT-4V(ision) is a Generalist Web Agent, if Grounded" by Boyuan Zheng, Boyu Gou, Jihyung Kil, Huan Sun, Yu Su
**Demo:** https://osu-nlp-group.github.io/SeeAct

## Uses

### Direct Use

- Evaluating web agents' ability to generalize to entirely new domains
- Testing zero-shot domain transfer capabilities of models
- Benchmarking the true generalist capabilities of web agents
- Assessing model performance in unseen web environments

### Out-of-Scope Use

- Developing web agents for harmful purposes (as stated in the paper's impact statement)
- Automating actions that could violate website terms of service
- Creating agents that access users' personal profiles or perform sensitive operations without consent

## Dataset Structure

- Contains 694 tasks across 13 domains and 53 websites
- Tasks average 5.9 actions each
- Average 4,314 visual tokens per task
- Average 494 HTML elements per task
- Average 91,163 HTML tokens per task
- Each example includes task descriptions, HTML structure, operations (CLICK, TYPE, SELECT), target elements with attributes, and action histories

### FiftyOne Dataset Structure

**Basic Info:** 1,338 web UI screenshots with task-based annotations

**Core Fields:**
- `action_uid`: StringField - Unique action identifier
- `annotation_id`: StringField - Annotation identifier
- `target_action_index`: IntField - Index of target action in sequence
- `ground_truth`: EmbeddedDocumentField(Detection) - Element to interact with:
  - `label`: Action type (TYPE, CLICK)
  - `bounding_box`: a list of relative bounding box coordinates in [0, 1] in the following format: `[<top-left-x>, <top-left-y>, <width>, <height>]`
- `target_action_reprs`: String representation of target action
- `website`: EmbeddedDocumentField(Classification) - Website name
- `domain`: EmbeddedDocumentField(Classification) - Website domain category
- `subdomain`: EmbeddedDocumentField(Classification) - Website subdomain category
- `task_description`: StringField - Natural language description of the task
- `full_sequence`: ListField(StringField) - Complete sequence of actions for the task
- `previous_actions`: ListField - Actions already performed in the sequence
- `current_action`: StringField - Action to be performed
- `alternative_candidates`: EmbeddedDocumentField(Detections) - Other possible elements

## Dataset Creation

### Curation Rationale

The Cross-Domain split was specifically designed to evaluate an agent's ability to generalize to entirely new domains it hasn't encountered during training, representing the most challenging generalization scenario.

### Source Data

#### Data Collection and Processing

- Based on the original MIND2WEB dataset
- Each HTML document is aligned with its corresponding webpage screenshot image
- Underwent human verification to confirm element visibility and correct rendering for action prediction
- Specifically includes websites from top-level domains held out from the training data

#### Who are the source data producers?

Web screenshots and HTML were collected from 53 websites across 13 domains that were not represented in the training data.

### Annotations

#### Annotation process

Each task includes annotated action sequences showing the correct steps to complete the task. These were likely captured through a tool that records user actions on websites.

#### Who are the annotators?

Researchers from The Ohio State University NLP Group or hired annotators, though specific details aren't provided in the paper.

### Personal and Sensitive Information

The dataset focuses on non-login tasks to comply with user agreements and avoid privacy issues.

## Bias, Risks, and Limitations

- This split presents the most challenging generalization scenario as it tests performance on entirely unfamiliar domains
- In-context learning methods with large models show better performance than supervised fine-tuning on this split
- The gap between SEEACTOracle and other methods is largest in this split (23.2% step success rate difference)
- Website layouts and functionality may change over time, affecting the validity of the dataset
- Limited to the specific domains included; may not fully represent all possible web domains

## Citation

### BibTeX:

```bibtex
@inproceedings{zheng2024seeact,
  title={GPT-4V(ision) is a Generalist Web Agent, if Grounded},
  author={Boyuan Zheng and Boyu Gou and Jihyung Kil and Huan Sun and Yu Su},
  booktitle={Forty-first International Conference on Machine Learning},
  year={2024},
  url={https://openreview.net/forum?id=piecKJ2DlB},
}

@inproceedings{deng2023mindweb,
  title={Mind2Web: Towards a Generalist Agent for the Web},
  author={Xiang Deng and Yu Gu and Boyuan Zheng and Shijie Chen and Samuel Stevens and Boshi Wang and Huan Sun and Yu Su},
  booktitle={Thirty-seventh Conference on Neural Information Processing Systems},
  year={2023},
  url={https://openreview.net/forum?id=kiYqbO3wqw}
}
```

### APA:

Zheng, B., Gou, B., Kil, J., Sun, H., & Su, Y. (2024). GPT-4V(ision) is a Generalist Web Agent, if Grounded. arXiv preprint arXiv:2401.01614.

## Dataset Card Contact

GitHub: https://github.com/OSU-NLP-Group/SeeAct
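## Example: Summarizing the Split

As a complement to the schema above, here is a short sketch for summarizing the split by domain and pulling out TYPE actions. It only uses field names listed in this card; treat it as an illustrative sketch, not official dataset tooling.

```python
import fiftyone as fo
from fiftyone import ViewField as F
from fiftyone.utils.huggingface import load_from_hub

dataset = load_from_hub("Voxel51/mind2web_multimodal_test_domain")

# How many screenshots come from each domain category?
print(dataset.count_values("domain.label"))

# Pull out the samples whose target action is TYPE and inspect one task
typing = dataset.match(F("ground_truth.label") == "TYPE")
sample = typing.first()
print(sample.task_description)
print(sample.target_action_reprs)
```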
yoad/heb_news_ocr_corpus_transformed
yoad
2025-05-05T16:51:04Z
1
0
[ "size_categories:1M<n<10M", "format:parquet", "modality:text", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T08:53:19Z
null
--- dataset_info: features: - name: article_id dtype: string - name: begins dtype: string - name: title dtype: string - name: type dtype: string - name: page dtype: int64 - name: text dtype: 'null' - name: ocr_text dtype: string - name: source_file dtype: string - name: newspaper_name dtype: string - name: date dtype: timestamp[us] - name: clean_ocr_text dtype: string - name: article_url dtype: string splits: - name: train num_bytes: 12279570153 num_examples: 2739218 download_size: 6541133124 dataset_size: 12279570153 configs: - config_name: default data_files: - split: train path: data/train-* ---
dopaul/simple_pawn_move_v4
dopaul
2025-05-05T16:41:31Z
0
0
[ "task_categories:robotics", "modality:video", "region:us", "phosphobot", "so100", "phospho-dk" ]
[ "robotics" ]
2025-05-05T16:26:14Z
null
--- tags: - phosphobot - so100 - phospho-dk task_categories: - robotics --- # simple_pawn_move_v4 **This dataset was generated using a [phospho starter pack](https://robots.phospho.ai).** This dataset contains a series of episodes recorded with a robot and multiple cameras. It can be directly used to train a policy using imitation learning. It's compatible with LeRobot and RLDS.
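Since the card advertises LeRobot compatibility, here is a minimal loading sketch. The `LeRobotDataset` class and import path come from the LeRobot library; which keys actually appear in a batch depends on this dataset's recorded features, so treat the printed keys as the source of truth.

```python
from torch.utils.data import DataLoader
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset

# Download/cache the episodes from the Hub
dataset = LeRobotDataset("dopaul/simple_pawn_move_v4")

# Each item is a dict of tensors (actions, observations, timestamps, ...)
loader = DataLoader(dataset, batch_size=32, shuffle=True)
batch = next(iter(loader))
print(batch.keys())
```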
LukeBailey181/STPProverWarmupWithCot_testing
LukeBailey181
2025-05-05T16:39:37Z
0
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T16:39:35Z
null
--- dataset_info: features: - name: header dtype: string - name: theorem dtype: string - name: proof dtype: string - name: augmenter_prompt dtype: string - name: augmenter_return dtype: string - name: resulting_prover_prompt dtype: string - name: resulting_prover_target dtype: string splits: - name: train num_bytes: 199894 num_examples: 32 download_size: 70706 dataset_size: 199894 configs: - config_name: default data_files: - split: train path: data/train-* ---
alinatl/en-es-translation
alinatl
2025-05-05T16:39:20Z
0
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T16:38:45Z
null
--- dataset_info: features: - name: en list: - name: content dtype: string - name: role dtype: string - name: sp list: - name: content dtype: string - name: role dtype: string splits: - name: train num_bytes: 2237 num_examples: 1 download_size: 4259 dataset_size: 2237 configs: - config_name: default data_files: - split: train path: data/train-* ---
sleeping-ai/LLM-as-Judge-retake-sat-baseline
sleeping-ai
2025-05-05T16:35:45Z
0
0
[ "license:mit", "size_categories:n<1K", "format:csv", "modality:text", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T16:04:42Z
null
--- license: mit --- I am storing all the baseline evals I ran for AGIEval-SAT-Math.
Jellywibble/CW_Cost
Jellywibble
2025-05-05T16:25:46Z
0
0
[ "size_categories:n<1K", "format:parquet", "modality:tabular", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T16:25:44Z
null
--- dataset_info: features: - name: category dtype: string - name: cost_dollars dtype: float64 - name: is_engineering dtype: bool - name: origin dtype: string - name: date dtype: date32 - name: on_demand dtype: bool - name: __index_level_0__ dtype: int64 splits: - name: train num_bytes: 54082 num_examples: 807 download_size: 9023 dataset_size: 54082 configs: - config_name: default data_files: - split: train path: data/train-* ---
svjack/Day_if_sentient_beings_SPLITED_AMu_CARD
svjack
2025-05-05T16:19:17Z
0
0
[ "size_categories:n<1K", "format:parquet", "modality:audio", "modality:image", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T13:44:22Z
null
--- dataset_info: features: - name: audio dtype: audio - name: image dtype: image splits: - name: train num_bytes: 51047650.0 num_examples: 47 download_size: 51046203 dataset_size: 51047650.0 configs: - config_name: default data_files: - split: train path: data/train-* --- # Amao's Streaming Channel ## Channel Description **Amao_o** is a lively streamer from a beautiful city known as a "sea of flowers". Presenting as an adorable (but secretly mischievous) kitten/puppy hybrid persona, they host engaging live streams. ## Streaming Details • **Primary Content**: Gaming/Mixed Topics/Therapeutic Chat • **Schedule**: • **Main Stream**: 8:00 PM - 4:00 AM (local time) • **Community Chat**: 7:00 PM - 1:00 AM in group 985085334 ## Community Join our cozy community hub in QQ group: 985085334 ("Our Little Cottage") ![image/webp](https://cdn-uploads.huggingface.co/production/uploads/634dffc49b777beec3bc6448/FwvnIuZFAmCqf69kXxUnD.webp) ![image/jpeg](https://cdn-uploads.huggingface.co/production/uploads/634dffc49b777beec3bc6448/RRp3UCc5Dgwc0hJAO9sTb.jpeg) ![image/jpeg](https://cdn-uploads.huggingface.co/production/uploads/634dffc49b777beec3bc6448/0r805lrtITcE7qMgySAxh.jpeg)
dgambettaphd/D_llm3_gen4_WXS_doc1000_synt64_lr1e-04_acm_SYNLAST
dgambettaphd
2025-05-05T16:17:26Z
0
0
[ "size_categories:10K<n<100K", "format:parquet", "modality:tabular", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T16:17:22Z
null
--- dataset_info: features: - name: id_doc dtype: int64 - name: text dtype: string - name: dataset dtype: string - name: gen dtype: int64 - name: synt dtype: int64 - name: MPP dtype: float64 splits: - name: train num_bytes: 12955987 num_examples: 20000 download_size: 7770912 dataset_size: 12955987 configs: - config_name: default data_files: - split: train path: data/train-* ---
samahadhoud/decomposed-tikz-dataset-30-40
samahadhoud
2025-05-05T16:15:02Z
0
0
[ "size_categories:10K<n<100K", "format:parquet", "modality:image", "modality:text", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T16:13:53Z
null
--- dataset_info: features: - name: id dtype: string - name: png dtype: image - name: code dtype: string splits: - name: train num_bytes: 938623908.404 num_examples: 74708 download_size: 621479832 dataset_size: 938623908.404 configs: - config_name: default data_files: - split: train path: data/train-* ---
zhengbang0707/REFUEL_it2_mask1_v2_llama3
zhengbang0707
2025-05-05T16:09:44Z
0
0
[ "size_categories:100K<n<1M", "modality:tabular", "modality:text", "region:us" ]
[]
2025-05-05T16:00:55Z
null
--- configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* - split: val path: data/val-* dataset_info: features: - name: chosen list: - name: content dtype: string - name: role dtype: string - name: reject list: - name: content dtype: string - name: role dtype: string - name: chosen_token sequence: int64 - name: reject_token sequence: int64 - name: chosen_mask sequence: int64 - name: chosen_mask_user sequence: int64 - name: reject_mask sequence: int64 - name: reject_mask_user sequence: int64 - name: chosen_reward_list sequence: float64 - name: reject_reward_list sequence: float64 - name: chosen_reward_list_new sequence: float64 - name: reject_reward_list_new sequence: float64 - name: chosen_reward dtype: float64 - name: reject_reward dtype: float64 splits: - name: train num_bytes: 12237301422 num_examples: 115117 - name: test num_bytes: 53174161 num_examples: 500 - name: val num_bytes: 53044292 num_examples: 500 download_size: 593328057 dataset_size: 12343519875 --- # Dataset Card for "REFUEL_it2_mask1_v2_llama3" [More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
qingy2024/ACT75
qingy2024
2025-05-05T16:01:48Z
0
0
[ "license:apache-2.0", "size_categories:n<1K", "format:json", "modality:text", "modality:video", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T16:01:08Z
null
--- license: apache-2.0 ---
hpederm/kex_small
hpederm
2025-05-05T15:57:56Z
0
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:image", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T15:57:54Z
null
--- dataset_info: features: - name: anchor dtype: string - name: positive dtype: string splits: - name: train num_bytes: 3221622 num_examples: 800 - name: valid num_bytes: 408119 num_examples: 100 - name: test num_bytes: 402383 num_examples: 100 download_size: 1800470 dataset_size: 4032124 configs: - config_name: default data_files: - split: train path: data/train-* - split: valid path: data/valid-* - split: test path: data/test-* ---
DuckZH/so100_test
DuckZH
2025-05-05T15:47:33Z
0
0
[ "task_categories:robotics", "license:apache-2.0", "size_categories:1K<n<10K", "format:parquet", "modality:tabular", "modality:timeseries", "modality:video", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us", "LeRobot", "so100", "tutorial" ]
[ "robotics" ]
2025-05-05T15:10:31Z
null
--- license: apache-2.0 task_categories: - robotics tags: - LeRobot - so100 - tutorial configs: - config_name: default data_files: data/*/*.parquet --- This dataset was created using [LeRobot](https://github.com/huggingface/lerobot). ## Dataset Description - **Homepage:** [More Information Needed] - **Paper:** [More Information Needed] - **License:** apache-2.0 ## Dataset Structure [meta/info.json](meta/info.json): ```json { "codebase_version": "v2.0", "robot_type": "so100", "total_episodes": 6, "total_frames": 8976, "total_tasks": 1, "total_videos": 12, "total_chunks": 1, "chunks_size": 1000, "fps": 30, "splits": { "train": "0:6" }, "data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet", "video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4", "features": { "action": { "dtype": "float32", "shape": [ 6 ], "names": [ "main_shoulder_pan", "main_shoulder_lift", "main_elbow_flex", "main_wrist_flex", "main_wrist_roll", "main_gripper" ] }, "observation.state": { "dtype": "float32", "shape": [ 6 ], "names": [ "main_shoulder_pan", "main_shoulder_lift", "main_elbow_flex", "main_wrist_flex", "main_wrist_roll", "main_gripper" ] }, "observation.images.wrist": { "dtype": "video", "shape": [ 480, 640, 3 ], "names": [ "height", "width", "channels" ], "info": { "video.fps": 30.0, "video.height": 480, "video.width": 640, "video.channels": 3, "video.codec": "h264", "video.pix_fmt": "yuv420p", "video.is_depth_map": false, "has_audio": false } }, "observation.images.front": { "dtype": "video", "shape": [ 480, 640, 3 ], "names": [ "height", "width", "channels" ], "info": { "video.fps": 30.0, "video.height": 480, "video.width": 640, "video.channels": 3, "video.codec": "h264", "video.pix_fmt": "yuv420p", "video.is_depth_map": false, "has_audio": false } }, "timestamp": { "dtype": "float32", "shape": [ 1 ], "names": null }, "frame_index": { "dtype": "int64", "shape": [ 1 ], "names": null }, "episode_index": { "dtype": "int64", "shape": [ 1 ], "names": null }, "index": { "dtype": "int64", "shape": [ 1 ], "names": null }, "task_index": { "dtype": "int64", "shape": [ 1 ], "names": null } } } ``` ## Citation **BibTeX:** ```bibtex [More Information Needed] ```
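The `data_path` template in `info.json` above maps (chunk, episode) indices to per-episode Parquet files. As an illustration (assuming the repo is downloaded locally via `huggingface_hub`; the column list in the comment mirrors the non-video features above), one episode can be read directly:

```python
import pandas as pd
from huggingface_hub import snapshot_download

root = snapshot_download("DuckZH/so100_test", repo_type="dataset")

# Resolve data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet
# for chunk 0, episode 0
path = f"{root}/data/chunk-{0:03d}/episode_{0:06d}.parquet"

df = pd.read_parquet(path)
print(df.columns.tolist())  # action, observation.state, timestamp, indices, ...
print(len(df), "frames")
```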
HungVu2003/opt-350m_beta_0.0_alpha_0.2_num-company_2_dataset_0_for_gen_17_v2
HungVu2003
2025-05-05T15:41:42Z
0
0
[ "size_categories:10K<n<100K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T15:41:41Z
null
--- dataset_info: features: - name: question dtype: string splits: - name: train num_bytes: 3135643 num_examples: 13750 download_size: 947611 dataset_size: 3135643 configs: - config_name: default data_files: - split: train path: data/train-* ---
Adriana213/pricer-data
Adriana213
2025-05-05T15:35:11Z
0
0
[ "region:us" ]
[]
2025-05-05T15:34:46Z
null
--- dataset_info: features: - name: text dtype: string - name: price dtype: float64 splits: - name: train num_bytes: 313746593 num_examples: 400000 - name: test num_bytes: 1558981 num_examples: 2000 download_size: 185245700 dataset_size: 315305574 configs: - config_name: default data_files: - split: train path: data/train-* - split: test path: data/test-* ---
Wilhelmlab/proteometools_ms2_charge
Wilhelmlab
2025-05-05T15:27:58Z
0
0
[ "license:cc-by-4.0", "size_categories:1M<n<10M", "format:parquet", "modality:tabular", "modality:text", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T14:08:40Z
null
--- license: cc-by-4.0 dataset_info: features: - name: modified_sequence dtype: string - name: max_rt_end dtype: float64 - name: mean_mass dtype: float64 - name: mean_rt_apex dtype: float64 - name: min_andromeda_score dtype: float64 - name: min_rt_start dtype: float64 - name: raw_file sequence: string - name: package sequence: string - name: raw_file_count dtype: int64 - name: number_diff_cs dtype: int64 - name: most_abundant_charge_state sequence: int64 - name: observed_charge_states sequence: int64 - name: charge_state_dist sequence: float64 - name: intensity_sum dtype: float64 - name: mean_abs_intensity sequence: float64 splits: - name: train num_bytes: 554485394 num_examples: 1243018 download_size: 123613327 dataset_size: 554485394 configs: - config_name: default data_files: - split: train path: data/train-* ---
austindavis/lichess-uci-tokenized-768
austindavis
2025-05-05T15:22:30Z
0
0
[ "size_categories:10K<n<100K", "format:parquet", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T15:22:27Z
null
--- dataset_info: features: - name: tokens sequence: int64 splits: - name: train num_bytes: 131790400 num_examples: 32144 download_size: 14459619 dataset_size: 131790400 configs: - config_name: default data_files: - split: train path: data/train-* ---
PHBD/medicaid-financial-management-data
PHBD
2025-05-05T15:06:01Z
0
0
[ "language:en", "size_categories:10K<n<100K", "format:csv", "modality:tabular", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us", "hhs", "cms" ]
[]
2025-05-05T15:05:59Z
null
--- language: - en pretty_name: Medicaid Financial Management Data tags: - hhs - cms --- # Medicaid Financial Management Data ## Description This dataset reports summary state-by-state total expenditures by program for the Medicaid Program, Medicaid Administration and CHIP programs. These state expenditures are tracked through the automated Medicaid Budget and Expenditure System/State Children's Health Insurance Program Budget and Expenditure System (MBES/CBES). For more information, visit https://medicaid.gov/medicaid/finance/state-expenditure-reporting/expenditure-reports/index.html. ## Dataset Details - **Publisher**: Centers for Medicare & Medicaid Services - **Last Modified**: 2024-12-11 - **Contact**: Medicaid.gov (no-reply@data.medicaid.gov) ## Source Original data can be found at: https://healthdata.gov/d/2tf3-vhn2 ## Usage You can load this dataset using: ```python from datasets import load_dataset dataset = load_dataset("PHBD/medicaid-financial-management-data") ``` ## License This dataset is licensed under https://www.usa.gov/government-works
PHBD/impaired-driving-death-rate-by-age-and-sex-2012-an
PHBD
2025-05-05T15:05:58Z
0
0
[ "language:en", "license:odbl", "size_categories:n<1K", "format:csv", "modality:tabular", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us", "hhs", "cdc" ]
[]
2025-05-05T15:05:57Z
null
--- language: - en pretty_name: Impaired Driving Death Rate, by Age and Sex, 2012 & 2014, Region 9 - San Francisco tags: - hhs - cdc license: odbl ---

# Impaired Driving Death Rate, by Age and Sex, 2012 & 2014, Region 9 - San Francisco

## Description

Rate of deaths by age/gender (per 100,000 population) for people killed in crashes involving a driver with BAC >= 0.08%, 2012, 2014. 2012 Source: Fatality Analysis Reporting System (FARS). 2014 Source: National Highway Traffic Safety Administration's (NHTSA) Fatality Analysis Reporting System (FARS), 2014 Annual Report File. Note: Blank cells indicate data are suppressed. Fatality rates based on fewer than 20 deaths are suppressed.

## Dataset Details

- **Publisher**: Centers for Disease Control and Prevention
- **Last Modified**: 2016-09-14
- **Contact**: CDC INFO (cdcinfo@cdc.gov)

## Source

Original data can be found at: https://data.cdc.gov/d/3se3-rwj2

## Usage

You can load this dataset using:

```python
from datasets import load_dataset

dataset = load_dataset("PHBD/impaired-driving-death-rate-by-age-and-sex-2012-an")
```

## License

This dataset is licensed under http://opendefinition.org/licenses/odc-odbl/
PHBD/dqs-visits-to-physician-offices-hospital-outpatien
PHBD
2025-05-05T15:05:49Z
0
0
[ "language:en", "size_categories:1K<n<10K", "format:csv", "modality:tabular", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us", "hhs", "cdc", "men", "physicians", "white", "women" ]
[]
2025-05-05T15:05:47Z
null
--- language: - en pretty_name: 'DQS Visits to physician offices, hospital outpatient departments, and hospital emergency departments, by age, sex, and race: United States' tags: - hhs - cdc - men - physicians - white - women --- # DQS Visits to physician offices, hospital outpatient departments, and hospital emergency departments, by age, sex, and race: United States ## Description Data on visits to physician offices and hospital emergency departments in the United States, by age, sex, and race. Data are from Health, United States. SOURCE: National Center for Health Statistics, National Ambulatory Medical Care Survey and National Hospital Ambulatory Medical Care Survey. Search, visualize, and download these and other estimates from over 120 health topics with the NCHS Data Query System (DQS), available from: https://www.cdc.gov/nchs/dataquery/index.htm. ## Dataset Details - **Publisher**: Centers for Disease Control and Prevention - **Temporal Coverage**: 2000/2018 - **Last Modified**: 2025-04-21 - **Contact**: National Center for Health Statistics (healthus@cdc.gov) ## Source Original data can be found at: https://www.cdc.gov/nchs/hus ## Usage You can load this dataset using: ```python from datasets import load_dataset dataset = load_dataset("PHBD/dqs-visits-to-physician-offices-hospital-outpatien") ``` ## License This dataset is licensed under https://www.usa.gov/government-works
TheRealPilot638/Falcon3-1B-dvts-256_no_chunking_H200
TheRealPilot638
2025-05-05T14:34:35Z
0
0
[ "size_categories:1K<n<10K", "format:parquet", "modality:tabular", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T03:30:55Z
null
--- dataset_info: - config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-256--m-4--iters-40--look-1--seed-0--agg_strategy--last features: - name: problem dtype: string - name: solution dtype: string - name: answer dtype: string - name: subject dtype: string - name: level dtype: int64 - name: unique_id dtype: string - name: completions sequence: string - name: pred dtype: string - name: completion_tokens dtype: int64 - name: scores sequence: sequence: float64 - name: agg_scores sequence: float64 - name: pred_weighted@1 dtype: string - name: pred_maj@1 dtype: string - name: pred_naive@1 dtype: string - name: pred_weighted@2 dtype: string - name: pred_maj@2 dtype: string - name: pred_naive@2 dtype: string - name: pred_weighted@4 dtype: string - name: pred_maj@4 dtype: string - name: pred_naive@4 dtype: string - name: pred_weighted@8 dtype: string - name: pred_maj@8 dtype: string - name: pred_naive@8 dtype: string - name: pred_weighted@16 dtype: string - name: pred_maj@16 dtype: string - name: pred_naive@16 dtype: string - name: pred_weighted@32 dtype: string - name: pred_maj@32 dtype: string - name: pred_naive@32 dtype: string - name: pred_weighted@64 dtype: string - name: pred_maj@64 dtype: string - name: pred_naive@64 dtype: string - name: pred_weighted@128 dtype: string - name: pred_maj@128 dtype: string - name: pred_naive@128 dtype: string - name: pred_weighted@256 dtype: string - name: pred_maj@256 dtype: string - name: pred_naive@256 dtype: string splits: - name: train num_bytes: 176490377 num_examples: 450 download_size: 26566815 dataset_size: 176490377 - config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-256--m-4--iters-40--look-1--seed-0--agg_strategy--last--evals features: - name: n dtype: int64 - name: acc_naive dtype: float64 - name: acc_weighted dtype: float64 - name: acc_maj dtype: float64 splits: - name: train num_bytes: 128 num_examples: 4 download_size: 2047 dataset_size: 128 - config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-256--m-4--iters-40--look-1--seed-1--agg_strategy--last features: - name: problem dtype: string - name: solution dtype: string - name: answer dtype: string - name: subject dtype: string - name: level dtype: int64 - name: unique_id dtype: string - name: completions sequence: string - name: pred dtype: string - name: completion_tokens dtype: int64 - name: scores sequence: sequence: float64 - name: agg_scores sequence: float64 - name: pred_weighted@1 dtype: string - name: pred_maj@1 dtype: string - name: pred_naive@1 dtype: string - name: pred_weighted@2 dtype: string - name: pred_maj@2 dtype: string - name: pred_naive@2 dtype: string - name: pred_weighted@4 dtype: string - name: pred_maj@4 dtype: string - name: pred_naive@4 dtype: string - name: pred_weighted@8 dtype: string - name: pred_maj@8 dtype: string - name: pred_naive@8 dtype: string - name: pred_weighted@16 dtype: string - name: pred_maj@16 dtype: string - name: pred_naive@16 dtype: string - name: pred_weighted@32 dtype: string - name: pred_maj@32 dtype: string - name: pred_naive@32 dtype: string - name: pred_weighted@64 dtype: string - name: pred_maj@64 dtype: string - name: pred_naive@64 dtype: string - name: pred_weighted@128 dtype: string - name: pred_maj@128 dtype: string - name: pred_naive@128 dtype: string - name: pred_weighted@256 dtype: string - name: pred_maj@256 dtype: string - name: pred_naive@256 dtype: string splits: - name: train num_bytes: 196516027 num_examples: 500 download_size: 29614997 dataset_size: 196516027 - config_name: 
HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-256--m-4--iters-40--look-1--seed-1--agg_strategy--last--evals features: - name: n dtype: int64 - name: acc_naive dtype: float64 - name: acc_weighted dtype: float64 - name: acc_maj dtype: float64 splits: - name: train num_bytes: 128 num_examples: 4 download_size: 2044 dataset_size: 128 - config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-256--m-4--iters-40--look-1--seed-2--agg_strategy--last features: - name: problem dtype: string - name: solution dtype: string - name: answer dtype: string - name: subject dtype: string - name: level dtype: int64 - name: unique_id dtype: string - name: completions sequence: string - name: pred dtype: string - name: completion_tokens dtype: int64 - name: scores sequence: sequence: float64 - name: agg_scores sequence: float64 - name: pred_weighted@1 dtype: string - name: pred_maj@1 dtype: string - name: pred_naive@1 dtype: string - name: pred_weighted@2 dtype: string - name: pred_maj@2 dtype: string - name: pred_naive@2 dtype: string - name: pred_weighted@4 dtype: string - name: pred_maj@4 dtype: string - name: pred_naive@4 dtype: string - name: pred_weighted@8 dtype: string - name: pred_maj@8 dtype: string - name: pred_naive@8 dtype: string - name: pred_weighted@16 dtype: string - name: pred_maj@16 dtype: string - name: pred_naive@16 dtype: string - name: pred_weighted@32 dtype: string - name: pred_maj@32 dtype: string - name: pred_naive@32 dtype: string - name: pred_weighted@64 dtype: string - name: pred_maj@64 dtype: string - name: pred_naive@64 dtype: string - name: pred_weighted@128 dtype: string - name: pred_maj@128 dtype: string - name: pred_naive@128 dtype: string - name: pred_weighted@256 dtype: string - name: pred_maj@256 dtype: string - name: pred_naive@256 dtype: string splits: - name: train num_bytes: 196160244 num_examples: 500 download_size: 29600111 dataset_size: 196160244 - config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-256--m-4--iters-40--look-1--seed-2--agg_strategy--last--evals features: - name: n dtype: int64 - name: acc_naive dtype: float64 - name: acc_weighted dtype: float64 - name: acc_maj dtype: float64 splits: - name: train num_bytes: 128 num_examples: 4 download_size: 2036 dataset_size: 128 - config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-256--m-4--iters-40--look-1--seed-3--agg_strategy--last features: - name: problem dtype: string - name: solution dtype: string - name: answer dtype: string - name: subject dtype: string - name: level dtype: int64 - name: unique_id dtype: string - name: completions sequence: string - name: pred dtype: string - name: completion_tokens dtype: int64 - name: scores sequence: sequence: float64 - name: agg_scores sequence: float64 - name: pred_weighted@1 dtype: string - name: pred_maj@1 dtype: string - name: pred_naive@1 dtype: string - name: pred_weighted@2 dtype: string - name: pred_maj@2 dtype: string - name: pred_naive@2 dtype: string - name: pred_weighted@4 dtype: string - name: pred_maj@4 dtype: string - name: pred_naive@4 dtype: string - name: pred_weighted@8 dtype: string - name: pred_maj@8 dtype: string - name: pred_naive@8 dtype: string - name: pred_weighted@16 dtype: string - name: pred_maj@16 dtype: string - name: pred_naive@16 dtype: string - name: pred_weighted@32 dtype: string - name: pred_maj@32 dtype: string - name: pred_naive@32 dtype: string - name: pred_weighted@64 dtype: string - name: pred_maj@64 dtype: string - name: pred_naive@64 dtype: string - name: pred_weighted@128 dtype: string - name: pred_maj@128 dtype: 
string - name: pred_naive@128 dtype: string - name: pred_weighted@256 dtype: string - name: pred_maj@256 dtype: string - name: pred_naive@256 dtype: string splits: - name: train num_bytes: 196152625 num_examples: 500 download_size: 29565434 dataset_size: 196152625 - config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-256--m-4--iters-40--look-1--seed-3--agg_strategy--last--evals features: - name: n dtype: int64 - name: acc_naive dtype: float64 - name: acc_weighted dtype: float64 - name: acc_maj dtype: float64 splits: - name: train num_bytes: 128 num_examples: 4 download_size: 2050 dataset_size: 128 configs: - config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-256--m-4--iters-40--look-1--seed-0--agg_strategy--last data_files: - split: train path: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-256--m-4--iters-40--look-1--seed-0--agg_strategy--last/train-* - config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-256--m-4--iters-40--look-1--seed-0--agg_strategy--last--evals data_files: - split: train path: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-256--m-4--iters-40--look-1--seed-0--agg_strategy--last--evals/train-* - config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-256--m-4--iters-40--look-1--seed-1--agg_strategy--last data_files: - split: train path: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-256--m-4--iters-40--look-1--seed-1--agg_strategy--last/train-* - config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-256--m-4--iters-40--look-1--seed-1--agg_strategy--last--evals data_files: - split: train path: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-256--m-4--iters-40--look-1--seed-1--agg_strategy--last--evals/train-* - config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-256--m-4--iters-40--look-1--seed-2--agg_strategy--last data_files: - split: train path: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-256--m-4--iters-40--look-1--seed-2--agg_strategy--last/train-* - config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-256--m-4--iters-40--look-1--seed-2--agg_strategy--last--evals data_files: - split: train path: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-256--m-4--iters-40--look-1--seed-2--agg_strategy--last--evals/train-* - config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-256--m-4--iters-40--look-1--seed-3--agg_strategy--last data_files: - split: train path: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-256--m-4--iters-40--look-1--seed-3--agg_strategy--last/train-* - config_name: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-256--m-4--iters-40--look-1--seed-3--agg_strategy--last--evals data_files: - split: train path: HuggingFaceH4_MATH-500--T-0.8--top_p-1.0--n-256--m-4--iters-40--look-1--seed-3--agg_strategy--last--evals/train-* ---
BranoSandy/eval_act_so100_test_2
BranoSandy
2025-05-05T14:24:09Z
0
0
[ "task_categories:robotics", "license:apache-2.0", "size_categories:1K<n<10K", "format:parquet", "modality:tabular", "modality:timeseries", "modality:video", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us", "LeRobot", "so100", "tutorial" ]
[ "robotics" ]
2025-05-05T14:23:51Z
null
--- license: apache-2.0 task_categories: - robotics tags: - LeRobot - so100 - tutorial configs: - config_name: default data_files: data/*/*.parquet --- This dataset was created using [LeRobot](https://github.com/huggingface/lerobot). ## Dataset Description - **Homepage:** [More Information Needed] - **Paper:** [More Information Needed] - **License:** apache-2.0 ## Dataset Structure [meta/info.json](meta/info.json): ```json { "codebase_version": "v2.1", "robot_type": "so100", "total_episodes": 2, "total_frames": 1634, "total_tasks": 1, "total_videos": 4, "total_chunks": 1, "chunks_size": 1000, "fps": 30, "splits": { "train": "0:2" }, "data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet", "video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4", "features": { "action": { "dtype": "float32", "shape": [ 6 ], "names": [ "main_shoulder_pan", "main_shoulder_lift", "main_elbow_flex", "main_wrist_flex", "main_wrist_roll", "main_gripper" ] }, "observation.state": { "dtype": "float32", "shape": [ 6 ], "names": [ "main_shoulder_pan", "main_shoulder_lift", "main_elbow_flex", "main_wrist_flex", "main_wrist_roll", "main_gripper" ] }, "observation.images.laptop": { "dtype": "video", "shape": [ 480, 640, 3 ], "names": [ "height", "width", "channels" ], "info": { "video.fps": 30.0, "video.height": 480, "video.width": 640, "video.channels": 3, "video.codec": "h264", "video.pix_fmt": "yuv420p", "video.is_depth_map": false, "has_audio": false } }, "observation.images.phone": { "dtype": "video", "shape": [ 480, 640, 3 ], "names": [ "height", "width", "channels" ], "info": { "video.fps": 30.0, "video.height": 480, "video.width": 640, "video.channels": 3, "video.codec": "h264", "video.pix_fmt": "yuv420p", "video.is_depth_map": false, "has_audio": false } }, "timestamp": { "dtype": "float32", "shape": [ 1 ], "names": null }, "frame_index": { "dtype": "int64", "shape": [ 1 ], "names": null }, "episode_index": { "dtype": "int64", "shape": [ 1 ], "names": null }, "index": { "dtype": "int64", "shape": [ 1 ], "names": null }, "task_index": { "dtype": "int64", "shape": [ 1 ], "names": null } } } ``` ## Citation **BibTeX:** ```bibtex [More Information Needed] ```
Marianne0Habib/stt-english-test-dataset-sample
Marianne0Habib
2025-05-05T14:15:27Z
0
0
[ "language:en", "size_categories:n<1K", "format:audiofolder", "modality:audio", "library:datasets", "library:mlcroissant", "region:us" ]
[]
2025-05-05T13:12:19Z
null
--- language: - en size_categories: - n<1K ---

# 🗣️ English Speech Audio Dataset (Sample)

This dataset contains English speech samples, annotated by `dialect`, `speaking rate`, and `environmental condition`, and includes `ground truth transcriptions`. It is intended to support research and applications in Automatic Speech Recognition (ASR) and spoken language understanding.

---

## 📁 Dataset Structure

- Audio segments are stored in `.wav` format
- Accompanied by a CSV file (En_dataset.csv) with rich metadata

---

## 📊 Dataset Statistics

| Metric | Value |
|----------------|---------|
| Total Segments | 500 |
| Languages | English |
| Audio Format | `.wav` |
| Sampling Rate | 16 kHz |

---

## 📊 Data Insights

**🔢 Total Segments**: 500

**🌍 Recording Conditions**

| Environment | Count |
|-------------|-------|
| Clean | 251 |
| Noisy | 249 |

**🕐 Audio Properties**

| Attribute | Category | Count/Value |
|------------------------|----------|-------------|
| Length Type | Short | 472 |
| | Long | 28 |
| Speaking Rate | Average | 306 |
| | Fast | 189 |
| | Slow | 5 |
| Segment Length (sec) | Min | 1.74 |
| | Max | 24.8 |
| | Mean | 6.96 |

---

# 🛠️ How to Use

You can load the dataset using:

```python
from datasets import load_dataset

ds = load_dataset("Marianne0Habib/stt-english-test-dataset-sample")
```
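For convenience, a short slicing sketch follows; note that the column name used below (`environment`) is an assumption based on the tables above, so check `ds.column_names` (or En_dataset.csv) for the actual schema first.

```python
from datasets import load_dataset

ds = load_dataset("Marianne0Habib/stt-english-test-dataset-sample", split="train")

# The real schema lives in En_dataset.csv; print it before filtering
print(ds.column_names)

# ASSUMPTION: a column named "environment" holds the Clean/Noisy label
clean = ds.filter(lambda ex: str(ex.get("environment", "")).lower() == "clean")
print(len(clean), "clean segments")
```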
macwiatrak/bacbench-antibiotic-resistance-protein-sequences
macwiatrak
2025-05-05T14:06:43Z
0
0
[ "region:us" ]
[]
2025-05-05T13:35:51Z
null
--- dataset_info: features: - name: genome_name dtype: string - name: contig_name sequence: string - name: protein_id sequence: sequence: string - name: protein_sequence sequence: sequence: string - name: taxid dtype: string - name: locus_tag sequence: sequence: string - name: start sequence: sequence: int64 - name: end sequence: sequence: int64 - name: product sequence: sequence: string splits: - name: train num_bytes: 40490436864 num_examples: 26052 download_size: 34207458365 dataset_size: 40490436864 configs: - config_name: default data_files: - split: train path: data/train-* ---
macwiatrak/bacbench-phenotypic-traits-protein-sequences
macwiatrak
2025-05-05T14:04:48Z
0
0
[ "region:us" ]
[]
2025-05-05T12:54:48Z
null
--- dataset_info: features: - name: genome_name dtype: string - name: contig_name sequence: string - name: protein_id sequence: sequence: string - name: protein_sequence sequence: sequence: string - name: taxid dtype: string - name: locus_tag sequence: sequence: string - name: start sequence: sequence: int64 - name: end sequence: sequence: int64 - name: product sequence: sequence: string splits: - name: train num_bytes: 37098323931 num_examples: 24462 download_size: 31451416670 dataset_size: 37098323931 configs: - config_name: default data_files: - split: train path: data/train-* ---
rocketeer-allied/Frether_demo
rocketeer-allied
2025-05-05T14:01:18Z
0
0
[ "region:us" ]
[]
2025-05-05T14:01:15Z
null
--- dataset_info: features: - name: prompt dtype: string splits: - name: train num_bytes: 196288 num_examples: 533 download_size: 51970 dataset_size: 196288 configs: - config_name: default data_files: - split: train path: data/train-* ---
masato-ka/so100_conditional_grasping
masato-ka
2025-05-05T13:58:26Z
0
0
[ "task_categories:robotics", "license:apache-2.0", "region:us", "LeRobot", "so100", "tutorial" ]
[ "robotics" ]
2025-05-05T13:34:34Z
null
--- license: apache-2.0 task_categories: - robotics tags: - LeRobot - so100 - tutorial configs: - config_name: default data_files: data/*/*.parquet --- This dataset was created using [LeRobot](https://github.com/huggingface/lerobot). ## Dataset Description - **Homepage:** [More Information Needed] - **Paper:** [More Information Needed] - **License:** apache-2.0 ## Dataset Structure [meta/info.json](meta/info.json): ```json { "codebase_version": "v2.1", "robot_type": "so100", "total_episodes": 12, "total_frames": 7163, "total_tasks": 1, "total_videos": 12, "total_chunks": 1, "chunks_size": 1000, "fps": 30, "splits": { "train": "0:12" }, "data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet", "video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4", "features": { "action": { "dtype": "float32", "shape": [ 6 ], "names": [ "main_shoulder_pan", "main_shoulder_lift", "main_elbow_flex", "main_wrist_flex", "main_wrist_roll", "main_gripper" ] }, "observation.state": { "dtype": "float32", "shape": [ 7 ], "names": [ "main_shoulder_pan", "main_shoulder_lift", "main_elbow_flex", "main_wrist_flex", "main_wrist_roll", "main_gripper", "condition" ] }, "observation.images.laptop": { "dtype": "video", "shape": [ 480, 640, 3 ], "names": [ "height", "width", "channels" ], "info": { "video.height": 480, "video.width": 640, "video.codec": "av1", "video.pix_fmt": "yuv420p", "video.is_depth_map": false, "video.fps": 30, "video.channels": 3, "has_audio": false } }, "timestamp": { "dtype": "float32", "shape": [ 1 ], "names": null }, "frame_index": { "dtype": "int64", "shape": [ 1 ], "names": null }, "episode_index": { "dtype": "int64", "shape": [ 1 ], "names": null }, "index": { "dtype": "int64", "shape": [ 1 ], "names": null }, "task_index": { "dtype": "int64", "shape": [ 1 ], "names": null } } } ``` ## Citation **BibTeX:** ```bibtex [More Information Needed] ```
Exgc/Aha-Bench
Exgc
2025-05-05T13:54:19Z
48
0
[ "size_categories:n<1K", "format:parquet", "modality:audio", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T07:17:22Z
null
--- dataset_info: features: - name: audio dtype: audio - name: question_id dtype: string - name: type dtype: string - name: question dtype: string - name: answer dtype: string - name: answer_details dtype: string - name: text dtype: string - name: label dtype: string splits: - name: sample_with_audio num_bytes: 213747099.0 num_examples: 649 - name: sample_without_audio num_bytes: 11267686.0 num_examples: 32 download_size: 93234681 dataset_size: 225014785.0 configs: - config_name: default data_files: - split: sample_with_audio path: data/sample_with_audio-* - split: sample_without_audio path: data/sample_without_audio-* ---
gunnybd01/Consumer_Cyclical_News
gunnybd01
2025-05-05T13:31:35Z
0
0
[ "region:us" ]
[]
2025-05-05T13:31:27Z
null
--- dataset_info: features: - name: Date dtype: string - name: Symbol dtype: string - name: Article dtype: string splits: - name: train num_bytes: 432167634 num_examples: 87160 download_size: 199016910 dataset_size: 432167634 configs: - config_name: default data_files: - split: train path: data/train-* ---
gunnybd01/Communication_Services_News
gunnybd01
2025-05-05T13:30:19Z
0
0
[ "region:us" ]
[]
2025-05-05T13:30:12Z
null
--- dataset_info: features: - name: Date dtype: string - name: Symbol dtype: string - name: Article dtype: string splits: - name: train num_bytes: 175209563 num_examples: 34826 download_size: 84514999 dataset_size: 175209563 configs: - config_name: default data_files: - split: train path: data/train-* ---
shylee/so100_pengripA
shylee
2025-05-05T13:29:45Z
0
0
[ "task_categories:robotics", "license:apache-2.0", "region:us", "LeRobot", "so100", "pengrip" ]
[ "robotics" ]
2025-05-05T12:57:01Z
null
--- license: apache-2.0 task_categories: - robotics tags: - LeRobot - so100 - pengrip configs: - config_name: default data_files: data/*/*.parquet --- This dataset was created using [LeRobot](https://github.com/huggingface/lerobot). ## Dataset Description - **Homepage:** [More Information Needed] - **Paper:** [More Information Needed] - **License:** apache-2.0 ## Dataset Structure [meta/info.json](meta/info.json): ```json { "codebase_version": "v2.1", "robot_type": "so100", "total_episodes": 25, "total_frames": 6007, "total_tasks": 1, "total_videos": 75, "total_chunks": 1, "chunks_size": 1000, "fps": 30, "splits": { "train": "0:25" }, "data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet", "video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4", "features": { "action": { "dtype": "float32", "shape": [ 6 ], "names": [ "main_shoulder_pan", "main_shoulder_lift", "main_elbow_flex", "main_wrist_flex", "main_wrist_roll", "main_gripper" ] }, "observation.state": { "dtype": "float32", "shape": [ 6 ], "names": [ "main_shoulder_pan", "main_shoulder_lift", "main_elbow_flex", "main_wrist_flex", "main_wrist_roll", "main_gripper" ] }, "observation.images.FrontCam": { "dtype": "video", "shape": [ 480, 640, 3 ], "names": [ "height", "width", "channels" ], "info": { "video.fps": 30.0, "video.height": 480, "video.width": 640, "video.channels": 3, "video.codec": "av1", "video.pix_fmt": "yuv420p", "video.is_depth_map": false, "has_audio": false } }, "observation.images.TopCam": { "dtype": "video", "shape": [ 480, 640, 3 ], "names": [ "height", "width", "channels" ], "info": { "video.fps": 30.0, "video.height": 480, "video.width": 640, "video.channels": 3, "video.codec": "av1", "video.pix_fmt": "yuv420p", "video.is_depth_map": false, "has_audio": false } }, "observation.images.WristCam": { "dtype": "video", "shape": [ 480, 640, 3 ], "names": [ "height", "width", "channels" ], "info": { "video.fps": 30.0, "video.height": 480, "video.width": 640, "video.channels": 3, "video.codec": "av1", "video.pix_fmt": "yuv420p", "video.is_depth_map": false, "has_audio": false } }, "timestamp": { "dtype": "float32", "shape": [ 1 ], "names": null }, "frame_index": { "dtype": "int64", "shape": [ 1 ], "names": null }, "episode_index": { "dtype": "int64", "shape": [ 1 ], "names": null }, "index": { "dtype": "int64", "shape": [ 1 ], "names": null }, "task_index": { "dtype": "int64", "shape": [ 1 ], "names": null } } } ``` ## Citation **BibTeX:** ```bibtex [More Information Needed] ```
athenasaurav/hindi_qa_100
athenasaurav
2025-05-05T13:28:16Z
0
0
[ "task_categories:question-answering", "task_ids:open-domain-qa", "annotations_creators:machine-generated", "language_creators:machine-generated", "multilinguality:monolingual", "language:hi", "license:cc-by-nc-4.0", "size_categories:10K<n<100K", "region:us" ]
[ "question-answering" ]
2025-05-05T13:28:10Z
null
--- annotations_creators: [machine-generated] language_creators: [machine-generated] language: [hi] license: cc-by-nc-4.0 multilinguality: monolingual size_categories: [10K<n<100K] source_datasets: [] task_categories: [question-answering] task_ids: [open-domain-qa] pretty_name: Hindi QA Dataset ---

# Hindi QA Dataset

This dataset contains question-answer pairs in Hindi, generated using GPT-3.5-turbo. Each question and answer is a single sentence, with a mix of easy, medium, and hard questions, and varying lengths (15-50 words).

## Format

- Each entry is a dictionary with two fields:
  - `Question`: The question in Hindi
  - `Answer`: The answer in Hindi

## Example

```json
{
  "Question": "भारत की राजधानी क्या है?",
  "Answer": "भारत की राजधानी नई दिल्ली है।"
}
```

## Usage

You can load this dataset using the HuggingFace Datasets library:

```python
from datasets import load_dataset

ds = load_dataset("athenasaurav/hindi_qa_100", split="train")
print(ds[0])
```

# Pretraining

## Overview

We find that keeping good semantic understanding of text boosts the model's ability to speak naturally and empathetically. We propose training the model on interleaved batches of speech and text. If you want the model to retain a large part of its text ability - i.e., function as an end-to-end speech model - you could keep the ratio of text batches to speech batches at 2:1 to start (for example) and then gradually decrease it to 1:1 throughout training. If your model is trained just for TTS, start with 1:1 and gradually decrease to 0:1. A minimal sketch of this interleaving schedule appears at the end of this card.

## Train

### Config

Include your datasets and other hyperparameters in the YAML file.

### Setup and start

```bash
pip install transformers trl wandb flash_attn datasets torch
```

You may need to try different versions of `flash_attn` depending on your torch/CUDA/Python version.

```bash
accelerate launch pretrain.py
```

### Disclaimer

This code was copied into this repo quickly, so there may be bugs. The general outline should be pretty straightforward. It's also set up for multinode training. Depending on how strong you want the model's reasoning abilities to be (and what specifically you want to retain), you can choose which text-based dataset to use. Using simple datasets with QA pairs (for finetuning) works pretty well. You can also try using Wikipedia.

## License

This dataset is licensed under [CC BY-NC 4.0](https://creativecommons.org/licenses/by-nc/4.0/). Non-commercial use only.
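As referenced in the Pretraining overview, here is a minimal, assumption-laden sketch of the text/speech batch interleaving schedule. The `text_batches`/`speech_batches` iterables and the linear ratio decay are illustrative stand-ins, not the repo's actual `pretrain.py` logic.

```python
import random

def interleave(text_batches, speech_batches, total_steps,
               start_ratio=2.0, end_ratio=1.0):
    """Yield (modality, batch) pairs, sampling text vs. speech with a
    text:speech ratio that decays linearly from start_ratio to end_ratio."""
    text_iter, speech_iter = iter(text_batches), iter(speech_batches)
    for step in range(total_steps):
        frac = step / max(total_steps - 1, 1)
        ratio = start_ratio + (end_ratio - start_ratio) * frac
        p_text = ratio / (ratio + 1.0)  # probability of drawing a text batch
        if random.random() < p_text:
            yield "text", next(text_iter)
        else:
            yield "speech", next(speech_iter)

# Toy usage: 2:1 text:speech at the start, 1:1 by the end
for modality, batch in interleave(range(1000), range(1000), total_steps=100):
    pass  # run a forward/backward pass on `batch` according to `modality`
```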
dijisoz23/nutuk_final_benchmark_data
dijisoz23
2025-05-05T13:17:24Z
0
0
[ "region:us" ]
[]
2025-05-05T13:07:30Z
null
---
dataset_info:
  features:
  - name: question
    dtype: string
  - name: answer
    dtype: string
  - name: question_type
    dtype: string
  splits:
  - name: train
    num_bytes: 214865
    num_examples: 510
  download_size: 119000
  dataset_size: 214865
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
---
myScribe/training_data_2025_05_05
myScribe
2025-05-05T13:14:56Z
0
0
[ "region:us" ]
[]
2025-05-05T13:14:54Z
null
---
dataset_info:
  features:
  - name: id
    dtype: string
  - name: prompt
    struct:
    - name: content
      dtype: string
    - name: role
      dtype: string
  - name: chosen
    struct:
    - name: content
      dtype: string
    - name: role
      dtype: string
  - name: conversation
    list:
    - name: content
      dtype: string
    - name: role
      dtype: string
  splits:
  - name: train
    num_bytes: 903537
    num_examples: 29
  download_size: 441500
  dataset_size: 903537
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
---
AliAfkhamii/hf_emotion_generation_texts
AliAfkhamii
2025-05-05T13:13:22Z
0
0
[ "region:us" ]
[]
2025-05-05T13:13:12Z
null
---
dataset_info:
  features:
  - name: text
    dtype: string
  - name: label
    dtype: string
  splits:
  - name: train
    num_bytes: 43469
    num_examples: 563
  download_size: 19377
  dataset_size: 43469
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
---
ayamekajou/pixmocap-part3
ayamekajou
2025-05-05T13:12:28Z
0
0
[ "region:us" ]
[]
2025-05-05T11:54:01Z
null
---
dataset_info:
  features:
  - name: image_url
    dtype: string
  - name: caption
    dtype: string
  - name: transcripts
    sequence: string
  - name: image
    dtype: image
  splits:
  - name: train
    num_bytes: 107493633391.696
    num_examples: 158604
  download_size: 106908056793
  dataset_size: 107493633391.696
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
---
anaterna/airflow-class-summarization
anaterna
2025-05-05T13:06:15Z
0
0
[ "region:us" ]
[]
2025-05-05T13:06:13Z
null
---
dataset_info:
  features:
  - name: module_path
    dtype: string
  - name: class_name
    dtype: string
  - name: parent_class
    sequence: string
  - name: source_code
    dtype: string
  - name: docstring
    dtype: string
  splits:
  - name: train
    num_bytes: 581548
    num_examples: 124
  download_size: 236092
  dataset_size: 581548
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
---
jysim/koch_block_2025050521
jysim
2025-05-05T13:03:17Z
0
0
[ "task_categories:robotics", "license:apache-2.0", "region:us", "LeRobot", "tutorial" ]
[ "robotics" ]
2025-05-05T12:21:37Z
null
---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
- tutorial
configs:
- config_name: default
  data_files: data/*/*.parquet
---

This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).

## Dataset Description

- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0

## Dataset Structure

[meta/info.json](meta/info.json):
```json
{
    "codebase_version": "v2.1",
    "robot_type": "koch",
    "total_episodes": 21,
    "total_frames": 11888,
    "total_tasks": 1,
    "total_videos": 42,
    "total_chunks": 1,
    "chunks_size": 1000,
    "fps": 30,
    "splits": {
        "train": "0:21"
    },
    "data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
    "video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
    "features": {
        "action": {
            "dtype": "float32",
            "shape": [
                6
            ],
            "names": [
                "main_shoulder_pan",
                "main_shoulder_lift",
                "main_elbow_flex",
                "main_wrist_flex",
                "main_wrist_roll",
                "main_gripper"
            ]
        },
        "observation.state": {
            "dtype": "float32",
            "shape": [
                6
            ],
            "names": [
                "main_shoulder_pan",
                "main_shoulder_lift",
                "main_elbow_flex",
                "main_wrist_flex",
                "main_wrist_roll",
                "main_gripper"
            ]
        },
        "observation.images.webcam": {
            "dtype": "video",
            "shape": [
                480,
                640,
                3
            ],
            "names": [
                "height",
                "width",
                "channels"
            ],
            "info": {
                "video.height": 480,
                "video.width": 640,
                "video.codec": "av1",
                "video.pix_fmt": "yuv420p",
                "video.is_depth_map": false,
                "video.fps": 30,
                "video.channels": 3,
                "has_audio": false
            }
        },
        "observation.images.phone": {
            "dtype": "video",
            "shape": [
                480,
                640,
                3
            ],
            "names": [
                "height",
                "width",
                "channels"
            ],
            "info": {
                "video.height": 480,
                "video.width": 640,
                "video.codec": "av1",
                "video.pix_fmt": "yuv420p",
                "video.is_depth_map": false,
                "video.fps": 30,
                "video.channels": 3,
                "has_audio": false
            }
        },
        "timestamp": {
            "dtype": "float32",
            "shape": [
                1
            ],
            "names": null
        },
        "frame_index": {
            "dtype": "int64",
            "shape": [
                1
            ],
            "names": null
        },
        "episode_index": {
            "dtype": "int64",
            "shape": [
                1
            ],
            "names": null
        },
        "index": {
            "dtype": "int64",
            "shape": [
                1
            ],
            "names": null
        },
        "task_index": {
            "dtype": "int64",
            "shape": [
                1
            ],
            "names": null
        }
    }
}
```

## Citation

**BibTeX:**
```bibtex
[More Information Needed]
```
myScribe/training_data_foo_bar
myScribe
2025-05-05T12:59:32Z
0
0
[ "size_categories:n<1K", "format:parquet", "modality:text", "library:datasets", "library:pandas", "library:mlcroissant", "library:polars", "region:us" ]
[]
2025-05-05T12:59:29Z
null
---
dataset_info:
  features:
  - name: id
    dtype: string
  - name: prompt
    struct:
    - name: content
      dtype: string
    - name: role
      dtype: string
  - name: chosen
    struct:
    - name: content
      dtype: string
    - name: role
      dtype: string
  - name: conversation
    list:
    - name: content
      dtype: string
    - name: role
      dtype: string
  splits:
  - name: train
    num_bytes: 933639
    num_examples: 29
  download_size: 448875
  dataset_size: 933639
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
---
jlesein/TestCube8
jlesein
2025-05-05T12:57:08Z
0
0
[ "task_categories:robotics", "license:apache-2.0", "size_categories:10K<n<100K", "format:parquet", "modality:tabular", "modality:timeseries", "modality:video", "library:datasets", "library:dask", "library:mlcroissant", "library:polars", "region:us", "LeRobot" ]
[ "robotics" ]
2025-05-05T12:55:35Z
null
---
license: apache-2.0
task_categories:
- robotics
tags:
- LeRobot
configs:
- config_name: default
  data_files: data/*/*.parquet
---

This dataset was created using [LeRobot](https://github.com/huggingface/lerobot).

## Dataset Description

- **Homepage:** [More Information Needed]
- **Paper:** [More Information Needed]
- **License:** apache-2.0

## Dataset Structure

[meta/info.json](meta/info.json):
```json
{
    "codebase_version": "v2.1",
    "robot_type": "so100",
    "total_episodes": 100,
    "total_frames": 40200,
    "total_tasks": 1,
    "total_videos": 300,
    "total_chunks": 1,
    "chunks_size": 1000,
    "fps": 30,
    "splits": {
        "train": "0:100"
    },
    "data_path": "data/chunk-{episode_chunk:03d}/episode_{episode_index:06d}.parquet",
    "video_path": "videos/chunk-{episode_chunk:03d}/{video_key}/episode_{episode_index:06d}.mp4",
    "features": {
        "action": {
            "dtype": "float32",
            "shape": [
                6
            ],
            "names": [
                "main_shoulder_pan",
                "main_shoulder_lift",
                "main_elbow_flex",
                "main_wrist_flex",
                "main_wrist_roll",
                "main_gripper"
            ]
        },
        "observation.state": {
            "dtype": "float32",
            "shape": [
                6
            ],
            "names": [
                "main_shoulder_pan",
                "main_shoulder_lift",
                "main_elbow_flex",
                "main_wrist_flex",
                "main_wrist_roll",
                "main_gripper"
            ]
        },
        "observation.images.robot": {
            "dtype": "video",
            "shape": [
                480,
                640,
                3
            ],
            "names": [
                "height",
                "width",
                "channels"
            ],
            "info": {
                "video.fps": 30.0,
                "video.height": 480,
                "video.width": 640,
                "video.channels": 3,
                "video.codec": "h264",
                "video.pix_fmt": "yuv420p",
                "video.is_depth_map": false,
                "has_audio": false
            }
        },
        "observation.images.top": {
            "dtype": "video",
            "shape": [
                480,
                640,
                3
            ],
            "names": [
                "height",
                "width",
                "channels"
            ],
            "info": {
                "video.fps": 30.0,
                "video.height": 480,
                "video.width": 640,
                "video.channels": 3,
                "video.codec": "h264",
                "video.pix_fmt": "yuv420p",
                "video.is_depth_map": false,
                "has_audio": false
            }
        },
        "observation.images.side": {
            "dtype": "video",
            "shape": [
                480,
                640,
                3
            ],
            "names": [
                "height",
                "width",
                "channels"
            ],
            "info": {
                "video.fps": 30.0,
                "video.height": 480,
                "video.width": 640,
                "video.channels": 3,
                "video.codec": "h264",
                "video.pix_fmt": "yuv420p",
                "video.is_depth_map": false,
                "has_audio": false
            }
        },
        "timestamp": {
            "dtype": "float32",
            "shape": [
                1
            ],
            "names": null
        },
        "frame_index": {
            "dtype": "int64",
            "shape": [
                1
            ],
            "names": null
        },
        "episode_index": {
            "dtype": "int64",
            "shape": [
                1
            ],
            "names": null
        },
        "index": {
            "dtype": "int64",
            "shape": [
                1
            ],
            "names": null
        },
        "task_index": {
            "dtype": "int64",
            "shape": [
                1
            ],
            "names": null
        }
    }
}
```

## Citation

**BibTeX:**
```bibtex
[More Information Needed]
```