# Source: evals-for-every-language / requirements_dev.txt
# Author: David Pomerenke
# Commit: d15babf ("Separate requirements_dev.txt file")
# Size: 5.62 kB
# This file was autogenerated by uv via the following command:
# uv pip compile pyproject.toml -o requirements_dev.txt --extra dev
aiohappyeyeballs==2.6.1
# via aiohttp
aiohttp==3.11.18
# via
# datasets
# fsspec
aiolimiter==1.2.1
# via languagebench (pyproject.toml)
aiosignal==1.3.2
# via aiohttp
annotated-types==0.7.0
# via pydantic
anyio==4.9.0
# via
# httpx
# openai
# starlette
async-timeout==5.0.1
# via aiohttp
attrs==25.3.0
# via aiohttp
bert-score==0.3.13
# via languagebench (pyproject.toml)
certifi==2025.4.26
# via
# httpcore
# httpx
# requests
charset-normalizer==3.4.2
# via requests
click==8.1.8
# via
# jiwer
# uvicorn
colorama==0.4.6
# via sacrebleu
contourpy==1.3.2
# via matplotlib
cycler==0.12.1
# via matplotlib
datasets==3.5.1
# via evaluate
dill==0.3.8
# via
# datasets
# evaluate
# multiprocess
distro==1.9.0
# via openai
elevenlabs==1.57.0
# via languagebench (pyproject.toml)
evaluate==0.4.0
# via languagebench (pyproject.toml)
exceptiongroup==1.2.2
# via anyio
fastapi==0.115.12
# via languagebench (pyproject.toml)
filelock==3.18.0
# via
# datasets
# huggingface-hub
# torch
# transformers
fonttools==4.57.0
# via matplotlib
frozenlist==1.6.0
# via
# aiohttp
# aiosignal
fsspec==2025.3.0
# via
# datasets
# evaluate
# huggingface-hub
# torch
h11==0.16.0
# via
# httpcore
# uvicorn
httpcore==1.0.9
# via httpx
httpx==0.28.1
# via
# elevenlabs
# openai
huggingface-hub==0.30.2
# via
# languagebench (pyproject.toml)
# datasets
# evaluate
# tokenizers
# transformers
idna==3.10
# via
# anyio
# httpx
# requests
# yarl
jinja2==3.1.6
# via torch
jiter==0.9.0
# via openai
jiwer==3.1.0
# via languagebench (pyproject.toml)
joblib==1.5.0
# via languagebench (pyproject.toml)
kiwisolver==1.4.8
# via matplotlib
langcodes==3.5.0
# via languagebench (pyproject.toml)
language-data==1.3.0
# via
# languagebench (pyproject.toml)
# langcodes
lxml==5.4.0
# via sacrebleu
marisa-trie==1.2.1
# via language-data
markdown-it-py==3.0.0
# via rich
markupsafe==3.0.2
# via jinja2
matplotlib==3.10.1
# via bert-score
mdurl==0.1.2
# via markdown-it-py
mpmath==1.3.0
# via sympy
multidict==6.4.3
# via
# aiohttp
# yarl
multiprocess==0.70.16
# via
# datasets
# evaluate
networkx==3.4.2
# via torch
numpy==2.2.5
# via
# languagebench (pyproject.toml)
# bert-score
# contourpy
# datasets
# evaluate
# matplotlib
# pandas
# sacrebleu
# transformers
openai==1.77.0
# via languagebench (pyproject.toml)
packaging==25.0
# via
# bert-score
# datasets
# evaluate
# huggingface-hub
# matplotlib
# transformers
pandas==2.2.3
# via
# languagebench (pyproject.toml)
# bert-score
# datasets
# evaluate
pillow==11.2.1
# via matplotlib
portalocker==3.1.1
# via sacrebleu
propcache==0.3.1
# via
# aiohttp
# yarl
protobuf==6.30.2
# via languagebench (pyproject.toml)
pyarrow==20.0.0
# via datasets
pydantic==2.11.4
# via
# elevenlabs
# fastapi
# openai
pydantic-core==2.33.2
# via
# elevenlabs
# pydantic
pygments==2.19.1
# via rich
pyparsing==3.2.3
# via matplotlib
python-dateutil==2.9.0.post0
# via
# matplotlib
# pandas
python-dotenv==1.1.0
# via languagebench (pyproject.toml)
pytz==2025.2
# via pandas
pyyaml==6.0.2
# via
# datasets
# huggingface-hub
# transformers
rapidfuzz==3.13.0
# via jiwer
regex==2024.11.6
# via
# sacrebleu
# tiktoken
# transformers
requests==2.32.3
# via
# bert-score
# datasets
# elevenlabs
# evaluate
# huggingface-hub
# responses
# tiktoken
# transformers
responses==0.18.0
# via evaluate
rich==14.0.0
# via languagebench (pyproject.toml)
sacrebleu==2.5.1
# via languagebench (pyproject.toml)
safetensors==0.5.3
# via transformers
sentencepiece==0.2.0
# via languagebench (pyproject.toml)
setuptools==80.3.1
# via marisa-trie
six==1.17.0
# via python-dateutil
sniffio==1.3.1
# via
# anyio
# openai
starlette==0.46.2
# via fastapi
sympy==1.14.0
# via torch
tabulate==0.9.0
# via sacrebleu
tiktoken==0.9.0
# via languagebench (pyproject.toml)
tokenizers==0.21.1
# via transformers
torch==2.7.0
# via bert-score
tqdm==4.67.1
# via
# languagebench (pyproject.toml)
# bert-score
# datasets
# evaluate
# huggingface-hub
# openai
# transformers
transformers==4.51.3
# via
# languagebench (pyproject.toml)
# bert-score
typing-extensions==4.13.2
# via
# anyio
# elevenlabs
# fastapi
# huggingface-hub
# multidict
# openai
# pydantic
# pydantic-core
# rich
# torch
# typing-inspection
# uvicorn
typing-inspection==0.4.0
# via pydantic
tzdata==2025.2
# via pandas
urllib3==2.4.0
# via
# requests
# responses
uvicorn==0.34.2
# via languagebench (pyproject.toml)
websockets==15.0.1
# via elevenlabs
xxhash==3.5.0
# via
# datasets
# evaluate
yarl==1.20.0
# via aiohttp