oceansweep committed on
Commit
9586789
·
verified ·
1 Parent(s): ad545f3

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +32 -42
Dockerfile CHANGED
@@ -1,26 +1,7 @@
1
- # This is the same dockerfile from `Helper_Files/Dockerfiles/tldw-nvidia_amd64_Dockerfile`. c/p here so people see a 'Dockerfile' in the root directory and know what to do.
2
- # Usage
3
- # docker build -t tldw-nvidia_amd64 .
4
- # docker run --gpus=all -p 7860:7860 -v tldw_volume:/tldw tldw-nvidia_amd64
5
- #
6
- # If the above command doesn't work and it hangs on start, use the following command:
7
- #
8
- # sudo docker run -it -p 7860:7860 -v tldw_volume:/tdlw tldw-nvidia_amd64 bash
9
- #
10
- # Once in the container, run the following command:
11
- #
12
- # python summarize.py -gui
13
- #
14
- # And you should be good.
15
-
16
- # Use Nvidia image:
17
  FROM nvidia/cuda:12.6.1-cudnn-runtime-ubuntu24.04
18
 
19
- # Use an official Python runtime as a parent image
20
- #FROM python:3.10.15-slim-bookworm
21
-
22
-
23
- # Set build arguments
24
  ARG REPO_URL=https://github.com/rmusser01/tldw.git
25
  ARG BRANCH=main
26
  ARG GPU_SUPPORT=cpu
@@ -35,29 +16,34 @@ RUN apt-get update && apt-get install -y \
35
  python3-pyaudio \
36
  portaudio19-dev \
37
  python3-pip \
38
- portaudio19-dev \
39
  python3-venv \
40
  && rm -rf /var/lib/apt/lists/*
41
 
42
- # Set the working directory in the container
43
- WORKDIR /tldw
44
 
45
- # Clone the repository
46
- RUN git clone -b ${BRANCH} ${REPO_URL} .
47
 
48
- # Create and activate virtual environment
49
- RUN python3 -m venv ./venv
50
- ENV PATH="/tldw/venv/bin:$PATH"
51
 
52
- # Upgrade pip and install wheel
53
- RUN pip install --upgrade pip wheel
54
 
55
- # Install CUDA
56
- RUN pip install nvidia-cublas-cu12 nvidia-cudnn-cu12
57
 
58
- # setup PATH
59
- RUN export LD_LIBRARY_PATH=`python3 -c 'import os; import nvidia.cublas.lib; import nvidia.cudnn.lib; print(os.path.dirname(nvidia.cublas.lib.__file__) + ":" + os.path.dirname(nvidia.cudnn.lib.__file__))'`
 
 
 
 
60
 
 
 
61
 
62
  # Install PyTorch based on GPU support
63
  RUN if [ "$GPU_SUPPORT" = "cuda" ]; then \
@@ -68,22 +54,26 @@ RUN if [ "$GPU_SUPPORT" = "cuda" ]; then \
68
  pip install torch==2.2.2 torchvision==0.17.2 torchaudio==2.2.2 --index-url https://download.pytorch.org/whl/cpu; \
69
  fi
70
 
71
- # Install other requirements
72
- RUN pip install -r requirements.txt
73
 
74
  # Update config.txt for CPU if needed
75
  RUN if [ "$GPU_SUPPORT" = "cpu" ]; then \
76
  sed -i 's/cuda/cpu/' ./Config_Files/config.txt; \
77
  fi
78
 
 
 
 
 
79
  # Create a volume for persistent storage
80
- VOLUME /tldw
81
 
82
- # Make port 7860 available to the world outside this container
83
  EXPOSE 7860
84
 
85
- # Set listening to all interfaces
86
  ENV GRADIO_SERVER_NAME="0.0.0.0"
87
 
88
- # Run the application
89
- CMD ["python", "summarize.py", "-gui", "-log DEBUG"]
 
1
+ # Use Nvidia CUDA runtime as the base image
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  FROM nvidia/cuda:12.6.1-cudnn-runtime-ubuntu24.04
3
 
4
+ # Set build arguments for repository configuration
 
 
 
 
5
  ARG REPO_URL=https://github.com/rmusser01/tldw.git
6
  ARG BRANCH=main
7
  ARG GPU_SUPPORT=cpu
 
16
  python3-pyaudio \
17
  portaudio19-dev \
18
  python3-pip \
 
19
  python3-venv \
20
  && rm -rf /var/lib/apt/lists/*
21
 
22
+ # Create a new user named "user" with user ID 1000
23
+ RUN useradd -m -u 1000 user
24
 
25
+ # Switch to the "user" user
26
+ USER user
27
 
28
+ # Set environment variables for the user's home directory and PATH
29
+ ENV HOME=/home/user \
30
+ PATH=/home/user/.local/bin:$PATH
31
 
32
+ # Set the working directory to the user's app directory
33
+ WORKDIR $HOME/app
34
 
35
+ # Upgrade pip and install wheel as the non-root user.
+ # NOTE(review): this runs before the venv is created further down, so it
+ # installs into the user site (~/.local), not the venv — confirm it is needed.
36
+ RUN pip install --no-cache-dir --upgrade pip wheel
37
 
38
+ # Shallow-clone only the requested branch — full git history would be baked
+ # into the image layer with no runtime benefit.
39
+ RUN git clone --depth 1 --single-branch -b ${BRANCH} ${REPO_URL} .
40
+
41
+ # Create a virtual environment and put it first on PATH so every later
+ # `pip`/`python` invocation resolves inside it (ENV persists at runtime).
42
+ RUN python3 -m venv venv
43
+ ENV PATH="/home/user/app/venv/bin:$PATH"
44
 
45
+ # Install CUDA libraries
46
+ RUN pip install --no-cache-dir nvidia-cublas-cu12 nvidia-cudnn-cu12
47
 
48
  # Install PyTorch based on GPU support
49
  RUN if [ "$GPU_SUPPORT" = "cuda" ]; then \
 
54
  pip install torch==2.2.2 torchvision==0.17.2 torchaudio==2.2.2 --index-url https://download.pytorch.org/whl/cpu; \
55
  fi
56
 
57
+ # Install other Python dependencies
58
+ RUN pip install --no-cache-dir -r requirements.txt
59
 
60
  # Update config.txt for CPU if needed
61
  RUN if [ "$GPU_SUPPORT" = "cpu" ]; then \
62
  sed -i 's/cuda/cpu/' ./Config_Files/config.txt; \
63
  fi
64
 
65
+ # Download a model checkpoint (replace <SOME_ASSET_URL>, <SOME_ASSET_NAME>,
+ # and <SOME_SHA256> with real values before building).
66
+ RUN mkdir -p content
67
+ # Verify the remote asset: a bare `ADD <url>` is neither integrity-checked
+ # nor cache-stable; `--checksum` (BuildKit) pins the exact content.
+ ADD --chown=user --checksum=sha256:<SOME_SHA256> https://<SOME_ASSET_URL> content/<SOME_ASSET_NAME>
68
+
69
  # Create a volume for persistent storage
70
+ VOLUME /home/user/app/tldw_volume
71
 
72
+ # Expose port 7860 to the outside world
73
  EXPOSE 7860
74
 
75
+ # Set environment variable for Gradio to listen on all interfaces
76
  ENV GRADIO_SERVER_NAME="0.0.0.0"
77
 
78
+ # Default command in exec (JSON-array) form: the app runs as PID 1 and
+ # receives SIGTERM directly on `docker stop`; `python` resolves to the
+ # venv interpreter via the PATH set earlier in this file.
79
+ CMD ["python", "summarize.py", "-gui"]