yeq6x committed
Commit 77c44cd · 1 Parent(s): 6090dd8
Files changed (3)
  1. Dockerfile +0 -4
  2. docker-compose.yml +0 -4
  3. scripts/generate_prompt.py +0 -14
Dockerfile CHANGED
@@ -20,10 +20,6 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
 COPY requirements.txt /app/requirements.txt
 RUN pip install --no-cache-dir -r requirements.txt
 
-# Set environment variables
-ENV PATH="/usr/local/cuda/bin:${PATH}"
-ENV XLA_FLAGS="--xla_gpu_cuda_data_dir=/usr/local/cuda"
-
 # Copy the application code into the container
 COPY . /app
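Note: the deleted ENV lines only prepended the CUDA binaries to PATH and pointed XLA at the CUDA install. If those settings were ever needed again outside the Spaces runtime, a minimal sketch (assuming the same /usr/local/cuda location as the removed lines) would set them at process start instead of baking them into the image:

# Sketch only, not part of this commit: recreate the removed ENV settings at runtime.
# Assumes CUDA is installed at /usr/local/cuda, as in the deleted Dockerfile lines.
import os

CUDA_HOME = "/usr/local/cuda"
os.environ["PATH"] = f"{CUDA_HOME}/bin:" + os.environ.get("PATH", "")
os.environ["XLA_FLAGS"] = f"--xla_gpu_cuda_data_dir={CUDA_HOME}"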
docker-compose.yml CHANGED
@@ -14,7 +14,3 @@ services:
           - capabilities: [ "gpu" ]
             count: 1
             driver: nvidia
-    runtime: nvidia
-    environment:
-      - NVIDIA_VISIBLE_DEVICES=all
-      - NVIDIA_DRIVER_CAPABILITIES=compute,utility
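The GPU reservation under deploy.resources.reservations.devices stays in place; only the legacy runtime: nvidia entry and the NVIDIA_* environment variables are dropped. A quick sanity check that the container still sees a GPU after this change (a sketch, assuming TensorFlow remains in requirements.txt) could be:

# Sketch only: verify GPU visibility inside the container after the compose change.
import tensorflow as tf

gpus = tf.config.list_physical_devices("GPU")
print(f"Visible GPUs: {len(gpus)}")
for gpu in gpus:
    print(gpu)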
scripts/generate_prompt.py CHANGED
@@ -7,20 +7,6 @@ from huggingface_hub import hf_hub_download
 
 import spaces
 
-import tensorflow as tf
-
-gpus = tf.config.list_physical_devices('GPU')
-if gpus:
-    try:
-        for gpu in gpus:
-            tf.config.experimental.set_memory_growth(gpu, True)
-        print("GPUs are available and memory growth is set.")
-    except RuntimeError as e:
-        print("Error setting GPU memory growth:", e)
-else:
-    print("No GPUs are available.")
-
-
 # Image size setting
 IMAGE_SIZE = 448
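The removed block enabled TensorFlow's per-GPU memory growth at import time. If that behaviour is still wanted without keeping the block in generate_prompt.py, one alternative (a sketch, assuming TensorFlow is still imported somewhere later) is TensorFlow's TF_FORCE_GPU_ALLOW_GROWTH environment variable, which must be set before the first TensorFlow import:

# Sketch only: environment-variable equivalent of the deleted memory-growth block.
import os

os.environ.setdefault("TF_FORCE_GPU_ALLOW_GROWTH", "true")

import tensorflow as tf  # imported after the variable so the setting takes effect

print(tf.config.list_physical_devices("GPU"))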