Ozgur Unlu committed on
Commit 4740b6e · 1 Parent(s): 715a349

more tests to see the problem with enabling hardware

.config/accelerate/default_config.yaml ADDED
@@ -0,0 +1,15 @@
+ compute_environment: LOCAL_MACHINE
+ distributed_type: 'NO'
+ downcast_bf16: 'no'
+ gpu_ids: all
+ machine_rank: 0
+ main_training_function: main
+ mixed_precision: 'no'
+ num_machines: 1
+ num_processes: 1
+ rdzv_backend: static
+ same_network: true
+ tpu_env: []
+ tpu_use_cluster: false
+ tpu_use_sudo: false
+ use_cpu: false
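A minimal sketch, not part of the commit, for checking at runtime which device accelerate resolves to with a single-process config like the one above; whether this repo-local path is actually where accelerate looks for its default config is an assumption, but the Accelerator call itself works either way:

# sketch: print the device and precision accelerate settles on
from accelerate import Accelerator

accelerator = Accelerator()
print("device:", accelerator.device)                    # expected cuda:0 when the GPU is visible, else cpu
print("mixed precision:", accelerator.mixed_precision)  # expected 'no' with this config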
Dockerfile ADDED
@@ -0,0 +1,28 @@
+ FROM nvidia/cuda:11.7.1-runtime-ubuntu22.04
+
+ # Set up system
+ RUN apt-get update && apt-get install -y \
+     python3-pip \
+     python3-dev \
+     git \
+     && rm -rf /var/lib/apt/lists/*
+
+ # Set working directory
+ WORKDIR /code
+
+ # Copy requirements first to leverage Docker cache
+ COPY requirements.txt .
+
+ # Install dependencies
+ RUN pip3 install --no-cache-dir -r requirements.txt
+
+ # Copy the rest of the application
+ COPY . .
+
+ # Set environment variables
+ ENV PYTHONUNBUFFERED=1
+ ENV GRADIO_SERVER_NAME=0.0.0.0
+ ENV GRADIO_SERVER_PORT=7860
+
+ # Command to run the application
+ CMD ["python3", "app.py"]
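For reference, a hedged sketch of how app.py could consume the GRADIO_SERVER_NAME and GRADIO_SERVER_PORT variables set above if it passes them to launch() explicitly (recent Gradio versions also read these environment variables on their own); the demo object here is a placeholder, not the app's real interface:

# sketch: bind Gradio to the host/port injected by the Dockerfile
import os
import gradio as gr

demo = gr.Interface(fn=lambda text: text, inputs="text", outputs="text")  # placeholder interface

demo.launch(
    server_name=os.environ.get("GRADIO_SERVER_NAME", "127.0.0.1"),
    server_port=int(os.environ.get("GRADIO_SERVER_PORT", "7860")),
)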
app.py CHANGED
@@ -1,6 +1,32 @@
+ import torch
+ import sys
+ import subprocess
+
+ def check_gpu_status():
+     print("Python version:", sys.version)
+     print("PyTorch version:", torch.__version__)
+     print("CUDA available:", torch.cuda.is_available())
+     print("CUDA version:", torch.version.cuda if torch.cuda.is_available() else "Not available")
+
+     if torch.cuda.is_available():
+         print("GPU Device:", torch.cuda.get_device_name(0))
+         print("GPU Memory:", torch.cuda.get_device_properties(0).total_memory / 1024**3, "GB")
+
+     try:
+         nvidia_smi = subprocess.check_output(["nvidia-smi"])
+         print("nvidia-smi output:")
+         print(nvidia_smi.decode())
+     except Exception:
+         print("nvidia-smi not available")
+
+ # Run GPU check at startup
+ print("=== GPU Status Check ===")
+ check_gpu_status()
+ print("======================")
+
+ # Rest of your imports
  import gradio as gr
  import easyocr
- import torch
  from transformers import pipeline, DistilBertTokenizer, DistilBertForSequenceClassification
  import numpy as np
  from PIL import Image
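Beyond printing diagnostics, the same torch.cuda.is_available() check can drive device selection for the libraries app.py already imports. A hedged sketch, not taken from the commit; the OCR language list and the pipeline task are placeholders:

# sketch: choose the device once and hand it to easyocr and transformers
import torch
import easyocr
from transformers import pipeline

use_gpu = torch.cuda.is_available()

# easyocr takes a boolean gpu flag; transformers pipelines take a device index (-1 = CPU, 0 = first GPU)
reader = easyocr.Reader(["en"], gpu=use_gpu)
classifier = pipeline("text-classification", device=0 if use_gpu else -1)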
requirements.txt CHANGED
@@ -1,8 +1,10 @@
- --extra-index-url https://download.pytorch.org/whl/cu113
- torch
+ --extra-index-url https://download.pytorch.org/whl/cu117
+ torch==2.0.1+cu117
+ torchvision==0.15.2+cu117
+ torchaudio==2.0.2+cu117
  gradio
- transformers
  easyocr
+ transformers
  pandas
  numpy
  pillow
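A quick sanity check, assuming these pinned cu117 wheels are what pip actually installed inside the CUDA 11.7.1 base image; this is a sketch, not part of the commit:

# sketch: verify the installed torch build matches the cu117 wheels and the container's CUDA runtime
import torch

print(torch.__version__)          # expected to end with "+cu117"
print(torch.version.cuda)         # expected "11.7"
print(torch.cuda.is_available())  # False here usually means no GPU is attached or the driver is not exposed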
runtime.txt ADDED
@@ -0,0 +1 @@
+ python-3.10