flonga35 committed
Commit bbae066
1 Parent(s): 39e1c7c
app_gradio/265f.png ADDED
app_gradio/README.md ADDED
@@ -0,0 +1,3 @@
+ ## Image to Fen
+
+ [Check out the GitHub repo](https://github.com/DerekLiu35/ChessCV).
app_gradio/app.py ADDED
@@ -0,0 +1,169 @@
+ """Provide an image of a chessboard and get the FEN (https://en.wikipedia.org/wiki/Forsyth–Edwards_Notation) representation of the board."""
+ import argparse
+ import json
+ import logging
+ import os
+ from pathlib import Path
+ from typing import Callable
+
+ import gradio as gr
+ from PIL import ImageStat
+ from PIL.Image import Image
+ import requests
+
+ from image_to_fen.fen import ImageToFen
+ import image_to_fen.util as util
+
+ os.environ["CUDA_VISIBLE_DEVICES"] = ""  # do not use the GPU
+
+ logging.basicConfig(level=logging.INFO)
+
+ APP_DIR = Path(__file__).resolve().parent
+ FAVICON = APP_DIR / "265f.png"
+ README = APP_DIR / "README.md"
+
+ DEFAULT_PORT = 11700
+
+
+ def main(args):
+     predictor = PredictorBackend(url=args.model_url)
+     frontend = make_frontend(predictor.run)
+     frontend.launch(
+         server_name="0.0.0.0",  # noqa: S104
+         server_port=args.port,  # set a port to bind to, failing if unavailable
+         share=True,
+         favicon_path=FAVICON,
+     )
+
+
+ def make_frontend(fn: Callable[[Image], str], app_name: str = "image-to-fen"):
+     """Create a gradio.Interface frontend for an image-to-text function."""
+     examples_dir = Path("image_to_fen") / "tests" / "support" / "boards"
+     example_fnames = [elem for elem in os.listdir(examples_dir) if elem.endswith(".png")]
+     example_paths = [examples_dir / fname for fname in example_fnames]
+     examples = [[str(path)] for path in example_paths]
+
+     allow_flagging = "never"
+
+     readme = _load_readme(with_logging=allow_flagging == "manual")
+
+     # build a basic browser interface to a Python function
+     frontend = gr.Interface(
+         fn=fn,
+         outputs=gr.components.Textbox(),
+         inputs=gr.components.Image(type="pil", label="Chess Board"),
+         title="♟️ Image to Fen",
+         thumbnail=str(FAVICON),  # path to the pawn favicon
+         description=__doc__,
+         article=readme,
+         examples=examples,
+         cache_examples=False,
+         allow_flagging=allow_flagging,
+     )
+
+     return frontend
+
+
+ class PredictorBackend:
+     """Interface to a backend that serves predictions.
+
+     To communicate with a backend accessible via a URL, provide the url kwarg.
+     Otherwise, runs a predictor locally.
+     """
+
+     def __init__(self, url=None):
+         if url is not None:
+             self.url = url
+             self._predict = self._predict_from_endpoint
+         else:
+             model = ImageToFen()
+             self._predict = model.predict
+
+     def run(self, image):
+         pred, metrics = self._predict_with_metrics(image)
+         self._log_inference(pred, metrics)
+         return pred
+
+     def _predict_with_metrics(self, image):
+         pred = self._predict(image)
+
+         stats = ImageStat.Stat(image)
+         metrics = {
+             "image_mean_intensity": stats.mean,
+             "image_median": stats.median,
+             "image_extrema": stats.extrema,
+             "image_area": image.size[0] * image.size[1],
+             "pred_length": len(pred),
+         }
+         return pred, metrics
+
+     def _predict_from_endpoint(self, image):
+         """Send an image to an endpoint that accepts JSON and return the predicted text.
+
+         The endpoint should expect a base64 representation of the image, encoded as a string,
+         under the key "image". It should return the predicted text under the key "pred".
+         (A request sketch follows this file.)
+
+         Parameters
+         ----------
+         image
+             A PIL image of a chess board.
+
+         Returns
+         -------
+         pred
+             A string containing the predictor's guess of the FEN representation of the chess board.
+         """
+         encoded_image = util.encode_b64_image(image)
+
+         headers = {"Content-type": "application/json"}
+         payload = json.dumps({"image": "data:image/png;base64," + encoded_image})
+
+         response = requests.post(self.url, data=payload, headers=headers)
+         pred = response.json()["pred"]
+
+         return pred
+
+     def _log_inference(self, pred, metrics):
+         for key, value in metrics.items():
+             logging.info(f"METRIC {key} {value}")
+         logging.info(f"PRED >begin\n{pred}\nPRED >end")
+
+
+ def _load_readme(with_logging=False):
+     with open(README) as f:
+         lines = f.readlines()
+     # if not with_logging:
+     #     lines = lines[: lines.index("<!-- logging content below -->\n")]
+     readme = "".join(lines)
+     return readme
+
+
+ def _make_parser():
+     parser = argparse.ArgumentParser(description=__doc__)
+     parser.add_argument(
+         "--model_url",
+         default=None,
+         type=str,
+         help="Identifies a URL to which to send image data. Data is base64-encoded, converted to a utf-8 string, and then sent via a POST request as JSON with the key 'image'. Default is None, which instead sends the data to a model running locally.",
+     )
+     parser.add_argument(
+         "--port",
+         default=DEFAULT_PORT,
+         type=int,
+         help=f"Port on which to expose this server. Default is {DEFAULT_PORT}.",
+     )
+
+     return parser
+
+
+ if __name__ == "__main__":
+     parser = _make_parser()
+     args = parser.parse_args()
+     main(args)
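
For reference, here is a minimal sketch of the round trip that _predict_from_endpoint documents: POST JSON carrying a base64 data URL under the key "image", then read the prediction back under the key "pred". The endpoint URL and the input filename below are placeholders, not part of this commit.

    # sketch of the JSON protocol the backend endpoint is expected to speak
    import base64
    import json

    import requests

    with open("board.png", "rb") as f:  # any chess-board screenshot
        encoded = base64.b64encode(f.read()).decode("utf8")

    payload = json.dumps({"image": "data:image/png;base64," + encoded})
    response = requests.post(
        "https://example.com/predict",  # placeholder; pass the real URL via --model_url
        data=payload,
        headers={"Content-type": "application/json"},
    )
    print(response.json()["pred"])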
image_to_fen/__init__.py ADDED
@@ -0,0 +1 @@
+ """Modules for creating and running image to fen."""
image_to_fen/artifacts/image-to-fen/model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a9623198845b7c54ad9f3ada08d6cd87a036f5a3fd49a63d03f7e35e3d59bc3d
+ size 166133826
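
This is a Git LFS pointer rather than the weights themselves: the oid is the SHA-256 of the actual model file. After pulling the real weights, the download can be verified with the compute_sha256 helper defined later in image_to_fen/util.py. A small sketch, assuming it is run from the repo root:

    from image_to_fen.util import compute_sha256

    # should print the oid from the pointer file above once the weights are pulled
    print(compute_sha256("image_to_fen/artifacts/image-to-fen/model.pt"))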
image_to_fen/fen.py ADDED
@@ -0,0 +1,109 @@
+ import argparse
+ import re
+ from pathlib import Path
+ from typing import Union
+
+ import numpy as np
+ import torch
+ import torchvision
+ from PIL import Image
+
+ import image_to_fen.util as util
+
+ STAGED_MODEL_DIRNAME = Path(__file__).resolve().parent / "artifacts" / "image-to-fen"
+ MODEL_FILE = "model.pt"
+
+
+ class ImageToFen:
+     """Takes an image of a chess board and returns a FEN string."""
+
+     def __init__(self, model_path=None):
+         if model_path is None:
+             model_path = STAGED_MODEL_DIRNAME / MODEL_FILE
+         self.model = torch.jit.load(model_path)
+
+     @torch.no_grad()
+     def predict(self, image: Union[str, Path, Image.Image]) -> str:
+         """Predict the FEN string for an image of a chess board."""
+         if not isinstance(image, Image.Image):
+             image = util.read_image_pil(image, grayscale=True)
+         image = image.resize((200, 200))
+         image = torchvision.transforms.PILToTensor()(image) / 255
+         pred = self.model([image])[1][0]
+         nms_pred = apply_nms(pred, iou_thresh=0.2)
+         pred_str = boxes_labels_to_fen(nms_pred['boxes'], nms_pred['labels'])
+         return pred_str
+
+
+ def apply_nms(orig_prediction, iou_thresh=0.3):
+     """Filter a detection dict with non-maximum suppression at the given IoU threshold."""
+     # torchvision returns the indices of the bboxes to keep
+     keep = torchvision.ops.nms(orig_prediction['boxes'], orig_prediction['scores'], iou_thresh)
+
+     final_prediction = orig_prediction
+     final_prediction['boxes'] = final_prediction['boxes'][keep]
+     final_prediction['scores'] = final_prediction['scores'][keep]
+     final_prediction['labels'] = final_prediction['labels'][keep]
+
+     return final_prediction
+
+
+ def boxes_labels_to_fen(boxes, labels, square_size=25):
+     """Convert detected piece boxes and labels to a FEN string on an 8x8 grid of square_size-pixel squares."""
+     boxes = torch.round(boxes / square_size) * square_size  # snap box corners to the square grid
+     eye = np.eye(13)
+     one_hot = onehot_from_fen("8-8-8-8-8-8-8-8")  # start from an empty board
+     for i, box in enumerate(boxes):
+         x = box[0]
+         y = box[1]
+         ind = int((x / square_size) + (y / square_size) * 8)
+         if ind >= 64:
+             continue
+         one_hot[ind] = eye[12 - labels[i]].reshape((1, 13)).astype(int)
+     return fen_from_onehot(one_hot)
+
+
+ def onehot_from_fen(fen):
+     """Convert a FEN string ('-' as rank separator) to a (64, 13) one-hot array; index 12 marks an empty square."""
+     piece_symbols = 'prbnkqPRBNKQ'
+     eye = np.eye(13)
+     output = np.empty((0, 13))
+     fen = re.sub('[-]', '', fen)
+
+     for char in fen:
+         if char in '12345678':
+             output = np.append(output, np.tile(eye[12], (int(char), 1)), axis=0)
+         else:
+             idx = piece_symbols.index(char)
+             output = np.append(output, eye[idx].reshape((1, 13)), axis=0)
+
+     return output
+
+
+ def fen_from_onehot(one_hot):
+     """Convert a (64, 13) one-hot array back to a FEN string with '-' as the rank separator."""
+     piece_symbols = 'prbnkqPRBNKQ'
+     output = ''
+     for j in range(8):
+         for i in range(8):
+             idx = np.where(one_hot[j * 8 + i] == 1)[0][0]
+             if idx == 12:
+                 output += ' '
+             else:
+                 output += piece_symbols[idx]
+         if j != 7:
+             output += '-'
+
+     # collapse runs of empty squares into digit counts, longest runs first
+     for i in range(8, 0, -1):
+         output = output.replace(' ' * i, str(i))
+
+     return output
+
+
+ def main():
+     """Run prediction on an image."""
+     parser = argparse.ArgumentParser(description="Predict the FEN string for an image of a chess board.")
+     parser.add_argument("image", type=Path, help="Path to image file, e.g. image_to_fen/tests/support/boards/phpSrRLQ1.png.")
+     parser.add_argument("--model-path", type=Path, help="Path to model file.")
+     args = parser.parse_args()
+     image_to_fen = ImageToFen(args.model_path)
+     pred = image_to_fen.predict(args.image)
+     print(f"Prediction: {pred}")
+
+
+ if __name__ == "__main__":
+     main()
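
A quick sanity check for the two FEN helpers above is to round-trip the starting position; note that this project separates ranks with '-' rather than the standard '/'. A minimal sketch, assuming the dependencies are installed and it is run from the repo root so the import resolves:

    from image_to_fen.fen import fen_from_onehot, onehot_from_fen

    start = "rnbqkbnr-pppppppp-8-8-8-8-PPPPPPPP-RNBQKBNR"  # starting position
    one_hot = onehot_from_fen(start)
    print(one_hot.shape)             # (64, 13): 64 squares, 12 piece classes + empty
    print(fen_from_onehot(one_hot))  # round-trips back to the same string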
image_to_fen/tests/support/boards/board2.png ADDED
image_to_fen/tests/support/boards/board3.png ADDED
image_to_fen/tests/support/boards/board4.png ADDED
image_to_fen/tests/support/boards/board5.png ADDED
image_to_fen/tests/support/boards/phpSrRLQ1.png ADDED
image_to_fen/util.py ADDED
@@ -0,0 +1,88 @@
+ """Utility functions for the image_to_fen module."""
+ import base64
+ import contextlib
+ import hashlib
+ import os
+ from io import BytesIO
+ from pathlib import Path
+ from typing import Union
+ from urllib.request import urlretrieve
+
+ import numpy as np
+ import smart_open
+ import torchvision
+ from PIL import Image
+ from tqdm import tqdm
+
+
+ def to_categorical(y, num_classes):
+     """1-hot encode a tensor."""
+     return np.eye(num_classes, dtype="uint8")[y]
+
+
+ def read_image_pil(image_uri: Union[Path, str], grayscale=False) -> Image:
+     """Read an image from a local path or remote URI and return it as a PIL image."""
+     with smart_open.open(image_uri, "rb") as image_file:
+         return read_image_pil_file(image_file, grayscale)
+
+
+ def read_image_pil_file(image_file, grayscale=False) -> Image:
+     with Image.open(image_file) as image:
+         if grayscale:
+             image = image.convert(mode="L")
+         else:
+             image = image.convert(mode=image.mode)
+         return image
+
+
+ @contextlib.contextmanager
+ def temporary_working_directory(working_dir: Union[str, Path]):
+     """Temporarily switch to a directory, then return to the original directory on exit."""
+     curdir = os.getcwd()
+     os.chdir(working_dir)
+     try:
+         yield
+     finally:
+         os.chdir(curdir)
+
+
+ def encode_b64_image(image, format="png"):
+     """Encode a PIL image as a base64 string."""
+     _buffer = BytesIO()  # bytes that live in memory
+     image.save(_buffer, format=format)  # but which we write to like a file
+     encoded_image = base64.b64encode(_buffer.getvalue()).decode("utf8")
+     return encoded_image
+
+
+ def compute_sha256(filename: Union[Path, str]):
+     """Return the SHA256 checksum of a file."""
+     with open(filename, "rb") as f:
+         return hashlib.sha256(f.read()).hexdigest()
+
+
+ class TqdmUpTo(tqdm):
+     """From https://github.com/tqdm/tqdm/blob/master/examples/tqdm_wget.py"""
+
+     def update_to(self, blocks=1, bsize=1, tsize=None):
+         """
+         Parameters
+         ----------
+         blocks: int, optional
+             Number of blocks transferred so far [default: 1].
+         bsize: int, optional
+             Size of each block (in tqdm units) [default: 1].
+         tsize: int, optional
+             Total size (in tqdm units). If [default: None] remains unchanged.
+         """
+         if tsize is not None:
+             self.total = tsize
+         self.update(blocks * bsize - self.n)  # will also set self.n = blocks * bsize
+
+
+ def download_url(url, filename):
+     """Download a file from url to filename, with a progress bar."""
+     with TqdmUpTo(unit="B", unit_scale=True, unit_divisor=1024, miniters=1) as t:
+         urlretrieve(url, filename, reporthook=t.update_to, data=None)  # noqa: S310
+
+
+ def torch_to_pil(img):
+     """Convert a torch tensor back to a PIL image."""
+     return torchvision.transforms.ToPILImage()(img).convert('RGB')
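
encode_b64_image is the bridge between the Gradio app and a remote backend, so it helps to see that the encoding round-trips cleanly. A small sketch, using a blank 8x8 image as a stand-in for a real board screenshot:

    import base64
    from io import BytesIO

    from PIL import Image

    from image_to_fen.util import encode_b64_image

    image = Image.new("RGB", (8, 8), "white")  # stand-in for a board image
    encoded = encode_b64_image(image, format="png")
    decoded = Image.open(BytesIO(base64.b64decode(encoded)))
    print(decoded.size)  # (8, 8)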
requirements.txt ADDED
@@ -0,0 +1,369 @@
+ #
+ # This file is autogenerated by pip-compile with Python 3.10
+ # by the following command:
+ #
+ #    pip-compile requirements/prod.in
+ #
+ aiofiles==23.2.1
+     # via gradio
+ aiohttp==3.8.5
+     # via gradio
+ aiosignal==1.3.1
+     # via aiohttp
+ altair==5.0.1
+     # via gradio
+ annotated-types==0.5.0
+     # via pydantic
+ anyio==3.7.1
+     # via
+     #   httpcore
+     #   starlette
+ async-timeout==4.0.3
+     # via aiohttp
+ attrs==23.1.0
+     # via
+     #   aiohttp
+     #   jsonschema
+     #   referencing
+ boto3==1.28.34
+     # via
+     #   boto3-extensions
+     #   smart-open
+ boto3-extensions==0.20.0
+     # via gantry
+ botocore==1.31.34
+     # via
+     #   boto3
+     #   boto3-extensions
+     #   s3transfer
+ cachetools==4.2.4
+     # via gantry
+ certifi==2023.7.22
+     # via
+     #   httpcore
+     #   httpx
+     #   requests
+ charset-normalizer==3.2.0
+     # via
+     #   aiohttp
+     #   requests
+ click==8.1.7
+     # via
+     #   gantry
+     #   uvicorn
+ click-spinner==0.1.10
+     # via gantry
+ cmake==3.27.2
+     # via triton
+ colorama==0.4.6
+     # via
+     #   gantry
+     #   halo
+     #   log-symbols
+ contourpy==1.1.0
+     # via matplotlib
+ cycler==0.11.0
+     # via matplotlib
+ dateparser==1.1.8
+     # via gantry
+ exceptiongroup==1.1.3
+     # via anyio
+ fastapi==0.101.1
+     # via gradio
+ ffmpy==0.3.1
+     # via gradio
+ filelock==3.12.2
+     # via
+     #   huggingface-hub
+     #   torch
+     #   triton
+ fonttools==4.42.1
+     # via matplotlib
+ frozenlist==1.4.0
+     # via
+     #   aiohttp
+     #   aiosignal
+ fsspec==2023.6.0
+     # via
+     #   gradio-client
+     #   huggingface-hub
+ gantry==0.4.9
+     # via -r requirements/prod.in
+ gradio==3.40.1
+     # via -r requirements/prod.in
+ gradio-client==0.5.0
+     # via gradio
+ h11==0.14.0
+     # via
+     #   httpcore
+     #   uvicorn
+ h5py==3.9.0
+     # via -r requirements/prod.in
+ halo==0.0.31
+     # via gantry
+ httpcore==0.17.3
+     # via httpx
+ httpx==0.24.1
+     # via
+     #   gradio
+     #   gradio-client
+ huggingface-hub==0.16.4
+     # via
+     #   gradio
+     #   gradio-client
+ idna==3.4
+     # via
+     #   anyio
+     #   httpx
+     #   requests
+     #   yarl
+ importlib-metadata==6.8.0
+     # via -r requirements/prod.in
+ importlib-resources==6.0.1
+     # via gradio
+ isodate==0.6.1
+     # via gantry
+ jinja2==3.1.2
+     # via
+     #   -r requirements/prod.in
+     #   altair
+     #   gradio
+     #   torch
+ jmespath==1.0.1
+     # via
+     #   boto3
+     #   botocore
+ jsonschema==4.19.0
+     # via altair
+ jsonschema-specifications==2023.7.1
+     # via jsonschema
+ kiwisolver==1.4.5
+     # via matplotlib
+ linkify-it-py==2.0.2
+     # via markdown-it-py
+ lit==16.0.6
+     # via triton
+ log-symbols==0.0.14
+     # via halo
+ markdown-it-py[linkify]==2.2.0
+     # via
+     #   gradio
+     #   mdit-py-plugins
+ markupsafe==2.1.3
+     # via
+     #   gradio
+     #   jinja2
+ marshmallow==3.20.1
+     # via
+     #   gantry
+     #   marshmallow-oneofschema
+ marshmallow-oneofschema==3.0.1
+     # via gantry
+ matplotlib==3.7.2
+     # via gradio
+ mdit-py-plugins==0.3.3
+     # via gradio
+ mdurl==0.1.2
+     # via markdown-it-py
+ monotonic==1.6
+     # via gantry
+ mpmath==1.3.0
+     # via sympy
+ multidict==6.0.4
+     # via
+     #   aiohttp
+     #   yarl
+ networkx==3.1
+     # via torch
+ numpy==1.25.2
+     # via
+     #   -r requirements/prod.in
+     #   altair
+     #   contourpy
+     #   gantry
+     #   gradio
+     #   h5py
+     #   matplotlib
+     #   pandas
+     #   torchvision
+ nvidia-cublas-cu11==11.10.3.66
+     # via
+     #   nvidia-cudnn-cu11
+     #   nvidia-cusolver-cu11
+     #   torch
+ nvidia-cuda-cupti-cu11==11.7.101
+     # via torch
+ nvidia-cuda-nvrtc-cu11==11.7.99
+     # via torch
+ nvidia-cuda-runtime-cu11==11.7.99
+     # via torch
+ nvidia-cudnn-cu11==8.5.0.96
+     # via torch
+ nvidia-cufft-cu11==10.9.0.58
+     # via torch
+ nvidia-curand-cu11==10.2.10.91
+     # via torch
+ nvidia-cusolver-cu11==11.4.0.1
+     # via torch
+ nvidia-cusparse-cu11==11.7.4.91
+     # via torch
+ nvidia-nccl-cu11==2.14.3
+     # via torch
+ nvidia-nvtx-cu11==11.7.91
+     # via torch
+ orjson==3.9.5
+     # via gradio
+ packaging==23.1
+     # via
+     #   gradio
+     #   gradio-client
+     #   huggingface-hub
+     #   marshmallow
+     #   matplotlib
+ pandas==2.0.3
+     # via
+     #   altair
+     #   gantry
+     #   gradio
+ pillow==9.4.0
+     # via
+     #   -r requirements/prod.in
+     #   gradio
+     #   matplotlib
+     #   torchvision
+ pydantic==2.3.0
+     # via
+     #   fastapi
+     #   gradio
+ pydantic-core==2.6.3
+     # via pydantic
+ pydub==0.25.1
+     # via gradio
+ pyngrok==6.0.0
+     # via -r requirements/prod.in
+ pyparsing==3.0.9
+     # via matplotlib
+ python-dateutil==2.8.2
+     # via
+     #   botocore
+     #   dateparser
+     #   gantry
+     #   matplotlib
+     #   pandas
+ python-multipart==0.0.6
+     # via gradio
+ pytz==2023.3
+     # via
+     #   dateparser
+     #   pandas
+ pyyaml==6.0.1
+     # via
+     #   gantry
+     #   gradio
+     #   huggingface-hub
+     #   pyngrok
+ referencing==0.30.2
+     # via
+     #   jsonschema
+     #   jsonschema-specifications
+ regex==2023.8.8
+     # via dateparser
+ requests==2.31.0
+     # via
+     #   -r requirements/prod.in
+     #   gantry
+     #   gradio
+     #   gradio-client
+     #   huggingface-hub
+     #   torchvision
+ rpds-py==0.9.2
+     # via
+     #   jsonschema
+     #   referencing
+ s3transfer==0.6.2
+     # via boto3
+ semantic-version==2.10.0
+     # via gradio
+ six==1.16.0
+     # via
+     #   halo
+     #   isodate
+     #   python-dateutil
+ smart-open[s3]==6.3.0
+     # via -r requirements/prod.in
+ sniffio==1.3.0
+     # via
+     #   anyio
+     #   httpcore
+     #   httpx
+ spinners==0.0.24
+     # via halo
+ starlette==0.27.0
+     # via fastapi
+ sympy==1.12
+     # via torch
+ tabulate==0.9.0
+     # via gantry
+ termcolor==2.3.0
+     # via halo
+ toolz==0.12.0
+     # via altair
+ torch==2.0.1
+     # via
+     #   -r requirements/prod.in
+     #   torchvision
+     #   triton
+ torchvision==0.15.2
+     # via -r requirements/prod.in
+ tqdm==4.66.1
+     # via
+     #   -r requirements/prod.in
+     #   gantry
+     #   huggingface-hub
+ triton==2.0.0
+     # via torch
+ typeguard==2.13.3
+     # via gantry
+ typing-extensions==4.7.1
+     # via
+     #   altair
+     #   fastapi
+     #   gantry
+     #   gradio
+     #   gradio-client
+     #   huggingface-hub
+     #   pydantic
+     #   pydantic-core
+     #   torch
+     #   uvicorn
+ tzdata==2023.3
+     # via pandas
+ tzlocal==5.0.1
+     # via dateparser
+ uc-micro-py==1.0.2
+     # via linkify-it-py
+ urllib3==1.26.16
+     # via
+     #   botocore
+     #   requests
+ uvicorn==0.23.2
+     # via gradio
+ websockets==11.0.3
+     # via
+     #   gradio
+     #   gradio-client
+ wheel==0.41.2
+     # via
+     #   nvidia-cublas-cu11
+     #   nvidia-cuda-cupti-cu11
+     #   nvidia-cuda-runtime-cu11
+     #   nvidia-curand-cu11
+     #   nvidia-cusparse-cu11
+     #   nvidia-nvtx-cu11
+ yarl==1.9.2
+     # via aiohttp
+ zipp==3.16.2
+     # via importlib-metadata
+
+ # The following packages are considered to be unsafe in a requirements file:
+ # setuptools