{"cells":[{"cell_type":"code","execution_count":2,"metadata":{"_cell_guid":"b1076dfc-b9ad-4769-8c92-a6c4dae69d19","_uuid":"8f2839f25d086af736a60e9eeb907d3b93b6e0e5","execution":{"iopub.execute_input":"2024-10-06T11:55:01.869575Z","iopub.status.busy":"2024-10-06T11:55:01.869020Z","iopub.status.idle":"2024-10-06T12:01:26.763321Z","shell.execute_reply":"2024-10-06T12:01:26.761884Z","shell.execute_reply.started":"2024-10-06T11:55:01.869513Z"},"trusted":true},"outputs":[{"name":"stdout","output_type":"stream","text":["2024-10-06 11:55:29 (22.3 MB/s) - 'cyberpunk-china-losangeles.json' saved [595116505/595116505]\n","2024-10-06 11:55:30 (20.7 MB/s) - 'gtav-mapillary.json' saved [8668343/8668343]\n","2024-10-06 11:55:36 (53.5 MB/s) - 'gtav_features.zip' saved [237736635/237736635]\n","2024-10-06 12:00:09 (23.3 MB/s) - 'cyberpunk_features.zip' saved [6614554606/6614554606]\n","2024-10-06 12:01:19 (148 MB/s) - 'realworld_features.zip' saved [10427511890/10427511890]\n","2024-10-06 12:01:26 (36.8 MB/s) - 'mapillary_features.zip' saved [190592908/190592908]\n"]}],"source":["!wget https://huggingface.co./datasets/aoxo/photorealism-style-adapter-gta-v/resolve/main/cyberpunk-china-losangeles.json\n","!wget https://huggingface.co./datasets/aoxo/photorealism-style-adapter-gta-v/resolve/main/gtav-mapillary.json\n","!wget https://huggingface.co./datasets/aoxo/photorealism-style-adapter-gta-v/resolve/main/gtav_features.zip\n","!wget https://huggingface.co./datasets/aoxo/photorealism-style-adapter-gta-v/resolve/main/cyberpunk_features.zip\n","!wget https://huggingface.co./datasets/aoxo/photorealism-style-adapter-gta-v/resolve/main/realworld_features.zip\n","!wget https://huggingface.co./datasets/aoxo/photorealism-style-adapter-gta-v/resolve/main/mapillary_features.zip"]}
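,{"cell_type":"markdown","metadata":{},"source":["The archives downloaded above are never extracted in this notebook, yet `FeatureMapDataset` below reads from `/kaggle/working/frames_features/` and `/kaggle/working/real_features/`. A minimal extraction sketch follows; pairing `cyberpunk_features.zip` with `realworld_features.zip` (to match `cyberpunk-china-losangeles.json`) is an assumption - swap in `gtav_features.zip` and `mapillary_features.zip` when using `gtav-mapillary.json`."]},{"cell_type":"code","execution_count":null,"metadata":{"trusted":true},"outputs":[],"source":["# Assumed extraction step: target directories match the paths used by\n","# FeatureMapDataset below; which archive maps to which directory is an assumption.\n","!mkdir -p /kaggle/working/frames_features /kaggle/working/real_features\n","!unzip -q cyberpunk_features.zip -d /kaggle/working/frames_features/\n","!unzip -q realworld_features.zip -d /kaggle/working/real_features/"]}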
,{"cell_type":"code","execution_count":159,"metadata":{"execution":{"iopub.execute_input":"2024-10-06T14:52:05.628410Z","iopub.status.busy":"2024-10-06T14:52:05.627990Z","iopub.status.idle":"2024-10-06T14:52:17.944735Z","shell.execute_reply":"2024-10-06T14:52:17.943542Z","shell.execute_reply.started":"2024-10-06T14:52:05.628372Z"},"trusted":true},"outputs":[{"name":"stdout","output_type":"stream","text":["Requirement already satisfied: einops in /opt/conda/lib/python3.10/site-packages (0.8.0)\n"]}],"source":["import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","import math\n","!pip install einops\n","from einops import rearrange\n","\n","# Patch Embedding with Dynamic Positional Encoding\n","class DynamicPatchEmbedding(nn.Module):\n","    def __init__(self, in_channels=2048, patch_size=8, emb_dim=768, img_size=256):\n","        super(DynamicPatchEmbedding, self).__init__()\n","        self.patch_size = patch_size\n","        self.proj = nn.Conv2d(in_channels, emb_dim, kernel_size=patch_size, stride=patch_size)\n","        self.num_patches = (img_size // patch_size) ** 2\n","\n","    def forward(self, x):\n","        # If the input arrives flattened as [batch_size, channels * H * W], restore it to 4D\n","        if len(x.shape) == 2:\n","            batch_size = x.shape[0]\n","            channels = 2048  # Assuming 2048 feature channels\n","            h = w = int(math.sqrt(x.shape[1] // channels))  # Infer height and width\n","            x = x.view(batch_size, channels, h, w)  # Reshape to [batch_size, channels, height, width]\n","\n","        # Pass through Conv2d\n","        batch_size = x.shape[0]\n","        x = self.proj(x)  # (batch_size, emb_dim, H/P, W/P)\n","        x = x.flatten(2).transpose(1, 2)  # (batch_size, num_patches, emb_dim)\n","        return x\n","\n","# Style Adaptive Layer Normalization (SALN)\n",
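"# A learned projection of the style vector produces a per-channel scale (gamma)\n","# and shift (beta) that modulate the LayerNorm output, conditioning the\n","# normalization statistics on the style input\n",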
"class StyleAdaptiveLayerNorm(nn.Module):\n","    def __init__(self, emb_dim):\n","        super(StyleAdaptiveLayerNorm, self).__init__()\n","        self.norm = nn.LayerNorm(emb_dim)\n","        self.fc = nn.Linear(emb_dim, emb_dim * 2)\n","\n","    def forward(self, x, style):\n","        style = self.fc(style).unsqueeze(1)\n","        gamma, beta = style.chunk(2, dim=-1)\n","        normalized_x = self.norm(x)\n","        return gamma * normalized_x + beta\n","\n","# Cross Attention Layer\n","class CrossAttentionLayer(nn.Module):\n","    def __init__(self, emb_dim, num_heads, dropout=0.1):\n","        super(CrossAttentionLayer, self).__init__()\n","        self.attn = nn.MultiheadAttention(embed_dim=emb_dim, num_heads=num_heads, batch_first=True)\n","        self.dropout = nn.Dropout(dropout)\n","\n","    def forward(self, x, context):\n","        attn_output, _ = self.attn(x, context, context)\n","        return self.dropout(attn_output)\n","\n","# Transformer Encoder Block with SALN\n","class TransformerEncoderBlock(nn.Module):\n","    def __init__(self, emb_dim=768, num_heads=8, hidden_dim=2048, dropout=0.1):\n","        super(TransformerEncoderBlock, self).__init__()\n","        self.attn = CrossAttentionLayer(emb_dim, num_heads, dropout=dropout)\n","        self.ff = nn.Sequential(\n","            nn.Linear(emb_dim, hidden_dim),\n","            nn.ReLU(),\n","            nn.Linear(hidden_dim, emb_dim),\n","        )\n","        self.norm1 = StyleAdaptiveLayerNorm(emb_dim)\n","        self.norm2 = StyleAdaptiveLayerNorm(emb_dim)\n","        self.dropout = nn.Dropout(dropout)\n","\n","    def forward(self, x, style):\n","        attn_output = self.attn(x, x)\n","        x = x + self.dropout(attn_output)\n","        x = self.norm1(x, style)\n","\n","        ff_output = self.ff(x)\n","        x = x + self.dropout(ff_output)\n","        x = self.norm2(x, style)\n","        return x\n","\n","# Transformer Decoder Block with SALN\n","class TransformerDecoderBlock(nn.Module):\n","    def __init__(self, emb_dim=768, num_heads=8, hidden_dim=2048, dropout=0.1):\n","        super(TransformerDecoderBlock, self).__init__()\n","        self.attn1 = CrossAttentionLayer(emb_dim, num_heads, dropout=dropout)\n","        self.attn2 = CrossAttentionLayer(emb_dim, num_heads, dropout=dropout)\n","        self.ff = nn.Sequential(\n","            nn.Linear(emb_dim, hidden_dim),\n","            nn.ReLU(),\n","            nn.Linear(hidden_dim, emb_dim),\n","        )\n","        self.norm1 = StyleAdaptiveLayerNorm(emb_dim)\n","        self.norm2 = StyleAdaptiveLayerNorm(emb_dim)\n","        self.norm3 = StyleAdaptiveLayerNorm(emb_dim)\n","\n","    def forward(self, x, enc_output, style):\n","        attn_output1 = self.attn1(x, x)\n","        x = x + attn_output1\n","        x = self.norm1(x, style)\n","\n","        attn_output2 = self.attn2(x, enc_output)\n","        x = x + attn_output2\n","        x = self.norm2(x, style)\n","\n","        ff_output = self.ff(x)\n","        x = x + ff_output\n","        x = self.norm3(x, style)\n","\n","        return x\n","\n","# Swin Transformer Block (simplified: global attention is used, so window_size\n","# and shift_size are accepted but no window partitioning or shifting is performed)\n","class SwinTransformerBlock(nn.Module):\n","    def __init__(self, dim, num_heads, window_size=7, shift_size=2):\n","        super(SwinTransformerBlock, self).__init__()\n","        self.attn = nn.MultiheadAttention(embed_dim=dim, num_heads=num_heads, batch_first=True)\n","        self.mlp = nn.Sequential(\n","            nn.Linear(dim, 4 * dim),\n","            nn.GELU(),\n","            nn.Linear(4 * dim, dim)\n","        )\n","        self.norm1 = nn.LayerNorm(dim)\n","        self.norm2 = nn.LayerNorm(dim)\n","\n","    def forward(self, x):\n","        shortcut = x\n","        x = self.norm1(x)\n","        x, _ = self.attn(x, x, x)\n","        x = shortcut + x\n","\n","        shortcut = x\n","        x = self.norm2(x)\n","        x = self.mlp(x)\n","        x = shortcut + x\n","\n","        return x\n","\n",
"# Refinement Block\n","class RefinementBlock(nn.Module):\n","    def __init__(self, in_channels=768, out_channels=3, kernel_size=3, stride=1, padding=1):\n","        super(RefinementBlock, self).__init__()\n","        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding)\n","        self.bn = nn.BatchNorm2d(out_channels)\n","        self.relu = nn.ReLU(inplace=True)\n","\n","    def forward(self, x):\n","        x = self.conv(x)\n","        x = self.bn(x)\n","        x = self.relu(x)\n","        return x\n","\n","# Main ViT Image-to-Image Model with SALN\n","class RealFormerv3(nn.Module):\n","    def __init__(self, img_size=512, patch_size=8, emb_dim=768, num_heads=12, num_layers=12, hidden_dim=3072, window_size=8):\n","        # Note: emb_dim must be divisible by num_heads for nn.MultiheadAttention\n","        super(RealFormerv3, self).__init__()\n","        self.patch_embed = DynamicPatchEmbedding(in_channels=2048, patch_size=patch_size, emb_dim=emb_dim, img_size=img_size)\n","\n","        self.encoder_layers = nn.ModuleList([TransformerEncoderBlock(emb_dim, num_heads, hidden_dim) for _ in range(num_layers)])\n","        self.decoder_layers = nn.ModuleList([TransformerDecoderBlock(emb_dim, num_heads, hidden_dim) for _ in range(num_layers)])\n","        self.swin_layers = nn.ModuleList([SwinTransformerBlock(emb_dim, num_heads, window_size) for _ in range(num_layers)])\n","\n","        self.refinement = RefinementBlock(in_channels=emb_dim, out_channels=3)\n","        self.final_layer = nn.Conv2d(3, 2048, kernel_size=1)  # Project the 3-channel refined image back to 2048 feature channels\n","\n","        # Style encoder\n","        self.style_encoder = nn.Sequential(\n","            nn.Conv2d(2048, emb_dim, kernel_size=3, stride=2, padding=1),\n","            nn.ReLU(),\n","            nn.AdaptiveAvgPool2d(1),\n","            nn.Flatten(),\n","            nn.Linear(emb_dim, emb_dim)\n","        )\n","\n","    def forward(self, content, style):\n","        # Patch embedding and transformer encoder-decoder process the content\n","        x = self.patch_embed(content)\n","\n","        # Reshape style to 4D if it arrives flattened as [batch_size, channels * H * W]\n","        if len(style.shape) == 2:\n","            batch_size = style.shape[0]\n","            channels = 2048  # Assuming 2048 feature channels\n","            h = w = int(math.sqrt(style.shape[1] // channels))  # Infer height and width\n","            style = style.view(batch_size, channels, h, w)  # Reshape to [batch_size, channels, height, width]\n","\n","        style_features = self.style_encoder(style)\n","\n","        # Transformer encoder with SALN\n","        for encoder in self.encoder_layers:\n","            x = encoder(x, style_features)\n","\n","        # Transformer decoder with SALN\n","        for decoder in self.decoder_layers:\n","            x = decoder(x, x, style_features)  # Using self-attention for now\n","\n","        # Swin Transformer processing\n","        for swin in self.swin_layers:\n","            x = swin(x)\n","\n","        # Reshape x back to 4D (batch_size, channels, height, width) before passing it to Conv2d layers\n","        batch_size, num_patches, emb_dim = x.shape\n","        h = w = int(math.sqrt(num_patches))  # Assuming square patches\n","        x = x.transpose(1, 2).view(batch_size, emb_dim, h, w)\n","\n","        # Final refinement and output layer\n","        x = self.refinement(x)\n","        x = self.final_layer(x)\n","        return x\n","\n","# Loss functions\n","def total_variation_loss(x):\n","    return torch.sum(torch.abs(x[:, :, :-1, :] - x[:, :, 1:, :])) + torch.sum(torch.abs(x[:, :, :, :-1] - x[:, :, :, 1:]))\n","\n","def combined_loss(output, target):\n","    l1_loss = nn.L1Loss()(output, target)\n","    tv_loss = total_variation_loss(output)\n","    return l1_loss + 0.0001 * tv_loss\n","\n",
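"# psnr assumes inputs scaled to [0, 1], hence the peak value 1.0 in\n","# PSNR = 20 * log10(MAX / sqrt(MSE))\n",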
"def psnr(img1, img2):\n","    mse = torch.mean((img1 - img2) ** 2)\n","    if mse == 0:\n","        return float('inf')\n","    return 20 * torch.log10(1.0 / torch.sqrt(mse))"]}
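,{"cell_type":"markdown","metadata":{},"source":["A quick shape sanity check for the architecture above, not part of the original training run: it builds a small `RealFormerv3` and pushes random feature maps through it. The tiny hyperparameters (4 heads, 2 layers, 16x16 maps) are illustrative assumptions chosen so the check runs on CPU in seconds; note that `emb_dim` must be divisible by `num_heads`."]},{"cell_type":"code","execution_count":null,"metadata":{"trusted":true},"outputs":[],"source":["# Sanity-check sketch (assumed shapes): content and style are ResNet-style\n","# feature maps of shape [batch, 2048, 16, 16]; the output has the same shape.\n","tiny = RealFormerv3(img_size=16, patch_size=1, emb_dim=64, num_heads=4, num_layers=2, hidden_dim=128)\n","content = torch.randn(2, 2048, 16, 16)\n","style = torch.randn(2, 2048, 16, 16)\n","with torch.no_grad():\n","    out = tiny(content, style)\n","print(out.shape)  # expected: torch.Size([2, 2048, 16, 16])"]}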
,{"cell_type":"code","execution_count":154,"metadata":{"execution":{"iopub.execute_input":"2024-10-06T14:46:11.045039Z","iopub.status.busy":"2024-10-06T14:46:11.044572Z","iopub.status.idle":"2024-10-06T14:46:11.868049Z","shell.execute_reply":"2024-10-06T14:46:11.867070Z","shell.execute_reply.started":"2024-10-06T14:46:11.044981Z"},"trusted":true},"outputs":[],"source":["from torch.utils.data import Dataset, DataLoader\n","import json\n","import os\n","\n","class FeatureMapDataset(Dataset):\n","    def __init__(self, frames_dir, real_dir, json_file):\n","        self.frames_dir = frames_dir\n","        self.real_dir = real_dir\n","\n","        with open(json_file, 'r') as f:\n","            self.mappings = json.load(f)\n","\n","        self.frame_files = list(self.mappings.keys())  # List of frame filenames\n","\n","    def __len__(self):\n","        return len(self.frame_files)\n","\n","    def __getitem__(self, idx):\n","        frame_file = self.frame_files[idx]\n","        real_images = self.mappings[frame_file]\n","\n","        # Load frame feature map\n","        frame_feature = torch.load(os.path.join(self.frames_dir, frame_file))\n","\n","        # Load top real-world image feature maps\n","        real_features = [torch.load(os.path.join(self.real_dir, img[0])) for img in real_images]\n","\n","        # Extract the top real image and its similarity score\n","        top_real_feature = real_features[0]\n","        top_similarity = real_images[0][1]\n","\n","        return frame_feature, top_real_feature, top_similarity, real_features[1:]\n","\n","# Define data loaders\n","frames_dir = '/kaggle/working/frames_features/'\n","real_dir = '/kaggle/working/real_features/'\n","json_file = '/kaggle/working/cyberpunk-china-losangeles.json'  # or gtav-mapillary.json\n","\n","dataset = FeatureMapDataset(frames_dir, real_dir, json_file)\n","dataloader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=4)"]},{"cell_type":"code","execution_count":151,"metadata":{"execution":{"iopub.execute_input":"2024-10-06T14:45:42.810459Z","iopub.status.busy":"2024-10-06T14:45:42.810043Z","iopub.status.idle":"2024-10-06T14:45:42.815339Z","shell.execute_reply":"2024-10-06T14:45:42.814119Z","shell.execute_reply.started":"2024-10-06T14:45:42.810415Z"},"trusted":true},"outputs":[],"source":["import torch\n","torch.cuda.empty_cache()"]},{"cell_type":"code","execution_count":null,"metadata":{"execution":{"iopub.execute_input":"2024-10-06T15:51:43.264775Z","iopub.status.busy":"2024-10-06T15:51:43.263890Z","iopub.status.idle":"2024-10-06T15:51:43.422577Z","shell.execute_reply":"2024-10-06T15:51:43.421024Z","shell.execute_reply.started":"2024-10-06T15:51:43.264719Z"},"trusted":true},"outputs":[],"source":["import os\n","import torch.distributed as dist\n","\n","def setup_distributed(local_rank):\n","    # Unused in this single-node run; kept for a potential multi-GPU launcher\n","    dist.init_process_group(backend='nccl')\n","    torch.cuda.set_device(local_rank)\n","\n","def contrastive_loss(anchor, positive, negatives, margin=0.2):\n","    # Cosine similarity between anchor and positive\n","    pos_sim = F.cosine_similarity(anchor, positive, dim=-1)\n","\n","    # Cosine similarity between anchor and all negative examples\n","    neg_sims = [F.cosine_similarity(anchor, neg, dim=-1) for neg in negatives]\n","\n","    # Margin-based contrastive loss, summed over negatives\n","    loss = 0.0\n","    for neg_sim in neg_sims:\n","        loss += torch.clamp(margin + neg_sim - pos_sim, min=0.0)\n","\n","    return loss.mean()\n","\n","# Training script\n","def train_contrastive(model, dataloader, optimizer, num_epochs=10, margin=0.2):\n","    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n","    model = nn.DataParallel(model, device_ids=[0, 1])\n","    model.to(device)\n","\n","    best_loss = float('inf')\n","\n","    for epoch in range(num_epochs):\n","        model.train()\n","        running_loss = 0.0\n","        running_psnr = 0.0\n","\n","        for batch_idx, (frame_feature, top_real_feature, top_similarity, other_real_features) in enumerate(dataloader):\n","            frame_feature = frame_feature.to(device)\n","            top_real_feature = top_real_feature.to(device)\n","            other_real_features = [neg.to(device) for neg in other_real_features]\n","\n","            optimizer.zero_grad()\n","\n","            # Forward pass\n","            output = model(frame_feature, top_real_feature)\n","\n","            # Compute contrastive loss\n","            loss = contrastive_loss(output, top_real_feature, other_real_features, margin=margin)\n","\n","            # Backpropagation and optimization\n","            loss.backward()\n","            optimizer.step()\n","\n","            running_loss += loss.item()\n","\n","            # PSNR metric computation\n","            psnr_value = psnr(output, top_real_feature)\n","            running_psnr += psnr_value\n","\n","            # Print training status\n","            if batch_idx % 10 == 0:\n","                print(f\"Epoch [{epoch+1}/{num_epochs}], Batch [{batch_idx}/{len(dataloader)}], Loss: {loss.item()}, PSNR: {psnr_value:.4f}\")\n","\n","        # Epoch-level metrics\n","        epoch_loss = running_loss / len(dataloader)\n","        avg_psnr = running_psnr / len(dataloader)\n","\n","        print(f\"Epoch [{epoch+1}/{num_epochs}], Avg Loss: {epoch_loss:.4f}, Avg PSNR: {avg_psnr:.4f}\")\n","\n","        # Save the best model\n","        if epoch_loss < best_loss:\n","            best_loss = epoch_loss\n","            torch.save(model.state_dict(), 'realformerv3.pth')\n","            print(f\"Model saved at epoch {epoch+1} with loss {best_loss:.4f}\")\n","\n","# Optimizer setup; emb_dim (768) must be divisible by num_heads, hence 12 heads\n","model = RealFormerv3(img_size=256, patch_size=1, emb_dim=768, num_heads=12, num_layers=16, hidden_dim=3072)\n","optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n","\n","# Start training\n","train_contrastive(model, dataloader, optimizer, num_epochs=50, margin=0.2)"]}
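,{"cell_type":"markdown","metadata":{},"source":["A minimal inference sketch, not from the original run: it reloads the best checkpoint saved above and stylizes one batch from the dataloader. Because the checkpoint was saved from an `nn.DataParallel` wrapper, its keys carry a `module.` prefix that has to be stripped before loading into a bare model; everything else reuses names defined above."]},{"cell_type":"code","execution_count":null,"metadata":{"trusted":true},"outputs":[],"source":["# Inference sketch (assumes training above produced realformerv3.pth)\n","state = torch.load('realformerv3.pth', map_location='cpu')\n","state = {k.removeprefix('module.'): v for k, v in state.items()}  # undo DataParallel prefix\n","\n","infer_model = RealFormerv3(img_size=256, patch_size=1, emb_dim=768, num_heads=12, num_layers=16, hidden_dim=3072)\n","infer_model.load_state_dict(state)\n","infer_model.eval()\n","\n","frame_feature, top_real_feature, _, _ = next(iter(dataloader))\n","with torch.no_grad():\n","    stylized = infer_model(frame_feature, top_real_feature)\n","print(stylized.shape)"]}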
,{"cell_type":"code","execution_count":174,"metadata":{"execution":{"iopub.execute_input":"2024-10-06T15:15:47.858959Z","iopub.status.busy":"2024-10-06T15:15:47.858119Z","iopub.status.idle":"2024-10-06T15:16:25.296512Z","shell.execute_reply":"2024-10-06T15:16:25.295405Z","shell.execute_reply.started":"2024-10-06T15:15:47.858916Z"},"trusted":true},"outputs":[{"name":"stdout","output_type":"stream","text":["The token has not been saved to the git credentials helper. Pass `add_to_git_credential=True` in this function directly or `--add-to-git-credential` if using via `huggingface-cli` if you want to set the git credential as well.\n","Token is valid (permission: write).\n","Your token has been saved to /root/.cache/huggingface/token\n","Login successful\n","Uploaded realformerv3.pth to aoxo/RealFormer repository.\n"]},{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"3b488b0a2dc34f85b52499acf25d688e","version_major":2,"version_minor":0},"text/plain":["realformerv3_fp16.pth:   0%|          | 0.00/505M [00:00<?, ?B/s]"]},"metadata":{},"output_type":"display_data"},{"name":"stdout","output_type":"stream","text":["Uploaded realformerv3_fp16.pth to aoxo/RealFormer repository.\n"]},{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"a9d47a6bb5ca49e681dc8080bb73270a","version_major":2,"version_minor":0},"text/plain":["realformerv3_bf16.pth:   0%|          | 0.00/505M [00:00<?, ?B/s]"]},"metadata":{},"output_type":"display_data"},{"name":"stdout","output_type":"stream","text":["Uploaded realformerv3_bf16.pth to aoxo/RealFormer repository.\n"]},{"data":{"application/vnd.jupyter.widget-view+json":{"model_id":"89d1de6e88e04f2fae83061bcbce9d35","version_major":2,"version_minor":0},"text/plain":["realformerv3_int8.pth:   0%|          | 0.00/344M [00:00<?, ?B/s]"]},"metadata":{},"output_type":"display_data"},{"name":"stdout","output_type":"stream","text":["Uploaded realformerv3_int8.pth to aoxo/RealFormer repository.\n"]}],"source":["import os\n","from huggingface_hub import login, HfApi\n","\n","# Login to Hugging Face Hub (token elided)\n","login(token=\"\")\n","\n","# Initialize the Hugging Face API\n","api = HfApi()\n","\n","# Specify the directory containing the models\n","model_directory = \"/kaggle/working/\"\n","repo_id = \"aoxo/RealFormer\"\n","repo_type = \"model\"\n","\n","# Loop through all files in the model directory\n","for filename in os.listdir(model_directory):\n","    # Only upload files that end with .pth\n","    if filename.endswith(\".pth\"):\n","        file_path = os.path.join(model_directory, filename)\n","        path_in_repo = filename  # Use the same filename in the repo\n","\n","        # Upload the model file to the repository\n","        api.upload_file(\n","            path_or_fileobj=file_path,\n","            path_in_repo=path_in_repo,\n","            repo_id=repo_id,\n","            repo_type=repo_type,\n","        )\n","        print(f\"Uploaded {filename} to {repo_id} repository.\")"]}
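,{"cell_type":"markdown","metadata":{},"source":["For completeness, a sketch of pulling one of the uploaded checkpoints back from the Hub with `huggingface_hub`; the repo id matches the upload cell above, and `hf_hub_download` returns a local cached path."]},{"cell_type":"code","execution_count":null,"metadata":{"trusted":true},"outputs":[],"source":["from huggingface_hub import hf_hub_download\n","\n","# Fetch a checkpoint uploaded above from the aoxo/RealFormer model repo\n","ckpt_path = hf_hub_download(repo_id=\"aoxo/RealFormer\", filename=\"realformerv3.pth\")\n","print(ckpt_path)"]}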
,{"cell_type":"code","execution_count":177,"metadata":{"execution":{"iopub.execute_input":"2024-10-06T15:51:48.356977Z","iopub.status.busy":"2024-10-06T15:51:48.355877Z","iopub.status.idle":"2024-10-06T15:51:59.708112Z","shell.execute_reply":"2024-10-06T15:51:59.706952Z","shell.execute_reply.started":"2024-10-06T15:51:48.356933Z"},"trusted":true},"outputs":[{"name":"stdout","output_type":"stream","text":["252617225\n","RealFormerv3(\n","  (patch_embed): DynamicPatchEmbedding(\n","    (proj): Conv2d(2048, 768, kernel_size=(1, 1), stride=(1, 1))\n","  )\n","  (encoder_layers): ModuleList(\n","    (0-7): 8 x TransformerEncoderBlock(\n","      (attn): CrossAttentionLayer(\n","        (attn): MultiheadAttention(\n","          (out_proj): NonDynamicallyQuantizableLinear(in_features=768, out_features=768, bias=True)\n","        )\n","        (dropout): Dropout(p=0.1, inplace=False)\n","      )\n","      (ff): Sequential(\n","        (0): Linear(in_features=768, out_features=3072, bias=True)\n","        (1): ReLU()\n","        (2): Linear(in_features=3072, out_features=768, bias=True)\n","      )\n","      (norm1): StyleAdaptiveLayerNorm(\n","        (norm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n","        (fc): Linear(in_features=768, out_features=1536, bias=True)\n","      )\n","      (norm2): StyleAdaptiveLayerNorm(\n","        (norm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n","        (fc): Linear(in_features=768, out_features=1536, bias=True)\n","      )\n","      (dropout): Dropout(p=0.1, inplace=False)\n","    )\n","  )\n","  (decoder_layers): ModuleList(\n","    (0-7): 8 x TransformerDecoderBlock(\n","      (attn1): CrossAttentionLayer(\n","        (attn): MultiheadAttention(\n","          (out_proj): NonDynamicallyQuantizableLinear(in_features=768, out_features=768, bias=True)\n","        )\n","        (dropout): Dropout(p=0.1, inplace=False)\n","      )\n","      (attn2): CrossAttentionLayer(\n","        (attn): MultiheadAttention(\n","          (out_proj): NonDynamicallyQuantizableLinear(in_features=768, out_features=768, bias=True)\n","        )\n","        (dropout): Dropout(p=0.1, inplace=False)\n","      )\n","      (ff): Sequential(\n","        (0): Linear(in_features=768, out_features=3072, bias=True)\n","        (1): ReLU()\n","        (2): Linear(in_features=3072, out_features=768, bias=True)\n","      )\n","      (norm1): StyleAdaptiveLayerNorm(\n","        (norm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n","        (fc): Linear(in_features=768, out_features=1536, bias=True)\n","      )\n","      (norm2): StyleAdaptiveLayerNorm(\n","        (norm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n","        (fc): Linear(in_features=768, out_features=1536, bias=True)\n","      )\n","      (norm3): StyleAdaptiveLayerNorm(\n","        (norm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n","        (fc): Linear(in_features=768, out_features=1536, bias=True)\n","      )\n","    )\n","  )\n","  (swin_layers): ModuleList(\n","    (0-7): 8 x SwinTransformerBlock(\n","      (attn): MultiheadAttention(\n","        (out_proj): NonDynamicallyQuantizableLinear(in_features=768, out_features=768, bias=True)\n","      )\n","      (mlp): Sequential(\n","        (0): Linear(in_features=768, out_features=3072, bias=True)\n","        (1): GELU(approximate='none')\n","        (2): Linear(in_features=3072, out_features=768, bias=True)\n","      )\n","      (norm1): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n","      (norm2): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n","    )\n","  )\n","  (refinement): RefinementBlock(\n","    (conv): Conv2d(768, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n","    (bn): BatchNorm2d(3, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n","    (relu): ReLU(inplace=True)\n","  )\n","  (final_layer): Conv2d(3, 2048, kernel_size=(1, 1), stride=(1, 1))\n","  (style_encoder): Sequential(\n","    (0): Conv2d(2048, 768, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n","    (1): ReLU()\n","    (2): AdaptiveAvgPool2d(output_size=1)\n","    (3): Flatten(start_dim=1, end_dim=-1)\n","    (4): Linear(in_features=768, out_features=768, bias=True)\n","  )\n",")\n"]}],"source":["total_params = sum(p.numel() for p in model.parameters())\n","print(total_params)\n","print(model)\n","torch.save(model.state_dict(), 'realformerv3.pth')\n","# Convert model to FP16 and save\n","model.half()\n","torch.save(model.state_dict(), 'realformerv3_fp16.pth')\n","# Convert model to BF16 and save\n","model.to(torch.bfloat16)\n","torch.save(model.state_dict(), 'realformerv3_bf16.pth')\n","import torch.quantization as quantization\n","\n","# Apply dynamic quantization to the Linear layers; quantization expects\n","# float32 weights, so convert back from bfloat16 first\n","model.float()\n","model_int8 = quantization.quantize_dynamic(\n","    model, {torch.nn.Linear}, dtype=torch.qint8\n",")\n","\n","# Save the INT8 quantized model\n","torch.save(model_int8.state_dict(), 'realformerv3_int8.pth')"]}
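,{"cell_type":"markdown","metadata":{},"source":["A sketch of restoring the dynamically quantized checkpoint, not part of the original run: `load_state_dict` only works once the model has been converted with the same `quantize_dynamic` call, because dynamically quantized Linear modules store packed INT8 parameters under different keys than their float counterparts."]},{"cell_type":"code","execution_count":null,"metadata":{"trusted":true},"outputs":[],"source":["import torch.quantization as quantization\n","\n","# Rebuild the float model, apply the same dynamic quantization, then load\n","# the INT8 state dict saved above (hyperparameters must match training)\n","base = RealFormerv3(img_size=256, patch_size=1, emb_dim=768, num_heads=12, num_layers=16, hidden_dim=3072)\n","quantized = quantization.quantize_dynamic(base, {torch.nn.Linear}, dtype=torch.qint8)\n","quantized.load_state_dict(torch.load('realformerv3_int8.pth'))\n","quantized.eval()"]}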
],"metadata":{"kaggle":{"accelerator":"nvidiaTeslaT4","dataSources":[{"datasetId":5825636,"sourceId":9559983,"sourceType":"datasetVersion"}],"dockerImageVersionId":30786,"isGpuEnabled":true,"isInternetEnabled":true,"language":"python","sourceType":"notebook"},"kernelspec":{"display_name":"Python 3","language":"python","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.10.14"}},"nbformat":4,"nbformat_minor":4}