# Supported values:
# - VIDEOCHAIN
# - REPLICATE
# - INFERENCE_ENDPOINT
# - INFERENCE_API
# - OPENAI
RENDERING_ENGINE="INFERENCE_API"
# Supported values:
# - INFERENCE_ENDPOINT
# - INFERENCE_API
# - OPENAI
LLM_ENGINE="INFERENCE_API"
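# Presumably the maximum number of comic pages that can be generated (based on the variable name)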
NEXT_PUBLIC_MAX_NB_PAGES="2"
# Not implemented for the Inference API yet - you can submit a PR if you have some ideas
NEXT_PUBLIC_CAN_UPSCALE="false"
# Not implemented for the Inference API yet - you can submit a PR if you have some ideas
NEXT_PUBLIC_CAN_REDRAW="false"
# Set to "true" to create artificial delays and smooth out traffic
NEXT_PUBLIC_ENABLE_RATE_LIMITER="false"
# ------------- HUGGING FACE OAUTH -------------
NEXT_PUBLIC_ENABLE_HUGGING_FACE_OAUTH="false"
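# Client ID and secret of your Hugging Face OAuth application (presumably only needed if OAuth is enabled above)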
NEXT_PUBLIC_HUGGING_FACE_OAUTH_CLIENT_ID=""
HUGGING_FACE_OAUTH_SECRET=""
# ------------- PROVIDER AUTH ------------
# You only need to configure the access token(s) for the provider(s) you want to use
# HuggingFace.co token: available for the LLM engine and the RENDERING engine
AUTH_HF_API_TOKEN=
# Replicate.com token: available for the RENDERING engine
AUTH_REPLICATE_API_TOKEN=
# OpenAI.com token: available for the LLM engine and the RENDERING engine
AUTH_OPENAI_API_KEY=
# An experimental RENDERING engine (sorry, it is not well documented yet, so you may want to use one of the other engines instead)
AUTH_VIDEOCHAIN_API_TOKEN=
# ------------- RENDERING API CONFIG --------------
# If you decided to use Replicate for the RENDERING engine
RENDERING_REPLICATE_API_MODEL="stabilityai/sdxl"
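# (the version hash below pins a specific version of the model on Replicate)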
RENDERING_REPLICATE_API_MODEL_VERSION="da77bc59ee60423279fd632efb4795ab731d9e3ca9705ef3341091fb989b7eaf"
# If you decided to use a private Hugging Face Inference Endpoint for the RENDERING engine
RENDERING_HF_INFERENCE_ENDPOINT_URL="https://XXXXXXXXXX.endpoints.huggingface.cloud"
# If you decided to use a Hugging Face Inference API model for the RENDERING engine
RENDERING_HF_INFERENCE_API_BASE_MODEL="stabilityai/stable-diffusion-xl-base-1.0"
# If you decided to use a Hugging Face Inference API model for the RENDERING engine
RENDERING_HF_INFERENCE_API_REFINER_MODEL="stabilityai/stable-diffusion-xl-refiner-1.0"
# If your model returns a different file type (e.g. jpg or webp), change it here
RENDERING_HF_INFERENCE_API_FILE_TYPE="image/png"
# An experimental RENDERING engine (sorry, it is not well documented yet, so you may want to use one of the other engines instead)
RENDERING_VIDEOCHAIN_API_URL="http://localhost:7860"
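# If you decided to use OpenAI for the RENDERING engine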
RENDERING_OPENAI_API_BASE_URL="https://api.openai.com/v1"
RENDERING_OPENAI_API_MODEL="dall-e-3"
# ------------- LLM API CONFIG ----------------
# If you decided to use OpenAI for the LLM engine
LLM_OPENAI_API_BASE_URL="https://api.openai.com/v1"
LLM_OPENAI_API_MODEL="gpt-4"
# If you decided to use a private Hugging Face Inference Endpoint for the LLM engine
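# (same URL format as the rendering endpoint above, e.g. "https://XXXXXXXXXX.endpoints.huggingface.cloud")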
LLM_HF_INFERENCE_ENDPOINT_URL=""
# If you decided to use a Hugging Face Inference API model for the LLM engine
# LLM_HF_INFERENCE_API_MODEL="meta-llama/Llama-2-70b-chat-hf"
LLM_HF_INFERENCE_API_MODEL="HuggingFaceH4/zephyr-7b-beta"
# ----------- COMMUNITY SHARING (OPTIONAL) -----------
# You don't need these community sharing options to run the AI Comic Factory
# locally or on your own server (they are meant to be used by the Hugging Face team)
NEXT_PUBLIC_ENABLE_COMMUNITY_SHARING="false"
COMMUNITY_API_URL=
COMMUNITY_API_TOKEN=
COMMUNITY_API_ID=
# ----------- CENSORSHIP (OPTIONAL) -----------
# censorship is currently disabled, but will be required when we create a "community roll"
# (a public repository of user-generated comic strips)
ENABLE_CENSORSHIP="false"
# Due to the sensitive nature of some of the keywords we want to ban (users try all kinds of crazy illegal things),
# the words are not stored in clear text in the source code, but behind an encryption key
# (I don't want the project to be flagged by some automated AI moderation on GitHub or something)
SECRET_FINGERPRINT=""