jbilcke-hf HF staff committed on
Commit
1e641f1
β€’
1 Parent(s): c0d281b

increase quality of VideoChain outputs

Browse files
src/app/engine/render.ts CHANGED
@@ -241,7 +241,7 @@ export async function newRender({
241
  prompt,
242
  // negativePrompt, unused for now
243
  nbFrames: 1,
244
- nbSteps: 25, // 20 = fast, 30 = better, 50 = best
245
  actionnables: [], // ["text block"],
246
  segmentation: "disabled", // "firstframe", // one day we will remove this param, to make it automatic
247
  width,
 
241
  prompt,
242
  // negativePrompt, unused for now
243
  nbFrames: 1,
244
+ nbSteps: 30, // 20 = fast, 30 = better, 50 = best
245
  actionnables: [], // ["text block"],
246
  segmentation: "disabled", // "firstframe", // one day we will remove this param, to make it automatic
247
  width,
src/app/queries/predictWithHuggingFace.ts CHANGED
@@ -15,7 +15,7 @@ export async function predict(inputs: string): Promise<string> {
15
  switch (llmEngine) {
16
  case "INFERENCE_ENDPOINT":
17
  if (inferenceEndpoint) {
18
- console.log("Using a custom HF Inference Endpoint")
19
  hfie = hf.endpoint(inferenceEndpoint)
20
  } else {
21
  const error = "No Inference Endpoint URL defined"
@@ -26,7 +26,7 @@ export async function predict(inputs: string): Promise<string> {
26
 
27
  case "INFERENCE_API":
28
  if (inferenceModel) {
29
- console.log("Using an HF Inference API Model")
30
  } else {
31
  const error = "No Inference API model defined"
32
  console.error(error)
 
15
  switch (llmEngine) {
16
  case "INFERENCE_ENDPOINT":
17
  if (inferenceEndpoint) {
18
+ // console.log("Using a custom HF Inference Endpoint")
19
  hfie = hf.endpoint(inferenceEndpoint)
20
  } else {
21
  const error = "No Inference Endpoint URL defined"
 
26
 
27
  case "INFERENCE_API":
28
  if (inferenceModel) {
29
+ // console.log("Using an HF Inference API Model")
30
  } else {
31
  const error = "No Inference API model defined"
32
  console.error(error)