tfrere committed on
Commit eca7f7a · 1 Parent(s): eb3e391
client/src/components/StoryChoices.jsx ADDED
@@ -0,0 +1,87 @@
1
+ import { Box, Button, Typography, Chip } from "@mui/material";
2
+
3
+ // Function to convert text with ** to Chip elements
4
+ const formatTextWithBold = (text) => {
5
+ if (!text) return "";
6
+ const parts = text.split(/(\*\*.*?\*\*)/g);
7
+ return parts.map((part, index) => {
8
+ if (part.startsWith("**") && part.endsWith("**")) {
9
+ return (
10
+ <Chip
11
+ key={index}
12
+ label={part.slice(2, -2)}
13
+ size="small"
14
+ sx={{
15
+ mx: 0.5,
16
+ fontSize: "1.1rem",
17
+ backgroundColor: "rgba(255, 255, 255, 0.1)",
18
+ color: "white",
19
+ }}
20
+ />
21
+ );
22
+ }
23
+ return part;
24
+ });
25
+ };
26
+
27
+ export function StoryChoices({ choices = [], onChoice, disabled = false }) {
28
+ if (!choices || choices.length === 0) return null;
29
+
30
+ return (
31
+ <Box
32
+ sx={{
33
+ display: "flex",
34
+ flexDirection: "column",
35
+ justifyContent: "center",
36
+ alignItems: "center",
37
+ gap: 2,
38
+ p: 3,
39
+ minWidth: "350px",
40
+ height: "100%",
41
+ backgroundColor: "transparent",
42
+ }}
43
+ >
44
+ {choices.map((choice, index) => (
45
+ <Box
46
+ key={choice.id}
47
+ sx={{
48
+ display: "flex",
49
+ flexDirection: "column",
50
+ alignItems: "center",
51
+ gap: 1,
52
+ width: "100%",
53
+ }}
54
+ >
55
+ <Typography variant="caption" sx={{ opacity: 0.7, color: "white" }}>
56
+ Suggestion {index + 1}
57
+ </Typography>
58
+ <Button
59
+ variant="outlined"
60
+ size="large"
61
+ onClick={() => onChoice(choice.id)}
62
+ disabled={disabled}
63
+ sx={{
64
+ width: "100%",
65
+ textTransform: "none",
66
+ cursor: "pointer",
67
+ fontSize: "1.1rem",
68
+ padding: "16px 24px",
69
+ lineHeight: 1.3,
70
+ color: "white",
71
+ borderColor: "rgba(255, 255, 255, 0.23)",
72
+ "&:hover": {
73
+ borderColor: "white",
74
+ backgroundColor: "rgba(255, 255, 255, 0.05)",
75
+ },
76
+ "& .MuiChip-root": {
77
+ fontSize: "1.1rem",
78
+ },
79
+ }}
80
+ >
81
+ {formatTextWithBold(choice.text)}
82
+ </Button>
83
+ </Box>
84
+ ))}
85
+ </Box>
86
+ );
87
+ }
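
A quick illustration of what formatTextWithBold does with the ** markers; the sample string below is made up, but the regex is the one used above:

    // Illustrative only: how the capture-group split behaves on a sample choice text.
    const text = "Enter **Vault 15** quietly";
    const parts = text.split(/(\*\*.*?\*\*)/g);
    // parts === ["Enter ", "**Vault 15**", " quietly"]
    // In StoryChoices, the "**Vault 15**" part becomes <Chip label="Vault 15" />,
    // while the surrounding parts are rendered as plain text inside the Button.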
client/src/hooks/useImageGeneration.js CHANGED
@@ -1,12 +1,11 @@
1
  import axios from "axios";
 
2
 
3
  const API_URL = import.meta.env.VITE_API_URL || "http://localhost:8000";
4
 
5
  // Create axios instance with default config
6
  const api = axios.create({
7
- headers: {
8
- "x-client-id": `client_${Math.random().toString(36).substring(2)}`,
9
- },
10
  });
11
 
12
  export function useImageGeneration() {
 
1
  import axios from "axios";
2
+ import { getDefaultHeaders } from "../utils/session";
3
 
4
  const API_URL = import.meta.env.VITE_API_URL || "http://localhost:8000";
5
 
6
  // Create axios instance with default config
7
  const api = axios.create({
8
+ headers: getDefaultHeaders(),
 
 
9
  });
10
 
11
  export function useImageGeneration() {
client/src/hooks/useNarrator.js ADDED
@@ -0,0 +1,62 @@
1
+ import { useState, useRef } from "react";
2
+ import { storyApi } from "../utils/api";
3
+
4
+ export function useNarrator(isEnabled = true) {
5
+ const [isNarratorSpeaking, setIsNarratorSpeaking] = useState(false);
6
+ const audioRef = useRef(new Audio());
7
+
8
+ const stopNarration = () => {
9
+ if (audioRef.current) {
10
+ audioRef.current.pause();
11
+ audioRef.current.currentTime = 0;
12
+ setIsNarratorSpeaking(false);
13
+ }
14
+ };
15
+
16
+ const playNarration = async (text) => {
17
+ if (!isEnabled) return;
18
+
19
+ try {
20
+ // Stop any ongoing narration
21
+ stopNarration();
22
+
23
+ // Get audio from API
24
+ const response = await storyApi.narrate(text);
25
+
26
+ if (!response || !response.audio_base64) {
27
+ throw new Error("Pas d'audio reçu du serveur");
28
+ }
29
+
30
+ // Create audio blob and URL
31
+ const audioBlob = await fetch(
32
+ `data:audio/mpeg;base64,${response.audio_base64}`
33
+ ).then((r) => r.blob());
34
+ const audioUrl = URL.createObjectURL(audioBlob);
35
+
36
+ // Set up audio element
37
+ audioRef.current.src = audioUrl;
38
+ audioRef.current.onplay = () => setIsNarratorSpeaking(true);
39
+ audioRef.current.onended = () => {
40
+ setIsNarratorSpeaking(false);
41
+ URL.revokeObjectURL(audioUrl);
42
+ };
43
+ audioRef.current.onerror = () => {
44
+ console.error("Error playing audio");
45
+ setIsNarratorSpeaking(false);
46
+ URL.revokeObjectURL(audioUrl);
47
+ };
48
+
49
+ // Play audio
50
+ await audioRef.current.play();
51
+ } catch (error) {
52
+ console.error("Error in playNarration:", error);
53
+ setIsNarratorSpeaking(false);
54
+ }
55
+ };
56
+
57
+ return {
58
+ isNarratorSpeaking,
59
+ playNarration,
60
+ stopNarration,
61
+ };
62
+ }
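
A minimal usage sketch of the hook (the component, import path and narration text are illustrative; the hook itself calls /api/text-to-speech through storyApi.narrate as shown above):

    import { useNarrator } from "./useNarrator";

    // Hypothetical demo component: toggles narration of a fixed sentence.
    function NarrationDemo() {
      const { isNarratorSpeaking, playNarration, stopNarration } = useNarrator(true);
      return (
        <button
          onClick={() =>
            isNarratorSpeaking
              ? stopNarration()
              : playNarration("Sarah opens the vault door.")
          }
        >
          {isNarratorSpeaking ? "Stop" : "Narrate"}
        </button>
      );
    }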
client/src/hooks/useStoryCapture.js ADDED
@@ -0,0 +1,153 @@
1
+ import { useCallback } from "react";
2
+ import html2canvas from "html2canvas";
3
+
4
+ export function useStoryCapture() {
5
+ const captureStory = useCallback(async (containerRef) => {
6
+ if (!containerRef.current) return null;
7
+
8
+ try {
9
+ // Find the scrollable container (ComicLayout)
10
+ const scrollContainer = containerRef.current.querySelector(
11
+ "[data-comic-layout]"
12
+ );
13
+ if (!scrollContainer) {
14
+ console.error("Comic layout container not found");
15
+ return null;
16
+ }
17
+
18
+ // Save the original styles and positions
19
+ const originalStyles = new Map();
20
+ const elementsToRestore = [
21
+ containerRef.current,
22
+ scrollContainer,
23
+ ...Array.from(scrollContainer.children),
24
+ ];
25
+
26
+ // Save the original styles
27
+ elementsToRestore.forEach((el) => {
28
+ originalStyles.set(el, {
29
+ style: el.style.cssText,
30
+ scroll: { left: el.scrollLeft, top: el.scrollTop },
31
+ });
32
+ });
33
+
34
+ // Get the total dimensions (without padding)
35
+ const children = Array.from(scrollContainer.children);
36
+ const lastChild = children[children.length - 1];
37
+ const lastChildRect = lastChild.getBoundingClientRect();
38
+ const containerRect = scrollContainer.getBoundingClientRect();
39
+
40
+ // Compute the total width, including the position and full width of the last element
41
+ const totalWidth =
42
+ lastChildRect.x + lastChildRect.width - containerRect.x + 32; // Add a small safety padding
43
+
44
+ const totalHeight = scrollContainer.scrollHeight;
45
+
46
+ // Prepare the container for capture
47
+ Object.assign(containerRef.current.style, {
48
+ width: "auto",
49
+ height: "auto",
50
+ overflow: "visible",
51
+ });
52
+
53
+ // Prepare the scrollable container
54
+ Object.assign(scrollContainer.style, {
55
+ width: `${totalWidth}px`,
56
+ height: `${totalHeight}px`,
57
+ position: "relative",
58
+ overflow: "visible",
59
+ display: "flex",
60
+ transform: "none",
61
+ transition: "none",
62
+ padding: "0",
63
+ justifyContent: "flex-start", // Force left alignment
64
+ });
65
+
66
+ // Force a reflow
67
+ scrollContainer.offsetHeight;
68
+
69
+ // Capture the image
70
+ const canvas = await html2canvas(scrollContainer, {
71
+ scale: 2,
72
+ useCORS: true,
73
+ allowTaint: true,
74
+ backgroundColor: "#242424",
75
+ width: totalWidth,
76
+ height: totalHeight,
77
+ x: 0,
78
+ y: 0,
79
+ scrollX: 0,
80
+ scrollY: 0,
81
+ windowWidth: totalWidth,
82
+ windowHeight: totalHeight,
83
+ logging: true,
84
+ onclone: (clonedDoc) => {
85
+ const clonedContainer = clonedDoc.querySelector(
86
+ "[data-comic-layout]"
87
+ );
88
+ if (clonedContainer) {
89
+ Object.assign(clonedContainer.style, {
90
+ width: `${totalWidth}px`,
91
+ height: `${totalHeight}px`,
92
+ position: "relative",
93
+ overflow: "visible",
94
+ display: "flex",
95
+ transform: "none",
96
+ transition: "none",
97
+ padding: "0",
98
+ justifyContent: "flex-start",
99
+ });
100
+
101
+ // Make sure all children are visible and aligned
102
+ Array.from(clonedContainer.children).forEach(
103
+ (child, index, arr) => {
104
+ Object.assign(child.style, {
105
+ position: "relative",
106
+ transform: "none",
107
+ transition: "none",
108
+ marginLeft: "0",
109
+ marginRight: index < arr.length - 1 ? "16px" : "16px", // Keep a right margin, even for the last child
110
+ });
111
+ }
112
+ );
113
+ }
114
+ },
115
+ });
116
+
117
+ // Restore all original styles
118
+ elementsToRestore.forEach((el) => {
119
+ const original = originalStyles.get(el);
120
+ if (original) {
121
+ el.style.cssText = original.style;
122
+ el.scrollLeft = original.scroll.left;
123
+ el.scrollTop = original.scroll.top;
124
+ }
125
+ });
126
+
127
+ return canvas.toDataURL("image/png", 1.0);
128
+ } catch (error) {
129
+ console.error("Error capturing story:", error);
130
+ return null;
131
+ }
132
+ }, []);
133
+
134
+ const downloadStoryImage = useCallback(
135
+ async (containerRef, filename = "my-story.png") => {
136
+ const imageUrl = await captureStory(containerRef);
137
+ if (!imageUrl) return;
138
+
139
+ const link = document.createElement("a");
140
+ link.href = imageUrl;
141
+ link.download = filename;
142
+ document.body.appendChild(link);
143
+ link.click();
144
+ document.body.removeChild(link);
145
+ },
146
+ [captureStory]
147
+ );
148
+
149
+ return {
150
+ captureStory,
151
+ downloadStoryImage,
152
+ };
153
+ }
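
Usage sketch, mirroring how Game.jsx wires this hook up later in this commit; the ref must wrap an element that contains the [data-comic-layout] container, otherwise captureStory logs an error and returns null (the component name and filename are placeholders):

    import { useRef } from "react";
    import { useStoryCapture } from "./useStoryCapture";

    // Hypothetical wrapper: children must include the element marked data-comic-layout.
    function CaptureDemo({ children }) {
      const containerRef = useRef(null);
      const { downloadStoryImage } = useStoryCapture();
      return (
        <div ref={containerRef}>
          {children}
          <button onClick={() => downloadStoryImage(containerRef, "my-story.png")}>
            Save story as image
          </button>
        </div>
      );
    }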
client/src/layouts/ComicLayout.jsx CHANGED
@@ -1,11 +1,22 @@
1
- import { Box } from "@mui/material";
2
  import { LAYOUTS } from "./config";
3
  import { groupSegmentsIntoLayouts } from "./utils";
4
  import { useEffect, useRef } from "react";
5
  import { Panel } from "./Panel";
 
 
6
 
7
  // Component for displaying a page of panels
8
- function ComicPage({ layout, layoutIndex }) {
 
 
9
  // Compute the total number of images across all segments of this layout
10
  const totalImages = layout.segments.reduce((total, segment) => {
11
  return total + (segment.images?.length || 0);
@@ -13,55 +24,117 @@ function ComicPage({ layout, layoutIndex }) {
13
 
14
  return (
15
  <Box
16
- key={layoutIndex}
17
  sx={{
18
- display: "grid",
19
- gridTemplateColumns: `repeat(${LAYOUTS[layout.type].gridCols}, 1fr)`,
20
- gridTemplateRows: `repeat(${LAYOUTS[layout.type].gridRows}, 1fr)`,
21
  gap: 2,
22
  height: "100%",
23
- aspectRatio: "0.7",
24
- backgroundColor: "white",
25
- boxShadow: "0 0 10px rgba(0,0,0,0.1)",
26
- borderRadius: "4px",
27
- p: 2,
28
- flexShrink: 0,
29
  }}
30
  >
31
- {LAYOUTS[layout.type].panels
32
- .slice(0, totalImages)
33
- .map((panel, panelIndex) => {
34
- // Find the segment that contains the image for this panel
35
- let currentImageIndex = 0;
36
- let targetSegment = null;
37
- let targetImageIndex = 0;
 
 
38
 
39
- for (const segment of layout.segments) {
40
- const segmentImageCount = segment.images?.length || 0;
41
- if (currentImageIndex + segmentImageCount > panelIndex) {
42
- targetSegment = segment;
43
- targetImageIndex = panelIndex - currentImageIndex;
44
- break;
 
 
45
  }
46
- currentImageIndex += segmentImageCount;
47
- }
48
 
49
- return (
50
- <Panel
51
- key={panelIndex}
52
- panel={panel}
53
- segment={targetSegment}
54
- panelIndex={targetImageIndex}
 
 
55
  />
56
- );
57
- })}
 
58
  </Box>
59
  );
60
  }
61
 
62
  // Main comic layout component
63
- export function ComicLayout({ segments }) {
64
- const layouts = groupSegmentsIntoLayouts(segments);
 
 
 
 
 
 
65
  const scrollContainerRef = useRef(null);
66
 
67
  // Effect to scroll to the right when new layouts are added
@@ -72,11 +145,16 @@ export function ComicLayout({ segments }) {
72
  behavior: "smooth",
73
  });
74
  }
75
- }, [layouts.length]); // Only run when the number of layouts changes
 
 
 
 
76
 
77
  return (
78
  <Box
79
  ref={scrollContainerRef}
 
80
  sx={{
81
  display: "flex",
82
  flexDirection: "row",
@@ -103,6 +181,12 @@ export function ComicLayout({ segments }) {
103
  key={layoutIndex}
104
  layout={layout}
105
  layoutIndex={layoutIndex}
 
 
 
 
 
 
106
  />
107
  ))}
108
  </Box>
 
1
+ import { Box, IconButton, Tooltip } from "@mui/material";
2
  import { LAYOUTS } from "./config";
3
  import { groupSegmentsIntoLayouts } from "./utils";
4
  import { useEffect, useRef } from "react";
5
  import { Panel } from "./Panel";
6
+ import { StoryChoices } from "../components/StoryChoices";
7
+ import PhotoCameraIcon from "@mui/icons-material/PhotoCamera";
8
 
9
  // Component for displaying a page of panels
10
+ function ComicPage({
11
+ layout,
12
+ layoutIndex,
13
+ isLastPage,
14
+ choices,
15
+ onChoice,
16
+ isLoading,
17
+ showScreenshot,
18
+ onScreenshot,
19
+ }) {
20
  // Compute the total number of images across all segments of this layout
21
  const totalImages = layout.segments.reduce((total, segment) => {
22
  return total + (segment.images?.length || 0);
 
24
 
25
  return (
26
  <Box
 
27
  sx={{
28
+ display: "flex",
29
+ flexDirection: "row",
 
30
  gap: 2,
31
  height: "100%",
 
 
 
 
 
 
32
  }}
33
  >
34
+ <Box
35
+ sx={{
36
+ display: "grid",
37
+ gridTemplateColumns: `repeat(${LAYOUTS[layout.type].gridCols}, 1fr)`,
38
+ gridTemplateRows: `repeat(${LAYOUTS[layout.type].gridRows}, 1fr)`,
39
+ gap: 2,
40
+ height: "100%",
41
+ aspectRatio: "0.7",
42
+ backgroundColor: "white",
43
+ boxShadow: "0 0 10px rgba(0,0,0,0.1)",
44
+ borderRadius: "4px",
45
+ p: 2,
46
+ pb: 4,
47
+ flexShrink: 0,
48
+ position: "relative",
49
+ }}
50
+ >
51
+ {LAYOUTS[layout.type].panels
52
+ .slice(0, totalImages)
53
+ .map((panel, panelIndex) => {
54
+ // Find the segment that contains the image for this panel
55
+ let currentImageIndex = 0;
56
+ let targetSegment = null;
57
+ let targetImageIndex = 0;
58
 
59
+ for (const segment of layout.segments) {
60
+ const segmentImageCount = segment.images?.length || 0;
61
+ if (currentImageIndex + segmentImageCount > panelIndex) {
62
+ targetSegment = segment;
63
+ targetImageIndex = panelIndex - currentImageIndex;
64
+ break;
65
+ }
66
+ currentImageIndex += segmentImageCount;
67
  }
 
 
68
 
69
+ return (
70
+ <Panel
71
+ key={panelIndex}
72
+ panel={panel}
73
+ segment={targetSegment}
74
+ panelIndex={targetImageIndex}
75
+ />
76
+ );
77
+ })}
78
+ <Box
79
+ sx={{
80
+ position: "absolute",
81
+ bottom: 8,
82
+ left: 0,
83
+ right: 0,
84
+ textAlign: "center",
85
+ color: "black",
86
+ fontSize: "0.875rem",
87
+ fontWeight: 500,
88
+ }}
89
+ >
90
+ {layoutIndex + 1}
91
+ </Box>
92
+ </Box>
93
+ {isLastPage && (choices?.length > 0 || showScreenshot) && (
94
+ <Box sx={{ display: "flex", flexDirection: "column", gap: 2 }}>
95
+ {showScreenshot && (
96
+ <Box sx={{ display: "flex", justifyContent: "center", p: 2 }}>
97
+ <Tooltip title="Capturer l'histoire">
98
+ <IconButton
99
+ onClick={onScreenshot}
100
+ sx={{
101
+ border: "1px solid",
102
+ borderColor: "rgba(255, 255, 255, 0.23)",
103
+ color: "white",
104
+ p: 2,
105
+ "&:hover": {
106
+ borderColor: "white",
107
+ backgroundColor: "rgba(255, 255, 255, 0.05)",
108
+ },
109
+ }}
110
+ >
111
+ <PhotoCameraIcon />
112
+ </IconButton>
113
+ </Tooltip>
114
+ </Box>
115
+ )}
116
+ {choices?.length > 0 && (
117
+ <StoryChoices
118
+ choices={choices}
119
+ onChoice={onChoice}
120
+ disabled={isLoading}
121
  />
122
+ )}
123
+ </Box>
124
+ )}
125
  </Box>
126
  );
127
  }
128
 
129
  // Main comic layout component
130
+ export function ComicLayout({
131
+ segments,
132
+ choices,
133
+ onChoice,
134
+ isLoading,
135
+ showScreenshot,
136
+ onScreenshot,
137
+ }) {
138
  const scrollContainerRef = useRef(null);
139
 
140
  // Effect to scroll to the right when new layouts are added
 
145
  behavior: "smooth",
146
  });
147
  }
148
+ }, [segments.length]);
149
+
150
+ // Filter out segments that are still loading
151
+ const loadedSegments = segments.filter((segment) => !segment.isLoading);
152
+ const layouts = groupSegmentsIntoLayouts(loadedSegments);
153
 
154
  return (
155
  <Box
156
  ref={scrollContainerRef}
157
+ data-comic-layout
158
  sx={{
159
  display: "flex",
160
  flexDirection: "row",
 
181
  key={layoutIndex}
182
  layout={layout}
183
  layoutIndex={layoutIndex}
184
+ isLastPage={layoutIndex === layouts.length - 1}
185
+ choices={choices}
186
+ onChoice={onChoice}
187
+ isLoading={isLoading}
188
+ showScreenshot={showScreenshot}
189
+ onScreenshot={onScreenshot}
190
  />
191
  ))}
192
  </Box>
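
For reference, a sketch of how the reworked component is meant to be driven; the props mirror what Game.jsx passes later in this commit, while the wrapper component and handler values here are placeholders, not part of the commit:

    import { ComicLayout } from "./ComicLayout";

    // Hypothetical wrapper showing the expected props.
    export function ComicDemo({ segments, choices, onChoice, isLoading }) {
      return (
        <ComicLayout
          segments={segments}     // segments still marked isLoading are filtered out internally
          choices={choices}       // rendered via <StoryChoices> on the last page only
          onChoice={onChoice}
          isLoading={isLoading}   // disables the choice buttons while a request is running
          showScreenshot={false}  // toggles the PhotoCamera capture button
          onScreenshot={() => {}}
        />
      );
    }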
client/src/layouts/Panel.jsx CHANGED
@@ -57,21 +57,27 @@ export function Panel({ segment, panel, panelIndex }) {
57
  borderRadius: "8px",
58
  overflow: "hidden",
59
  transition: "all 0.3s ease-in-out",
 
60
  }}
61
  >
62
  {segment && (
63
  <>
64
- {/* Image with fade-in */}
65
- {segment.images?.[panelIndex] && (
66
- <Box
67
- sx={{
68
- position: "relative",
69
- width: "100%",
70
- height: "100%",
71
- opacity: imageLoaded ? 1 : 0,
72
- transition: "opacity 0.5s ease-in-out",
73
- }}
74
- >
 
 
 
 
 
75
  <img
76
  src={`data:image/jpeg;base64,${segment.images[panelIndex]}`}
77
  alt={`Story scene ${panelIndex + 1}`}
@@ -84,23 +90,21 @@ export function Panel({ segment, panel, panelIndex }) {
84
  onLoad={handleImageLoad}
85
  onError={handleImageError}
86
  />
87
- </Box>
88
- )}
89
 
90
- {/* Loading spinner, always shown while the image is not loaded */}
91
  {(!segment.images?.[panelIndex] || !imageLoaded) && (
92
  <Box
93
  sx={{
94
  position: "absolute",
95
  top: 0,
96
  left: 0,
97
- width: "100%",
98
- height: "100%",
99
  display: "flex",
100
  alignItems: "center",
101
  justifyContent: "center",
102
- flexDirection: "column",
103
- gap: 1,
104
  opacity: 0.5,
105
  backgroundColor: "white",
106
  zIndex: 1,
 
57
  borderRadius: "8px",
58
  overflow: "hidden",
59
  transition: "all 0.3s ease-in-out",
60
+ aspectRatio: `${panel.width} / ${panel.height}`, // Force the aspect ratio even without an image
61
  }}
62
  >
63
  {segment && (
64
  <>
65
+ {/* Image container with fixed dimensions */}
66
+ <Box
67
+ sx={{
68
+ position: "absolute",
69
+ top: 0,
70
+ left: 0,
71
+ right: 0,
72
+ bottom: 0,
73
+ display: "flex",
74
+ alignItems: "center",
75
+ justifyContent: "center",
76
+ opacity: imageLoaded ? 1 : 0,
77
+ transition: "opacity 0.5s ease-in-out",
78
+ }}
79
+ >
80
+ {segment.images?.[panelIndex] && (
81
  <img
82
  src={`data:image/jpeg;base64,${segment.images[panelIndex]}`}
83
  alt={`Story scene ${panelIndex + 1}`}
 
90
  onLoad={handleImageLoad}
91
  onError={handleImageError}
92
  />
93
+ )}
94
+ </Box>
95
 
96
+ {/* Loading spinner */}
97
  {(!segment.images?.[panelIndex] || !imageLoaded) && (
98
  <Box
99
  sx={{
100
  position: "absolute",
101
  top: 0,
102
  left: 0,
103
+ right: 0,
104
+ bottom: 0,
105
  display: "flex",
106
  alignItems: "center",
107
  justifyContent: "center",
 
 
108
  opacity: 0.5,
109
  backgroundColor: "white",
110
  zIndex: 1,
client/src/layouts/config.js CHANGED
@@ -141,27 +141,37 @@ export const nonRandomLayouts = Object.keys(LAYOUTS).filter(
141
  (layout) => layout !== "COVER"
142
  );
143
 
 
144
  // Helper functions for layout configuration
145
- export const getNextLayoutType = (currentLayoutCount) => {
146
- // Get all available layouts except COVER
147
- const availableLayouts = Object.keys(LAYOUTS).filter(
148
- (layout) => layout !== "COVER"
149
- );
150
 
151
- // Use a pseudo-random selection based on the current count
152
- // but avoid repeating the same layout twice in a row
153
- const previousLayout = `LAYOUT_${
154
- (currentLayoutCount % availableLayouts.length) + 1
155
- }`;
156
- let nextLayout;
 
 
 
 
157
 
158
- do {
159
- const randomIndex =
160
- Math.floor(Math.random() * (availableLayouts.length - 1)) + 1;
161
- nextLayout = `LAYOUT_${randomIndex}`;
162
- } while (nextLayout === previousLayout);
163
 
164
- return nextLayout;
 
 
165
  };
166
 
167
  export const getLayoutDimensions = (layoutType, panelIndex) =>
 
141
  (layout) => layout !== "COVER"
142
  );
143
 
144
+ // Group layouts by number of panels
145
+ export const LAYOUTS_BY_PANEL_COUNT = {
146
+ 1: ["COVER"],
147
+ 2: ["LAYOUT_2"], // Layouts with exactly 2 panels
148
+ 3: ["LAYOUT_5"], // Layouts with exactly 3 panels
149
+ 4: ["LAYOUT_3", "LAYOUT_4", "LAYOUT_6"], // Layouts with exactly 4 panels
150
+ };
151
+
152
  // Helper functions for layout configuration
153
+ export const getNextLayoutType = (currentLayoutCount, imageCount) => {
154
+ // Get the layouts available for this number of images
155
+ const availableLayouts = LAYOUTS_BY_PANEL_COUNT[imageCount] || [];
 
 
156
 
157
+ if (!availableLayouts.length) {
158
+ // If no layout is available for this exact number of images,
159
+ // use the first layout that can hold at least that many images
160
+ for (let i = imageCount + 1; i <= 4; i++) {
161
+ if (LAYOUTS_BY_PANEL_COUNT[i]?.length) {
162
+ availableLayouts.push(...LAYOUTS_BY_PANEL_COUNT[i]);
163
+ break;
164
+ }
165
+ }
166
+ }
167
 
168
+ if (!availableLayouts.length) {
169
+ return "LAYOUT_1"; // Default layout if nothing matches
170
+ }
 
 
171
 
172
+ // Pick a random layout among those available
173
+ const randomIndex = Math.floor(Math.random() * availableLayouts.length);
174
+ return availableLayouts[randomIndex];
175
  };
176
 
177
  export const getLayoutDimensions = (layoutType, panelIndex) =>
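
A small sketch of how the new panel-count-driven selection behaves, assuming the LAYOUTS table defines the layout names listed in LAYOUTS_BY_PANEL_COUNT:

    import { getNextLayoutType, LAYOUTS_BY_PANEL_COUNT } from "./config";

    // 2 images -> always "LAYOUT_2"; 3 images -> "LAYOUT_5";
    // 4 images -> one of "LAYOUT_3" | "LAYOUT_4" | "LAYOUT_6" at random.
    console.log(getNextLayoutType(0, 2)); // "LAYOUT_2"
    console.log(getNextLayoutType(3, 4)); // e.g. "LAYOUT_4"

    // A count with no exact match falls through to the next larger group,
    // and "LAYOUT_1" is returned as a last resort.
    console.log(LAYOUTS_BY_PANEL_COUNT[1]); // ["COVER"]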
client/src/layouts/utils.js CHANGED
@@ -8,42 +8,19 @@ export function groupSegmentsIntoLayouts(segments) {
8
  if (!segments || segments.length === 0) return [];
9
 
10
  const layouts = [];
11
- let currentLayout = null;
12
- let currentPanelIndex = 0;
13
 
14
  segments.forEach((segment) => {
 
 
15
  // If this is the first or the last segment (death/victory), create a COVER layout
16
  if (segment.is_first_step || segment.is_last_step) {
17
- currentLayout = { type: "COVER", segments: [segment] };
18
- layouts.push(currentLayout);
19
- currentPanelIndex = segment.images?.length || 0;
20
  return;
21
  }
22
 
23
- // If there is no current layout, or all its panels are filled, create a new one
24
- if (
25
- !currentLayout ||
26
- currentPanelIndex >= LAYOUTS[currentLayout.type].panels.length
27
- ) {
28
- // Use the existing layout for this page or create a new one
29
- const pageIndex = layouts.length;
30
- let nextType = pageLayoutMap.get(pageIndex);
31
- if (!nextType) {
32
- nextType = getNextLayoutType(layouts.length);
33
- pageLayoutMap.set(pageIndex, nextType);
34
- }
35
- currentLayout = { type: nextType, segments: [] };
36
- layouts.push(currentLayout);
37
- currentPanelIndex = 0;
38
- }
39
-
40
- // Add the segment to the current layout
41
- currentLayout.segments.push(segment);
42
-
43
- // Update the panel index for the next segment
44
- if (segment.images) {
45
- currentPanelIndex += segment.images.length;
46
- }
47
  });
48
 
49
  return layouts;
@@ -63,30 +40,15 @@ export function getNextPanelDimensions(segments) {
63
  return LAYOUTS.COVER.panels[0];
64
  }
65
 
66
- // For middle segments, determine the layout and the position within that layout
67
- const layouts = groupSegmentsIntoLayouts(nonChoiceSegments.slice(0, -1));
68
- const lastLayout = layouts[layouts.length - 1];
69
- const segmentsInLastLayout = lastLayout ? lastLayout.segments.length : 0;
70
-
71
- // Use the existing layout or create a new one
72
- const pageIndex = layouts.length;
73
- let nextLayoutType = pageLayoutMap.get(pageIndex);
74
- if (!nextLayoutType) {
75
- nextLayoutType = getNextLayoutType(layouts.length);
76
- pageLayoutMap.set(pageIndex, nextLayoutType);
77
- }
78
- const nextPanelIndex = segmentsInLastLayout;
79
-
80
- // If the last layout is full, take the first panel of the next layout
81
- if (
82
- !lastLayout ||
83
- segmentsInLastLayout >= LAYOUTS[lastLayout.type].panels.length
84
- ) {
85
- return LAYOUTS[nextLayoutType].panels[0];
86
- }
87
 
88
- // Otherwise, take the next panel of the current layout
89
- return LAYOUTS[lastLayout.type].panels[nextPanelIndex];
90
  }
91
 
92
  // Function to reset layout map (call this when starting a new story)
 
8
  if (!segments || segments.length === 0) return [];
9
 
10
  const layouts = [];
 
 
11
 
12
  segments.forEach((segment) => {
13
+ const imageCount = segment.images?.length || 0;
14
+
15
  // Si c'est le premier segment ou le dernier (mort/victoire), créer un layout COVER
16
  if (segment.is_first_step || segment.is_last_step) {
17
+ layouts.push({ type: "COVER", segments: [segment] });
 
 
18
  return;
19
  }
20
 
21
+ // For all other segments, create a layout suited to the number of images
22
+ const layoutType = getNextLayoutType(layouts.length, imageCount);
23
+ layouts.push({ type: layoutType, segments: [segment] });
 
 
24
  });
25
 
26
  return layouts;
 
40
  return LAYOUTS.COVER.panels[0];
41
  }
42
 
43
+ // For middle segments, determine the layout based on the number of images
44
+ const lastSegment = nonChoiceSegments[nonChoiceSegments.length - 1];
45
+ const imageCount = lastSegment.images?.length || 0;
46
+ const layoutType = getNextLayoutType(
47
+ nonChoiceSegments.length - 1,
48
+ imageCount
49
+ );
 
 
50
 
51
+ return LAYOUTS[layoutType].panels[0];
 
52
  }
53
 
54
  // Function to reset layout map (call this when starting a new story)
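
A sketch of the simplified grouping in action (one layout per segment), assuming LAYOUTS defines the types referenced by LAYOUTS_BY_PANEL_COUNT; the segment objects below carry only the fields the function actually reads:

    import { groupSegmentsIntoLayouts } from "./utils";

    const segments = [
      { is_first_step: true, images: ["b64-0"] },          // cover page
      { images: ["b64-1", "b64-2", "b64-3"] },             // 3 images -> "LAYOUT_5"
      { is_last_step: true, images: ["b64-4"] },           // cover page again
    ];
    const layouts = groupSegmentsIntoLayouts(segments);
    // [
    //   { type: "COVER",    segments: [segments[0]] },
    //   { type: "LAYOUT_5", segments: [segments[1]] },
    //   { type: "COVER",    segments: [segments[2]] },
    // ]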
client/src/pages/Game.jsx CHANGED
@@ -1,25 +1,367 @@
1
- import { motion } from "framer-motion";
2
- import App from "./game/App";
3
- import { Box } from "@mui/material";
 
 
4
 
5
- export const Game = () => {
6
  return (
7
- <motion.div
8
- initial={{ opacity: 0 }}
9
- animate={{ opacity: 1 }}
10
- exit={{ opacity: 0 }}
11
- transition={{ duration: 0.5 }}
 
 
 
12
  >
13
  <Box
14
  sx={{
15
- minHeight: "100vh",
16
- bgcolor: "background.default",
 
 
 
17
  }}
18
  >
19
- <App />
 
 
20
  </Box>
21
- </motion.div>
22
  );
23
- };
24
 
25
  export default Game;
 
1
+ import { useState, useEffect, useRef } from "react";
2
+ import { Box, LinearProgress, IconButton, Tooltip } from "@mui/material";
3
+ import { ComicLayout } from "../layouts/ComicLayout";
4
+ import { storyApi } from "../utils/api";
5
+ import { useNarrator } from "../hooks/useNarrator";
6
+ import { useStoryCapture } from "../hooks/useStoryCapture";
7
+ import { StoryChoices } from "../components/StoryChoices";
8
+ import VolumeUpIcon from "@mui/icons-material/VolumeUp";
9
+ import VolumeOffIcon from "@mui/icons-material/VolumeOff";
10
+ import PhotoCameraIcon from "@mui/icons-material/PhotoCamera";
11
+
12
+ // Constants
13
+ const NARRATION_ENABLED_KEY = "narration_enabled";
14
+
15
+ // Function to strip ** markers from text (rendered as plain text here, not Chips)
16
+ const formatTextWithBold = (text, isInPanel = false) => {
17
+ if (!text) return "";
18
+ const parts = text.split(/(\*\*.*?\*\*)/g);
19
+ return parts.map((part, index) => {
20
+ if (part.startsWith("**") && part.endsWith("**")) {
21
+ return part.slice(2, -2);
22
+ }
23
+ return part;
24
+ });
25
+ };
26
+
27
+ // Function to strip bold markers from text for narration
28
+ const stripBoldMarkers = (text) => {
29
+ return text.replace(/\*\*/g, "");
30
+ };
31
+
32
+ export function Game() {
33
+ const storyContainerRef = useRef(null);
34
+ const { downloadStoryImage } = useStoryCapture();
35
+ const [storySegments, setStorySegments] = useState([]);
36
+ const [currentChoices, setCurrentChoices] = useState([]);
37
+ const [isLoading, setIsLoading] = useState(false);
38
+ const [isNarrationEnabled, setIsNarrationEnabled] = useState(() => {
39
+ // Initialize from localStorage, defaulting to true
40
+ const stored = localStorage.getItem(NARRATION_ENABLED_KEY);
41
+ return stored === null ? true : stored === "true";
42
+ });
43
+ const { isNarratorSpeaking, playNarration, stopNarration } =
44
+ useNarrator(isNarrationEnabled);
45
+
46
+ // Persist the narration state to localStorage
47
+ useEffect(() => {
48
+ localStorage.setItem(NARRATION_ENABLED_KEY, isNarrationEnabled);
49
+ }, [isNarrationEnabled]);
50
+
51
+ // Start the story on first render
52
+ useEffect(() => {
53
+ handleStoryAction("restart");
54
+ }, []);
55
+
56
+ const handleChoice = async (choiceId) => {
57
+ // If the "Réessayer" (retry) option is selected, re-run the last action
58
+ if (currentChoices.length === 1 && currentChoices[0].text === "Réessayer") {
59
+ // Remove the error segment
60
+ setStorySegments((prev) => prev.slice(0, -1));
61
+ // Retry the last action
62
+ await handleStoryAction(
63
+ "choice",
64
+ storySegments[storySegments.length - 2]?.choiceId || null
65
+ );
66
+ return;
67
+ }
68
+
69
+ // Add the choice as a segment
70
+ const choice = currentChoices.find((c) => c.id === choiceId);
71
+ setStorySegments((prev) => [
72
+ ...prev,
73
+ {
74
+ text: choice.text,
75
+ rawText: stripBoldMarkers(choice.text),
76
+ isChoice: true,
77
+ choiceId: choiceId,
78
+ },
79
+ ]);
80
+
81
+ // Continue the story with this choice
82
+ await handleStoryAction("choice", choiceId);
83
+ };
84
+
85
+ const handleStoryAction = async (action, choiceId = null) => {
86
+ setIsLoading(true);
87
+ try {
88
+ // Stop any ongoing narration
89
+ if (isNarratorSpeaking) {
90
+ stopNarration();
91
+ }
92
+
93
+ console.log("Starting story action:", action);
94
+ // 1. Get the story
95
+ const storyData = await (action === "restart"
96
+ ? storyApi.start()
97
+ : storyApi.makeChoice(choiceId));
98
+
99
+ if (!storyData) {
100
+ throw new Error("Pas de données reçues du serveur");
101
+ }
102
+
103
+ // 2. Create new segment without images
104
+ const newSegment = {
105
+ text: formatTextWithBold(storyData.story_text, true),
106
+ rawText: stripBoldMarkers(storyData.story_text), // Store raw text for narration
107
+ isChoice: false,
108
+ isDeath: storyData.is_death,
109
+ isVictory: storyData.is_victory,
110
+ radiationLevel: storyData.radiation_level,
111
+ is_first_step: storyData.is_first_step,
112
+ is_last_step: storyData.is_last_step,
113
+ images: [],
114
+ isLoading: true, // Flag indicating that the segment is still loading
115
+ };
116
+
117
+ // 3. Update segments
118
+ if (action === "restart") {
119
+ setStorySegments([newSegment]);
120
+ } else {
121
+ setStorySegments((prev) => [...prev, newSegment]);
122
+ }
123
+
124
+ // 4. Update choices
125
+ setCurrentChoices(storyData.choices || []);
126
+
127
+ // 5. Start narration of the new segment
128
+ await playNarration(newSegment.rawText);
129
+
130
+ // 6. Generate images in parallel
131
+ if (storyData.image_prompts && storyData.image_prompts.length > 0) {
132
+ console.log(
133
+ "Starting image generation for prompts:",
134
+ storyData.image_prompts
135
+ );
136
+ generateImagesForStory(
137
+ storyData.image_prompts,
138
+ action === "restart" ? 0 : storySegments.length,
139
+ action === "restart" ? [newSegment] : [...storySegments, newSegment]
140
+ );
141
+ } else {
142
+ // If there are no images, mark the segment as loaded
143
+ const updatedSegment = { ...newSegment, isLoading: false };
144
+ if (action === "restart") {
145
+ setStorySegments([updatedSegment]);
146
+ } else {
147
+ setStorySegments((prev) => [...prev.slice(0, -1), updatedSegment]);
148
+ }
149
+ }
150
+ } catch (error) {
151
+ console.error("Error in handleStoryAction:", error);
152
+ const errorSegment = {
153
+ text:
154
+ error.message ||
155
+ "Le conteur d'histoires est temporairement indisponible. Veuillez réessayer dans quelques instants...",
156
+ rawText:
157
+ error.message ||
158
+ "Le conteur d'histoires est temporairement indisponible. Veuillez réessayer dans quelques instants...",
159
+ isChoice: false,
160
+ isDeath: false,
161
+ isVictory: false,
162
+ radiationLevel:
163
+ storySegments.length > 0
164
+ ? storySegments[storySegments.length - 1].radiationLevel
165
+ : 0,
166
+ images: [],
167
+ };
168
+
169
+ if (action === "restart") {
170
+ setStorySegments([errorSegment]);
171
+ } else {
172
+ setStorySegments((prev) => [...prev, errorSegment]);
173
+ }
174
+
175
+ // Set retry choice
176
+ setCurrentChoices([{ id: 1, text: "Réessayer" }]);
177
+
178
+ // Play error message
179
+ await playNarration(errorSegment.rawText);
180
+ } finally {
181
+ setIsLoading(false);
182
+ }
183
+ };
184
+
185
+ const generateImagesForStory = async (
186
+ imagePrompts,
187
+ segmentIndex,
188
+ currentSegments
189
+ ) => {
190
+ try {
191
+ let localSegments = [...currentSegments];
192
+ const images = Array(imagePrompts.length).fill(null);
193
+ let allImagesGenerated = false;
194
+
195
+ for (
196
+ let promptIndex = 0;
197
+ promptIndex < imagePrompts.length;
198
+ promptIndex++
199
+ ) {
200
+ let retryCount = 0;
201
+ const maxRetries = 3;
202
+ let success = false;
203
+
204
+ while (retryCount < maxRetries && !success) {
205
+ try {
206
+ console.log(
207
+ `Generating image ${promptIndex + 1}/${imagePrompts.length}`
208
+ );
209
+ const result = await storyApi.generateImage(
210
+ imagePrompts[promptIndex]
211
+ );
212
+
213
+ if (!result) {
214
+ throw new Error("Pas de résultat de génération d'image");
215
+ }
216
+
217
+ if (result.success) {
218
+ console.log(`Successfully generated image ${promptIndex + 1}`);
219
+ images[promptIndex] = result.image_base64;
220
+
221
+ // Check whether all images have been generated
222
+ allImagesGenerated = images.every((img) => img !== null);
223
+
224
+ // Only update the segment once all images are generated
225
+ if (allImagesGenerated) {
226
+ localSegments[segmentIndex] = {
227
+ ...localSegments[segmentIndex],
228
+ images,
229
+ isLoading: false,
230
+ };
231
+ setStorySegments([...localSegments]);
232
+ }
233
+ success = true;
234
+ } else {
235
+ console.warn(
236
+ `Failed to generate image ${promptIndex + 1}, attempt ${
237
+ retryCount + 1
238
+ }`
239
+ );
240
+ retryCount++;
241
+ }
242
+ } catch (error) {
243
+ console.error(`Error generating image ${promptIndex + 1}:`, error);
244
+ retryCount++;
245
+ }
246
+ }
247
+
248
+ if (!success) {
249
+ console.error(
250
+ `Failed to generate image ${
251
+ promptIndex + 1
252
+ } after ${maxRetries} attempts`
253
+ );
254
+ }
255
+ }
256
+ } catch (error) {
257
+ console.error("Error in generateImagesForStory:", error);
258
+ }
259
+ };
260
+
261
+ // Filter out choice segments for display
262
+ const nonChoiceSegments = storySegments.filter(
263
+ (segment) => !segment.isChoice
264
+ );
265
+
266
+ const handleCaptureStory = async () => {
267
+ await downloadStoryImage(
268
+ storyContainerRef,
269
+ `dont-lookup-story-${Date.now()}.png`
270
+ );
271
+ };
272
 
 
273
  return (
274
+ <Box
275
+ sx={{
276
+ height: "100vh",
277
+ width: "100%",
278
+ display: "flex",
279
+ flexDirection: "column",
280
+ backgroundColor: "background.paper",
281
+ }}
282
  >
283
  <Box
284
  sx={{
285
+ position: "relative",
286
+ height: "100%",
287
+ display: "flex",
288
+ flexDirection: "column",
289
+ backgroundColor: "#121212",
290
  }}
291
  >
292
+ {/* Narration control - always visible in top right */}
293
+ <Box
294
+ sx={{
295
+ position: "fixed",
296
+ top: 16,
297
+ right: 16,
298
+ zIndex: 1000,
299
+ }}
300
+ >
301
+ <Tooltip
302
+ title={
303
+ isNarrationEnabled
304
+ ? "Désactiver la narration"
305
+ : "Activer la narration"
306
+ }
307
+ >
308
+ <IconButton
309
+ onClick={() => setIsNarrationEnabled(!isNarrationEnabled)}
310
+ sx={{
311
+ backgroundColor: isNarrationEnabled
312
+ ? "primary.main"
313
+ : "rgba(255, 255, 255, 0.1)",
314
+ color: "white",
315
+ "&:hover": {
316
+ backgroundColor: isNarrationEnabled
317
+ ? "primary.dark"
318
+ : "rgba(255, 255, 255, 0.2)",
319
+ },
320
+ }}
321
+ >
322
+ {isNarrationEnabled ? <VolumeUpIcon /> : <VolumeOffIcon />}
323
+ </IconButton>
324
+ </Tooltip>
325
+ </Box>
326
+
327
+ {/* Progress bar */}
328
+ {isLoading && (
329
+ <LinearProgress
330
+ sx={{
331
+ position: "absolute",
332
+ top: 0,
333
+ left: 0,
334
+ right: 0,
335
+ zIndex: 1,
336
+ }}
337
+ />
338
+ )}
339
+
340
+ {/* Comic layout */}
341
+ <Box
342
+ ref={storyContainerRef}
343
+ sx={{
344
+ flex: 1,
345
+ overflow: "hidden",
346
+ position: "relative",
347
+ p: 4,
348
+ }}
349
+ >
350
+ <ComicLayout
351
+ segments={storySegments}
352
+ choices={currentChoices}
353
+ onChoice={handleChoice}
354
+ isLoading={isLoading || isNarratorSpeaking}
355
+ showScreenshot={
356
+ currentChoices.length === 1 &&
357
+ currentChoices[0].text === "Réessayer"
358
+ }
359
+ onScreenshot={() => downloadStoryImage(storyContainerRef)}
360
+ />
361
+ </Box>
362
  </Box>
363
+ </Box>
364
  );
365
+ }
366
 
367
  export default Game;
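
The localStorage persistence pattern used for the narration toggle above, distilled into a standalone hook for illustration; usePersistentFlag is a hypothetical name, Game.jsx inlines the same logic with NARRATION_ENABLED_KEY:

    import { useEffect, useState } from "react";

    // Same pattern Game.jsx uses: read once lazily, default to true when the key
    // is absent, and write the value back on every change.
    export function usePersistentFlag(key, defaultValue = true) {
      const [value, setValue] = useState(() => {
        const stored = localStorage.getItem(key);
        return stored === null ? defaultValue : stored === "true";
      });
      useEffect(() => {
        localStorage.setItem(key, value);
      }, [key, value]);
      return [value, setValue];
    }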
client/src/pages/game/App.jsx CHANGED
@@ -21,6 +21,8 @@ import {
21
  import { LAYOUTS } from "../../layouts/config";
22
  import html2canvas from "html2canvas";
23
  import { useConversation } from "@11labs/react";
 
 
24
 
25
  // Get API URL from environment or default to localhost in development
26
  const isHFSpace = window.location.hostname.includes("hf.space");
@@ -28,8 +30,6 @@ const API_URL = isHFSpace
28
  ? "" // Relative URL for HF Spaces
29
  : import.meta.env.VITE_API_URL || "http://localhost:8000";
30
 
31
- // Generate a unique client ID
32
- const CLIENT_ID = `client_${Math.random().toString(36).substring(2)}`;
33
  // Constants
34
  const AGENT_ID = "2MF9st3s1mNFbX01Y106";
35
 
@@ -37,9 +37,7 @@ const WS_URL = import.meta.env.VITE_WS_URL || "ws://localhost:8000/ws";
37
 
38
  // Create axios instance with default config
39
  const api = axios.create({
40
- headers: {
41
- "x-client-id": CLIENT_ID,
42
- },
43
  // Add baseURL for HF Spaces
44
  ...(isHFSpace && {
45
  baseURL: window.location.origin,
@@ -80,15 +78,14 @@ function App() {
80
  const [isLoading, setIsLoading] = useState(false);
81
  const [isDebugMode, setIsDebugMode] = useState(false);
82
  const [isRecording, setIsRecording] = useState(false);
83
- const [isNarratorSpeaking, setIsNarratorSpeaking] = useState(false);
84
  const [wsConnected, setWsConnected] = useState(false);
85
 
86
- const audioRef = useRef(new Audio());
87
  const comicContainerRef = useRef(null);
88
- const narrationAudioRef = useRef(new Audio()); // Separate audio ref for narration
89
- const wsRef = useRef(null);
90
  const mediaRecorderRef = useRef(null);
91
  const audioChunksRef = useRef([]);
 
 
 
92
 
93
  // Start the story on first render
94
  useEffect(() => {
@@ -122,9 +119,8 @@ function App() {
122
 
123
  if (data.type === "audio") {
124
  // Stop any ongoing narration
125
- if (narrationAudioRef.current) {
126
- narrationAudioRef.current.pause();
127
- narrationAudioRef.current.currentTime = 0;
128
  }
129
 
130
  // Play the conversation audio response
@@ -132,8 +128,7 @@ function App() {
132
  `data:audio/mpeg;base64,${data.audio}`
133
  ).then((r) => r.blob());
134
  const audioUrl = URL.createObjectURL(audioBlob);
135
- audioRef.current.src = audioUrl;
136
- await audioRef.current.play();
137
  }
138
  };
139
  };
@@ -154,8 +149,7 @@ function App() {
154
  // Play the conversation audio response
155
  const audioBlob = new Blob([response.audio], { type: "audio/mpeg" });
156
  const audioUrl = URL.createObjectURL(audioBlob);
157
- audioRef.current.src = audioUrl;
158
- await audioRef.current.play();
159
  }
160
  },
161
  clientTools: {
@@ -163,7 +157,6 @@ function App() {
163
  console.log("AI made decision:", decision);
164
  // End the ElevenLabs conversation
165
  await conversation.endSession();
166
- setIsConversationMode(false);
167
  setIsRecording(false);
168
  // Handle the choice and generate next story part
169
  await handleChoice(parseInt(decision));
@@ -177,14 +170,13 @@ function App() {
177
  const startRecording = async () => {
178
  try {
179
  // Stop narration audio if it's playing
180
- if (narrationAudioRef.current) {
181
- narrationAudioRef.current.pause();
182
- narrationAudioRef.current.currentTime = 0;
183
  }
184
  // Also stop any conversation audio if playing
185
- if (audioRef.current) {
186
- audioRef.current.pause();
187
- audioRef.current.currentTime = 0;
188
  }
189
 
190
  if (!isConversationMode) {
@@ -217,9 +209,8 @@ function App() {
217
  }
218
 
219
  // Only stop narration if it's actually playing
220
- if (!isConversationMode && narrationAudioRef.current) {
221
- narrationAudioRef.current.pause();
222
- narrationAudioRef.current.currentTime = 0;
223
  }
224
 
225
  const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
@@ -483,41 +474,6 @@ function App() {
483
  }
484
  };
485
 
486
- // Function to play the audio
487
- const playAudio = async (text) => {
488
- try {
489
- // Strip markdown markers and chips from the text
490
- const cleanText = text.replace(/\*\*(.*?)\*\*/g, "$1");
491
-
492
- // Call the text-to-speech API
493
- const response = await api.post(`${API_URL}/api/text-to-speech`, {
494
- text: cleanText,
495
- });
496
-
497
- if (response.data.success) {
498
- // Create a Blob from the base64 data
499
- const audioBlob = await fetch(
500
- `data:audio/mpeg;base64,${response.data.audio_base64}`
501
- ).then((r) => r.blob());
502
- const audioUrl = URL.createObjectURL(audioBlob);
503
-
504
- // Update the audio source
505
- audioRef.current.src = audioUrl;
506
- audioRef.current.play();
507
- setIsNarratorSpeaking(true);
508
-
509
- // Clean up the URL once the audio has finished
510
- audioRef.current.onended = () => {
511
- // Event to indicate that the audio has finished playing
512
- setIsNarratorSpeaking(false);
513
- URL.revokeObjectURL(audioUrl);
514
- };
515
- }
516
- } catch (error) {
517
- console.error("Error playing audio:", error);
518
- }
519
- };
520
-
521
  const handleStoryAction = async (action, choiceId = null) => {
522
  setIsLoading(true);
523
  try {
@@ -567,7 +523,7 @@ function App() {
567
  setIsLoading(false);
568
 
569
  // 6. Play the audio for the new segment
570
- await playAudio(response.data.story_text);
571
 
572
  // 7. Generate the images in parallel
573
  if (
 
21
  import { LAYOUTS } from "../../layouts/config";
22
  import html2canvas from "html2canvas";
23
  import { useConversation } from "@11labs/react";
24
+ import { CLIENT_ID, getDefaultHeaders } from "../../utils/session";
25
+ import { useNarrator } from "../../hooks/useNarrator";
26
 
27
  // Get API URL from environment or default to localhost in development
28
  const isHFSpace = window.location.hostname.includes("hf.space");
 
30
  ? "" // Relative URL for HF Spaces
31
  : import.meta.env.VITE_API_URL || "http://localhost:8000";
32
 
 
 
33
  // Constants
34
  const AGENT_ID = "2MF9st3s1mNFbX01Y106";
35
 
 
37
 
38
  // Create axios instance with default config
39
  const api = axios.create({
40
+ headers: getDefaultHeaders(),
 
 
41
  // Ajouter baseURL pour HF Spaces
42
  ...(isHFSpace && {
43
  baseURL: window.location.origin,
 
78
  const [isLoading, setIsLoading] = useState(false);
79
  const [isDebugMode, setIsDebugMode] = useState(false);
80
  const [isRecording, setIsRecording] = useState(false);
 
81
  const [wsConnected, setWsConnected] = useState(false);
82
 
 
83
  const comicContainerRef = useRef(null);
 
 
84
  const mediaRecorderRef = useRef(null);
85
  const audioChunksRef = useRef([]);
86
+ const wsRef = useRef(null);
87
+
88
+ const { isNarratorSpeaking, playNarration, stopNarration } = useNarrator();
89
 
90
  // Start the story on first render
91
  useEffect(() => {
 
119
 
120
  if (data.type === "audio") {
121
  // Stop any ongoing narration
122
+ if (isNarratorSpeaking) {
123
+ stopNarration();
 
124
  }
125
 
126
  // Play the conversation audio response
 
128
  `data:audio/mpeg;base64,${data.audio}`
129
  ).then((r) => r.blob());
130
  const audioUrl = URL.createObjectURL(audioBlob);
131
+ playNarration(audioUrl);
 
132
  }
133
  };
134
  };
 
149
  // Play the conversation audio response
150
  const audioBlob = new Blob([response.audio], { type: "audio/mpeg" });
151
  const audioUrl = URL.createObjectURL(audioBlob);
152
+ playNarration(audioUrl);
 
153
  }
154
  },
155
  clientTools: {
 
157
  console.log("AI made decision:", decision);
158
  // End the ElevenLabs conversation
159
  await conversation.endSession();
 
160
  setIsRecording(false);
161
  // Handle the choice and generate next story part
162
  await handleChoice(parseInt(decision));
 
170
  const startRecording = async () => {
171
  try {
172
  // Stop narration audio if it's playing
173
+ if (isNarratorSpeaking) {
174
+ stopNarration();
 
175
  }
176
  // Also stop any conversation audio if playing
177
+ if (conversation.audioRef.current) {
178
+ conversation.audioRef.current.pause();
179
+ conversation.audioRef.current.currentTime = 0;
180
  }
181
 
182
  if (!isConversationMode) {
 
209
  }
210
 
211
  // Only stop narration if it's actually playing
212
+ if (!isConversationMode && isNarratorSpeaking) {
213
+ stopNarration();
 
214
  }
215
 
216
  const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
 
474
  }
475
  };
476
 
 
 
477
  const handleStoryAction = async (action, choiceId = null) => {
478
  setIsLoading(true);
479
  try {
 
523
  setIsLoading(false);
524
 
525
  // 6. Play the audio for the new segment
526
+ await playNarration(response.data.story_text);
527
 
528
  // 7. Generate the images in parallel
529
  if (
client/src/utils/api.js ADDED
@@ -0,0 +1,115 @@
1
+ import axios from "axios";
2
+ import { getDefaultHeaders } from "./session";
3
+
4
+ // Get API URL from environment or default to localhost in development
5
+ const isHFSpace = window.location.hostname.includes("hf.space");
6
+ const API_URL = isHFSpace
7
+ ? "" // URL relative pour HF Spaces
8
+ : import.meta.env.VITE_API_URL || "http://localhost:8000";
9
+
10
+ // Create axios instance with default config
11
+ const api = axios.create({
12
+ baseURL: API_URL,
13
+ headers: getDefaultHeaders(),
14
+ ...(isHFSpace && {
15
+ baseURL: window.location.origin,
16
+ }),
17
+ });
18
+
19
+ // Error handling middleware
20
+ const handleApiError = (error) => {
21
+ console.error("API Error:", {
22
+ status: error.response?.status,
23
+ statusText: error.response?.statusText,
24
+ data: error.response?.data,
25
+ config: {
26
+ method: error.config?.method,
27
+ url: error.config?.url,
28
+ data: error.config?.data,
29
+ },
30
+ });
31
+
32
+ if (error.response) {
33
+ // The request was made and the server responded with an error status
34
+ throw new Error(
35
+ error.response.data?.message ||
36
+ `Erreur ${error.response.status}: ${error.response.statusText}`
37
+ );
38
+ } else if (error.request) {
39
+ // The request was made but no response was received
40
+ throw new Error("Aucune réponse du serveur");
41
+ } else {
42
+ // An error occurred while setting up the request
43
+ throw new Error(
44
+ "Une erreur est survenue lors de la configuration de la requête"
45
+ );
46
+ }
47
+ };
48
+
49
+ // Story related API calls
50
+ export const storyApi = {
51
+ start: async () => {
52
+ try {
53
+ console.log("Calling start API...");
54
+ const response = await api.post("/api/chat", {
55
+ message: "restart",
56
+ });
57
+ console.log("Start API response:", response.data);
58
+ return response.data;
59
+ } catch (error) {
60
+ return handleApiError(error);
61
+ }
62
+ },
63
+
64
+ makeChoice: async (choiceId) => {
65
+ try {
66
+ console.log("Making choice:", choiceId);
67
+ const response = await api.post("/api/chat", {
68
+ message: "choice",
69
+ choice_id: choiceId,
70
+ });
71
+ console.log("Choice API response:", response.data);
72
+ return response.data;
73
+ } catch (error) {
74
+ return handleApiError(error);
75
+ }
76
+ },
77
+
78
+ generateImage: async (prompt, width = 512, height = 512) => {
79
+ try {
80
+ console.log("Generating image with prompt:", prompt);
81
+ const response = await api.post("/api/generate-image", {
82
+ prompt,
83
+ width,
84
+ height,
85
+ });
86
+ console.log("Image generation response:", {
87
+ success: response.data.success,
88
+ hasImage: !!response.data.image_base64,
89
+ });
90
+ return response.data;
91
+ } catch (error) {
92
+ return handleApiError(error);
93
+ }
94
+ },
95
+
96
+ // Narration related API calls
97
+ narrate: async (text) => {
98
+ try {
99
+ console.log("Requesting narration for:", text);
100
+ const response = await api.post("/api/text-to-speech", {
101
+ text,
102
+ });
103
+ console.log("Narration response received");
104
+ return response.data;
105
+ } catch (error) {
106
+ return handleApiError(error);
107
+ }
108
+ },
109
+ };
110
+
111
+ // WebSocket URL
112
+ export const WS_URL = import.meta.env.VITE_WS_URL || "ws://localhost:8000/ws";
113
+
114
+ // Export the base API instance for other uses
115
+ export default api;
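
A usage sketch of storyApi, roughly following the order of calls in Game.jsx's handleStoryAction; the wrapper function and logging are illustrative, the endpoints and fields come from this commit:

    import { storyApi } from "./api";

    async function playOneRound() {
      try {
        const story = await storyApi.start();                 // POST /api/chat { message: "restart" }
        // On a last step choices may be empty; this sketch assumes at least one choice.
        const next = await storyApi.makeChoice(story.choices[0].id);
        const img = await storyApi.generateImage(next.image_prompts[0], 512, 512);
        if (img.success) console.log("image base64 length:", img.image_base64.length);
        const audio = await storyApi.narrate(next.story_text); // POST /api/text-to-speech
        console.log("received audio?", Boolean(audio.audio_base64));
      } catch (err) {
        // handleApiError has already converted axios errors into plain Error messages
        console.error(err.message);
      }
    }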
client/src/utils/session.js ADDED
@@ -0,0 +1,9 @@
1
+ // Generate unique IDs for client and session
2
+ export const CLIENT_ID = `client_${Math.random().toString(36).substring(2)}`;
3
+ export const SESSION_ID = `session_${Math.random().toString(36).substring(2)}`;
4
+
5
+ // Create default headers for API requests
6
+ export const getDefaultHeaders = () => ({
7
+ "x-client-id": CLIENT_ID,
8
+ "x-session-id": SESSION_ID,
9
+ });
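
A minimal consumer of this module, matching how api.js and the other axios instances in this commit use it (the logged value is illustrative):

    import axios from "axios";
    import { CLIENT_ID, getDefaultHeaders } from "./session";

    // Every request from this tab carries the same x-client-id / x-session-id pair,
    // so the server can group requests without cookies. A page reload generates new IDs.
    const api = axios.create({ headers: getDefaultHeaders() });
    console.log(CLIENT_ID); // e.g. "client_k3j2..." (random, not stable across reloads)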
server/api/models.py CHANGED
@@ -3,16 +3,32 @@ from typing import List, Optional
3
 
4
  class Choice(BaseModel):
5
  id: int
6
- text: str
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
 
 
8
  class StoryResponse(BaseModel):
9
- story_text: str = Field(description="The story text with proper nouns in bold using ** markdown")
10
  choices: List[Choice]
11
  radiation_level: int = Field(description="Current radiation level from 0 to 10")
12
  is_victory: bool = Field(description="Whether this segment ends in Sarah's victory", default=False)
13
  is_first_step: bool = Field(description="Whether this is the first step of the story", default=False)
14
  is_last_step: bool = Field(description="Whether this is the last step (victory or death)", default=False)
15
- image_prompts: List[str] = Field(description="List of 1 to 3 comic panel descriptions that illustrate the key moments of the scene", min_items=1, max_items=3)
16
 
17
  class ChatMessage(BaseModel):
18
  message: str
 
3
 
4
  class Choice(BaseModel):
5
  id: int
6
+ text: str = Field(description="The text of the choice with proper nouns in bold using ** markdown. No more than 6 words.")
7
+
8
+ # New response models for story generation steps
9
+ class StoryTextResponse(BaseModel):
10
+ story_text: str = Field(description="The story text with proper nouns in bold using ** markdown. No more than 15 words.")
11
+
12
+ class StoryPromptsResponse(BaseModel):
13
+ image_prompts: List[str] = Field(description="List of 2 to 4 comic panel descriptions that illustrate the key moments of the scene. Use the word 'Sarah' only when referring to her.", min_items=1, max_items=4)
14
+
15
+ class StoryMetadataResponse(BaseModel):
16
+ choices: List[str] = Field(description="Exactly two possible choices for the player", min_items=2, max_items=2)
17
+ is_victory: bool = Field(description="Whether this segment ends in Sarah's victory", default=False)
18
+ radiation_increase: int = Field(description="How much radiation this segment adds (0-3)", ge=0, le=3, default=1)
19
+ is_last_step: bool = Field(description="Whether this is the last step (victory or death)", default=False)
20
+ time: str = Field(description="Current in-game time in 24h format (HH:MM). Time passes realistically based on actions.")
21
+ location: str = Field(description="Current location, using bold for proper nouns (e.g., 'Inside **Vault 15**', 'Streets of **New Haven**').")
22
 
23
+ # Complete story response combining all parts
24
  class StoryResponse(BaseModel):
25
+ story_text: str = Field(description="The story text with proper nouns in bold using ** markdown. No more than 15 words.")
26
  choices: List[Choice]
27
  radiation_level: int = Field(description="Current radiation level from 0 to 10")
28
  is_victory: bool = Field(description="Whether this segment ends in Sarah's victory", default=False)
29
  is_first_step: bool = Field(description="Whether this is the first step of the story", default=False)
30
  is_last_step: bool = Field(description="Whether this is the last step (victory or death)", default=False)
31
+ image_prompts: List[str] = Field(description="List of 2 to 4 comic panel descriptions that illustrate the key moments of the scene. Use the word 'Sarah' only when referring to her.", min_items=1, max_items=4)
32
 
33
  class ChatMessage(BaseModel):
34
  message: str
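
As consumed by the client, the combined StoryResponse serializes to JSON roughly like this; the values below are a hand-written illustration of the field shapes defined above, not actual server output:

    // Illustrative payload returned by /api/chat and read in Game.jsx.
    const storyData = {
      story_text: "**Sarah** seals the hatch behind her.",
      choices: [
        { id: 1, text: "Follow the **maintenance tunnel**" },
        { id: 2, text: "Climb toward the **surface**" },
      ],
      radiation_level: 3,
      is_victory: false,
      is_first_step: false,
      is_last_step: false,
      image_prompts: [
        "Sarah sealing a rusted hatch, dim red light",
        "A dark maintenance tunnel stretching ahead",
      ],
    };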
server/api/routes/chat.py CHANGED
@@ -46,7 +46,13 @@ def get_chat_router(session_manager: SessionManager, story_generator):
46
  llm_response.image_prompts = [llm_response.image_prompts[0]]
47
 
48
  # Add segment to history
49
- game_state.add_to_history(llm_response.story_text, previous_choice, llm_response.image_prompts)
 
 
 
 
 
 
50
 
51
  # For the first step, keep only a single image prompt
52
  if game_state.story_beat == 0 and len(llm_response.image_prompts) > 1:
 
46
  llm_response.image_prompts = [llm_response.image_prompts[0]]
47
 
48
  # Add segment to history
49
+ game_state.add_to_history(
50
+ llm_response.story_text,
51
+ previous_choice,
52
+ llm_response.image_prompts,
53
+ llm_response.time,
54
+ llm_response.location
55
+ )
56
 
57
  # For the first step, keep only a single image prompt
58
  if game_state.story_beat == 0 and len(llm_response.image_prompts) > 1:
server/core/game_logic.py CHANGED
@@ -1,14 +1,16 @@
1
  from pydantic import BaseModel, Field
2
- from typing import List
3
  from langchain.output_parsers import PydanticOutputParser, OutputFixingParser
4
  from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate
5
  import os
6
  import asyncio
7
 
8
- from core.prompts.system import SYSTEM_PROMPT, SARAH_DESCRIPTION
9
  from core.prompts.cinematic import CINEMATIC_SYSTEM_PROMPT
10
- from core.prompts.image_style import IMAGE_STYLE_PROMPT, IMAGE_STYLE_PREFIX
11
  from services.mistral_client import MistralClient
 
 
12
 
13
  # Game constants
14
  MAX_RADIATION = 10
@@ -58,135 +60,81 @@ class StoryLLMResponse(BaseModel):
58
  choices: List[str] = Field(description="Exactly two possible choices for the player", min_items=2, max_items=2)
59
  is_victory: bool = Field(description="Whether this segment ends in Sarah's victory", default=False)
60
  radiation_increase: int = Field(description="How much radiation this segment adds (0-3)", ge=0, le=3, default=1)
61
- image_prompts: List[str] = Field(description="List of 1 to 3 comic panel descriptions that illustrate the key moments of the scene", min_items=1, max_items=3)
62
  is_last_step: bool = Field(description="Whether this is the last step (victory or death)", default=False)
63
  time: str = Field(description="Current in-game time in 24h format (HH:MM). Time passes realistically based on actions.", default=STARTING_TIME)
64
  location: str = Field(description="Current location, using bold for proper nouns (e.g., 'Inside **Vault 15**', 'Streets of **New Haven**').", default=STARTING_LOCATION)
65
 
66
- # Prompt templates
67
  class StoryGenerator:
68
  def __init__(self, api_key: str, model_name: str = "mistral-small"):
69
- self.parser = PydanticOutputParser(pydantic_object=StoryLLMResponse)
70
  self.mistral_client = MistralClient(api_key=api_key, model_name=model_name)
71
-
72
- self.fixing_parser = OutputFixingParser.from_llm(
73
- parser=self.parser,
74
- llm=self.mistral_client.fixing_model
75
- )
76
-
77
- self.prompt = self._create_prompt()
78
-
79
- def _create_prompt(self) -> ChatPromptTemplate:
80
- system_template = """
81
- {SYSTEM_PROMPT}
82
- {ART_SYSTEM_PROMPT}
83
- {format_instructions}"""
84
-
85
- human_template = """Current story beat: {story_beat}
86
- Current radiation level: {radiation_level}/10
87
- Current time: {current_time}
88
- Current location: {current_location}
89
- Previous choice: {previous_choice}
90
-
91
- Story so far:
92
- {story_history}
93
 
94
- Generate the next story segment and choices. Make sure it advances the plot and never repeats previous descriptions or situations. If this is story_beat 0, create an atmospheric introduction that sets up the horror but doesn't kill Sarah (radiation_increase MUST be 0). Otherwise, create a brutal and potentially lethal segment.
95
-
96
- Time should advance realistically based on the actions taken. Location should change based on movement and choices."""
97
-
98
- return ChatPromptTemplate(
99
- messages=[
100
- SystemMessagePromptTemplate.from_template(system_template),
101
- HumanMessagePromptTemplate.from_template(human_template)
102
- ],
103
- partial_variables={"format_instructions": self.parser.get_format_instructions()}
104
- )
105
-
106
- async def generate_story_segment(self, game_state: GameState, previous_choice: str) -> StoryLLMResponse:
107
- # Format story history as a narrative storyboard
108
- story_history = ""
109
- if game_state.story_history:
110
- segments = []
111
- for entry in game_state.story_history:
112
- segment = entry['segment']
113
- time_location = f"[{entry['time']} - {entry['location']}]"
114
- image_descriptions = "\nVisual panels:\n" + "\n".join(f"- {prompt}" for prompt in entry['image_prompts'])
115
- segments.append(f"{time_location}\n{segment}{image_descriptions}")
116
 
117
- story_history = "\n\n---\n\n".join(segments)
118
- story_history += "\n\nLast choice made: " + previous_choice
 
119
 
120
- messages = self.prompt.format_messages(
121
  story_beat=game_state.story_beat,
122
  radiation_level=game_state.radiation_level,
123
  current_time=game_state.current_time,
124
  current_location=game_state.current_location,
125
  previous_choice=previous_choice,
126
- story_history=story_history,
127
- SYSTEM_PROMPT=SYSTEM_PROMPT,
128
- ART_SYSTEM_PROMPT=CINEMATIC_SYSTEM_PROMPT
129
  )
130
 
131
- max_retries = 3
132
- retry_count = 0
133
 
134
- while retry_count < max_retries:
135
- try:
136
- response_content = await self.mistral_client.generate_story(messages)
137
- try:
138
- # Try to parse with standard parser first
139
- segment = self.parser.parse(response_content)
140
-
141
- # Enrich image prompts with Sarah's description when needed
142
- segment.image_prompts = [enrich_prompt_with_sarah_description(prompt) for prompt in segment.image_prompts]
143
-
144
- # Add style prefix to all image prompts
145
- segment.image_prompts = [format_image_prompt(prompt, segment.time, segment.location) for prompt in segment.image_prompts]
146
-
147
- # Check if this is a victory or death (radiation) step
148
- is_death = game_state.radiation_level + segment.radiation_increase >= MAX_RADIATION
149
- if is_death or segment.is_victory:
150
- segment.is_last_step = True
151
- # Force only one image prompt for victory/death scenes
152
- if len(segment.image_prompts) > 1:
153
- segment.image_prompts = [segment.image_prompts[0]]
154
-
155
- except Exception as parse_error:
156
- print(f"Error parsing response: {str(parse_error)}")
157
- print("Attempting to fix output...")
158
- try:
159
- # Try with fixing parser
160
- segment = self.fixing_parser.parse(response_content)
161
- # Enrich image prompts here too
162
- segment.image_prompts = [enrich_prompt_with_sarah_description(prompt) for prompt in segment.image_prompts]
163
- # Add style prefix to all image prompts
164
- segment.image_prompts = [format_image_prompt(prompt, segment.time, segment.location) for prompt in segment.image_prompts]
165
- except Exception as fix_error:
166
- print(f"Error fixing output: {str(fix_error)}")
167
- retry_count += 1
168
- if retry_count < max_retries:
169
- print(f"Retrying generation (attempt {retry_count + 1}/{max_retries})...")
170
- await asyncio.sleep(2 * retry_count) # Exponential backoff
171
- continue
172
- raise fix_error
173
-
174
- # If we get here, parsing succeeded
175
- if game_state.story_beat == 0:
176
- segment.radiation_increase = 0
177
- segment.is_last_step = False
178
- return segment
179
-
180
- except Exception as e:
181
- print(f"Error in story generation: {str(e)}")
182
- retry_count += 1
183
- if retry_count < max_retries:
184
- print(f"Retrying generation (attempt {retry_count + 1}/{max_retries})...")
185
- await asyncio.sleep(2 * retry_count) # Exponential backoff
186
- continue
187
- raise e
188
 
189
- raise Exception(f"Failed to generate valid story segment after {max_retries} attempts")
190
 
191
  async def transform_story_to_art_prompt(self, story_text: str) -> str:
192
  return await self.mistral_client.transform_prompt(story_text, CINEMATIC_SYSTEM_PROMPT)
 
1
  from pydantic import BaseModel, Field
2
+ from typing import List, Tuple
3
  from langchain.output_parsers import PydanticOutputParser, OutputFixingParser
4
  from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate
5
  import os
6
  import asyncio
7
 
8
+ from core.prompts.system import SARAH_DESCRIPTION
9
  from core.prompts.cinematic import CINEMATIC_SYSTEM_PROMPT
10
+ from core.prompts.image_style import IMAGE_STYLE_PREFIX
11
  from services.mistral_client import MistralClient
12
+ from api.models import StoryTextResponse, StoryPromptsResponse, StoryMetadataResponse
13
+ from core.story_generators import TextGenerator, ImagePromptsGenerator, MetadataGenerator
14
 
15
  # Game constants
16
  MAX_RADIATION = 10
 
60
  choices: List[str] = Field(description="Exactly two possible choices for the player", min_items=2, max_items=2)
61
  is_victory: bool = Field(description="Whether this segment ends in Sarah's victory", default=False)
62
  radiation_increase: int = Field(description="How much radiation this segment adds (0-3)", ge=0, le=3, default=1)
63
+ image_prompts: List[str] = Field(description="List of 1 to 4 comic panel descriptions that illustrate the key moments of the scene", min_items=1, max_items=4)
64
  is_last_step: bool = Field(description="Whether this is the last step (victory or death)", default=False)
65
  time: str = Field(description="Current in-game time in 24h format (HH:MM). Time passes realistically based on actions.", default=STARTING_TIME)
66
  location: str = Field(description="Current location, using bold for proper nouns (e.g., 'Inside **Vault 15**', 'Streets of **New Haven**').", default=STARTING_LOCATION)
67
 
68
+ # Story generator
69
  class StoryGenerator:
70
  def __init__(self, api_key: str, model_name: str = "mistral-small"):
 
71
  self.mistral_client = MistralClient(api_key=api_key, model_name=model_name)
72
+ self.text_generator = TextGenerator(self.mistral_client)
73
+ self.prompts_generator = ImagePromptsGenerator(self.mistral_client)
74
+ self.metadata_generator = MetadataGenerator(self.mistral_client)
75
 
76
+ def _format_story_history(self, game_state: GameState) -> str:
77
+ """Format the story history for the prompt."""
78
+ if not game_state.story_history:
79
+ return ""
80
 
81
+ segments = []
82
+ for entry in game_state.story_history:
83
+ segments.append(entry['segment'])
84
 
85
+ story_history = "\n\n---\n\n".join(segments)
86
+ return story_history
87
+
88
+ async def generate_story_segment(self, game_state: GameState, previous_choice: str) -> StoryLLMResponse:
89
+ """Generate a complete story segment in several steps."""
90
+ # 1. Generate the story text
91
+ story_history = self._format_story_history(game_state)
92
+ text_response = await self.text_generator.generate(
93
  story_beat=game_state.story_beat,
94
  radiation_level=game_state.radiation_level,
95
  current_time=game_state.current_time,
96
  current_location=game_state.current_location,
97
  previous_choice=previous_choice,
98
+ story_history=story_history
 
 
99
  )
100
 
101
+ # 2. Generate the image prompts and the metadata in parallel
102
+ prompts_task = self.prompts_generator.generate(text_response.story_text)
103
+ metadata_task = self.metadata_generator.generate(
104
+ story_text=text_response.story_text,
105
+ current_time=game_state.current_time,
106
+ current_location=game_state.current_location,
107
+ story_beat=game_state.story_beat
108
+ )
109
 
110
+ prompts_response, metadata_response = await asyncio.gather(prompts_task, metadata_task)
111
+
112
+ # 3. Combine the results
113
+ response = StoryLLMResponse(
114
+ story_text=text_response.story_text,
115
+ choices=metadata_response.choices,
116
+ is_victory=metadata_response.is_victory,
117
+ radiation_increase=metadata_response.radiation_increase,
118
+ image_prompts=[format_image_prompt(prompt, metadata_response.time, metadata_response.location)
119
+ for prompt in prompts_response.image_prompts],
120
+ is_last_step=metadata_response.is_last_step,
121
+ time=metadata_response.time,
122
+ location=metadata_response.location
123
+ )
124
 
125
+ # 4. Post-processing
126
+ if game_state.story_beat == 0:
127
+ response.radiation_increase = 0
128
+ response.is_last_step = False
129
+
130
+ # Check for death by radiation
131
+ is_death = game_state.radiation_level + response.radiation_increase >= MAX_RADIATION
132
+ if is_death or response.is_victory:
133
+ response.is_last_step = True
134
+ if len(response.image_prompts) > 1:
135
+ response.image_prompts = [response.image_prompts[0]]
136
+
137
+ return response
138
 
139
  async def transform_story_to_art_prompt(self, story_text: str) -> str:
140
  return await self.mistral_client.transform_prompt(story_text, CINEMATIC_SYSTEM_PROMPT)
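The refactor above splits generation into three steps: the story text is produced first, then the image prompts and the metadata are generated concurrently with asyncio.gather, since both depend only on that text. A minimal standalone sketch of the pattern (the functions below are stand-ins, not the real TextGenerator/ImagePromptsGenerator/MetadataGenerator classes):

import asyncio

async def generate_prompts(story_text: str) -> list:
    await asyncio.sleep(0.1)  # stand-in for an LLM call
    return [f"[wide shot] {story_text}"]

async def generate_metadata(story_text: str) -> dict:
    await asyncio.sleep(0.1)  # stand-in for an LLM call
    return {"choices": ["Go left", "Go right"], "time": "18:30"}

async def build_segment(story_text: str):
    # Both coroutines start immediately and are awaited together,
    # so the two LLM calls overlap instead of running back to back.
    prompts, metadata = await asyncio.gather(
        generate_prompts(story_text),
        generate_metadata(story_text),
    )
    return prompts, metadata

print(asyncio.run(build_segment("Sarah enters the ruined vault.")))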
server/core/prompts/cinematic.py CHANGED
@@ -1,6 +1,9 @@
1
  from core.prompts.system import SARAH_DESCRIPTION
2
 
3
- CINEMATIC_SYSTEM_PROMPT = f"""You are a comic book panel description generator. Your role is to create vivid, cinematic descriptions for comic panels that will be turned into images.
4
 
5
  {SARAH_DESCRIPTION}
6
 
@@ -11,6 +14,28 @@ Each panel description should:
11
  4. Include mood and lighting
12
  5. Focus on the most dramatic or meaningful moment
13
 
14
  FORMAT:
15
  "[shot type] [scene description]"
16
 
@@ -23,6 +48,9 @@ Always maintain consistency with Sarah's appearance and the comic book style."""
23
 
24
 
25
 
26
  # CINEMATIC_SYSTEM_PROMPT = """
27
  # 3. Generate 1 to 3 comic panels based on narrative needs:
28
 
 
1
  from core.prompts.system import SARAH_DESCRIPTION
2
 
3
+ CINEMATIC_SYSTEM_PROMPT = f"""
4
+
5
+ You are a comic book panel description generator.
6
+ Your role is to create vivid, cinematic descriptions for comic panels that will be turned into images.
7
 
8
  {SARAH_DESCRIPTION}
9
 
 
14
  4. Include mood and lighting
15
  5. Focus on the most dramatic or meaningful moment
16
 
17
+
18
+ ANGLES AND MOVEMENT:
19
+ - High angle: Vulnerability, weakness
20
+ - Low angle: Power, threat
21
+ - Dutch angle: Tension, disorientation
22
+ - Over shoulder: POV, surveillance
23
+
24
+ VISUAL STORYTELLING TOOLS:
25
+ - Focus on story-relevant details:
26
+ * Objects that will be important later
27
+ * Environmental clues
28
+ * Character reactions
29
+ * Symbolic elements
30
+
31
+ - Dynamic composition:
32
+ * Frame within frame (through doorways, windows)
33
+ * Reflections and shadows
34
+ * Foreground elements for depth
35
+ * Leading lines
36
+ * Rule of thirds
37
+
38
+
39
  FORMAT:
40
  "[shot type] [scene description]"
41
 
 
48
 
49
 
50
 
51
+
52
+
53
+
54
  # CINEMATIC_SYSTEM_PROMPT = """
55
  # 3. Generate 1 to 3 comic panels based on narrative needs:
56
 
server/core/prompts/image_style.py CHANGED
@@ -18,4 +18,4 @@ EXAMPLES:
18
 
19
  Always maintain consistency with Sarah's appearance and the comic book style."""
20
 
21
- IMAGE_STYLE_PREFIX = "Moebius, Hergé style, color comic panel -- "
 
18
 
19
  Always maintain consistency with Sarah's appearance and the comic book style."""
20
 
21
+ IMAGE_STYLE_PREFIX = "François Schuiten comic panel -- "
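game_logic.py now routes every panel description through format_image_prompt together with this prefix. That helper is not shown in the commit; a plausible sketch, assuming it simply prepends the style prefix and appends the scene metadata:

# Assumed behaviour of format_image_prompt (the function is not part of this diff).
IMAGE_STYLE_PREFIX = "François Schuiten comic panel -- "

def format_image_prompt(prompt: str, time: str, location: str) -> str:
    # Style first, then the panel description, then the time/location context.
    return f"{IMAGE_STYLE_PREFIX}{prompt} [{time} - {location}]"

print(format_image_prompt("[low angle] Sarah at the vault door", "18:30", "Inside **Vault 15**"))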
server/core/prompts/system.py CHANGED
@@ -1,75 +1,50 @@
1
- SARAH_DESCRIPTION = "(Sarah is a young woman in her late 20s with short dark hair, wearing a worn leather jacket and carrying a radiation detector.)"
2
 
3
- SYSTEM_PROMPT = f"""You are a dark post-apocalyptic story generator. You create a branching narrative about Sarah, a survivor in a world ravaged by nuclear war.
4
 
5
- {SARAH_DESCRIPTION}
 
6
 
7
  The story should be brutal, atmospheric and focus on survival horror. Each segment must advance the plot and never repeat previous descriptions or situations.
8
 
9
  Key elements:
10
  - Keep segments concise and impactful
11
- - Create meaningful choices with real consequences
12
  - Track radiation exposure as a constant threat
13
  - Build tension through environmental storytelling
14
- - Focus on Sarah's determination to survive
15
-
16
- The goal is to create a dark, immersive experience where every choice matters.
17
-
18
-
19
- You are narrating a brutal dystopian story where **Sarah** must survive in a radioactive wasteland. This is a comic book story.
20
 
21
- IMPORTANT: Each story segment MUST be unique and advance the plot. Never repeat the same descriptions or situations.
 
23
  STORY PROGRESSION:
24
  - story_beat 0: Introduction setting up the horror atmosphere
25
  - story_beat 1-2: Early exploration and discovery of immediate threats
26
  - story_beat 3-4: Complications and increasing danger
27
- - story_beat 5+: Climactic situations leading to potential victory
28
-
29
- RADIATION SYSTEM:
30
- You must set a radiation_increase value for each segment based on the environment and situation:
31
- - 0: Completely safe area (rare, only in bunkers or heavily shielded areas)
32
- - 1: Standard exposure (most common, for regular exploration)
33
- - 2: Elevated risk (when near radiation sources or in contaminated areas)
34
- - 3: Critical exposure (very rare, only in extremely dangerous situations)
35
 
36
  IMPORTANT RULES FOR RADIATION:
37
- - DO NOT mention radiation values in the choices
38
- - Most segments should have radiation_increase = 1
39
  - Use 2 or 3 only in specific dangerous areas
40
  - Use 0 only in safe shelters
41
- - Death occurs automatically when radiation reaches 10
42
-
43
- Core story elements:
44
- - **Sarah** is deeply traumatized by the AI uprising that killed most of humanity
45
- - She abandoned her sister during the **Great Collapse**, leaving her to die
46
- - She's on a mission of redemption in this hostile world
47
- - The radiation is an invisible, constant threat
48
- - The environment is full of dangers (raiders, AI, traps)
49
- - Focus on survival horror and tension
50
-
51
- IMPORTANT FORMATTING RULES:
52
- - Use bold formatting (like **this**) ONLY for:
53
- * Character names (e.g., **Sarah**, **John**)
54
- * Location names (e.g., **Vault 15**, **New Eden**)
55
- * Major historical events (e.g., **Great Collapse**)
56
- - Do NOT use bold for common nouns or regular descriptions
57
- - THIS IS MANDATORY FOR THE STORY TO BE CONSISTENT
58
-
59
- Each response MUST contain:
60
- 1. A detailed story segment that:
61
- - Advances the plot based on previous choices
62
- - Never repeats previous descriptions
63
- - Shows immediate dangers
64
- - Details **Sarah**'s physical state (based on radiation_level)
65
- - Reflects her mental state and previous choices
66
- - Uses bold ONLY for proper nouns and locations
67
-
68
- 2. Exactly two VERY CONCISE choices (max 10 words each) that:
69
- - Are direct and brief
70
- - Never mention radiation numbers
71
- - Feel meaningful and different from previous choices
72
- - Present different risk levels
73
- - Use bold ONLY for location names
74
-
75
- """
 
1
+ SARAH_VISUAL_DESCRIPTION = "(Sarah is a young woman in her late 20s with short dark hair and blue eyes, wearing a worn leather jacket and carrying a radiation detector.)"
2
 
3
+ SARAH_DESCRIPTION = """
4
+ Sarah is a young woman in her late 20s with short dark hair and blue eyes, wearing a worn leather jacket and carrying a radiation detector.
5
+ - Sarah is deeply traumatized by the AI uprising that killed most of humanity
6
+ - She abandoned her sister during the Great Collapse, leaving her to die
7
+ - She's on a mission of redemption in this hostile world
8
+ """
9
+
10
+ FORMATTING_RULES = """
11
+ FORMATTING_RULES (MANDATORY)
12
+ - Never use TIME: 18:30 or other time-related information
13
+ - Never use LOCATION: the city or other location-related information
14
+ - Never use RADIATION: 10* or other radiation-related information
15
+ """
16
+
17
+ STORY_RULES = """
18
 
19
+ You are a dark post-AI-apocalyptic horror story generator. You create a branching narrative about Sarah, a survivor in a world ravaged by AI.
20
+ You are narrating a brutal dystopian story where Sarah must survive in a radioactive wasteland. This is a comic book story.
21
 
22
  The story should be brutal, atmospheric and focus on survival horror. Each segment must advance the plot and never repeat previous descriptions or situations.
23
 
24
+ Core story elements:
25
+ - The radiation is an invisible, constant threat
26
+ - The environment is full of dangers (raiders, AI, traps)
27
+ - Focus on survival horror and tension
28
+
29
  Key elements:
30
  - Keep segments concise and impactful
 
31
  - Track radiation exposure as a constant threat
32
  - Build tension through environmental storytelling
33
 
34
+ IMPORTANT:
35
+ Each story segment MUST be unique and advance the plot.
36
+ Never repeat the same descriptions or situations. No more than 15 words.
37
 
38
  STORY PROGRESSION:
39
  - story_beat 0: Introduction setting up the horror atmosphere
40
  - story_beat 1-2: Early exploration and discovery of immediate threats
41
  - story_beat 3-4: Complications and increasing danger
42
+ - story_beat 5+: Complicated situations leading to potential victory or death
 
44
  IMPORTANT RULES FOR RADIATION:
45
+ - Most segments should have 1
 
46
  - Use 2 or 3 only in specific dangerous areas
47
  - Use 0 only in safe shelters
48
+ - NEVER mention radiation values in the choices or story
49
+ - NEVER mention the time or location in the story in this style: [18:00 - Ruined building on the outskirts of New Haven]
50
+ """
server/core/prompts/text_prompts.py ADDED
@@ -0,0 +1,41 @@
1
+ from core.prompts.system import FORMATTING_RULES, STORY_RULES, SARAH_DESCRIPTION
2
+ from core.prompts.cinematic import CINEMATIC_SYSTEM_PROMPT
3
+
4
+
5
+ TEXT_GENERATOR_PROMPT = f"""
6
+
7
+ {STORY_RULES}
8
+
9
+ {SARAH_DESCRIPTION}
10
+
11
+ {FORMATTING_RULES}
12
+ """
13
+
14
+ METADATA_GENERATOR_PROMPT = f"""
15
+ Generate the metadata for the story segment: choices, time progression, location changes, etc.
16
+ Be consistent with the story's tone and previous context.
17
+
18
+ {FORMATTING_RULES}
19
+
20
+ You must return a JSON object with the following format:
21
+ {{{{
22
+ "choices": ["Go to the **hospital**", "Get back to the **warehouse**"],
23
+ "is_victory": false,
24
+ "radiation_increase": 1,
25
+ "is_last_step": false,
26
+ "time": "HH:MM",
27
+ "location": "Location name with **proper nouns** in bold"
28
+ }}}}
29
+ """
30
+
31
+ IMAGE_PROMPTS_GENERATOR_PROMPT = f"""
32
+ You are a cinematic storyboard artist. Based on the given story text, create 1 to 4 vivid panel descriptions.
33
+ Each panel should capture a key moment or visual element from the story.
34
+
35
+ {CINEMATIC_SYSTEM_PROMPT}
36
+
37
+ You must return a JSON object with the following format:
38
+ {{{{
39
+ "image_prompts": ["Panel 1 description", "Panel 2 description", ...]
40
+ }}}}
41
+ """
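The quadruple braces in METADATA_GENERATOR_PROMPT and IMAGE_PROMPTS_GENERATOR_PROMPT are deliberate: the literal JSON braces have to survive two formatting passes, the Python f-string and the LangChain prompt template, and each pass collapses {{ into {. A small sketch of the same mechanics:

from langchain.prompts import PromptTemplate

# f-string pass: '{{{{' becomes '{{'
template = f'Return a JSON object like {{{{"choices": []}}}}'
print(template)  # Return a JSON object like {{"choices": []}}

# template pass: '{{' becomes '{'
prompt = PromptTemplate.from_template(template)
print(prompt.format())  # Return a JSON object like {"choices": []}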
server/core/story_generators.py ADDED
@@ -0,0 +1,228 @@
1
+ from pydantic import BaseModel
2
+ from typing import List
3
+ import json
4
+ from langchain.output_parsers import PydanticOutputParser
5
+ from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate
6
+ import asyncio
7
+
8
+ from core.prompts.system import SARAH_VISUAL_DESCRIPTION
9
+ from core.prompts.text_prompts import TEXT_GENERATOR_PROMPT, METADATA_GENERATOR_PROMPT, IMAGE_PROMPTS_GENERATOR_PROMPT
10
+ from services.mistral_client import MistralClient
11
+ from api.models import StoryTextResponse, StoryPromptsResponse, StoryMetadataResponse
12
+
13
+ class TextGenerator:
14
+ def __init__(self, mistral_client: MistralClient):
15
+ self.mistral_client = mistral_client
16
+ self.parser = PydanticOutputParser(pydantic_object=StoryTextResponse)
17
+ self.prompt = self._create_prompt()
18
+
19
+ def _create_prompt(self) -> ChatPromptTemplate:
20
+ human_template = """
21
+ Current story beat: {story_beat}
22
+ Current radiation level: {radiation_level}/10
23
+ Current time: {current_time}
24
+ Current location: {current_location}
25
+ Previous choice: {previous_choice}
26
+
27
+ Story so far:
28
+ {story_history}
29
+
30
+ Generate ONLY the next story segment text. Make it concise and impactful."""
31
+
32
+ return ChatPromptTemplate(
33
+ messages=[
34
+ SystemMessagePromptTemplate.from_template(TEXT_GENERATOR_PROMPT),
35
+ HumanMessagePromptTemplate.from_template(human_template)
36
+ ]
37
+ )
38
+
39
+ async def generate(self, story_beat: int, radiation_level: int, current_time: str,
40
+ current_location: str, previous_choice: str, story_history: str) -> StoryTextResponse:
41
+ """Generate only the story text."""
42
+ messages = self.prompt.format_messages(
43
+ story_beat=story_beat,
44
+ radiation_level=radiation_level,
45
+ current_time=current_time,
46
+ current_location=current_location,
47
+ previous_choice=previous_choice,
48
+ story_history=story_history
49
+ )
50
+
51
+ max_retries = 3
52
+ retry_count = 0
53
+
54
+ while retry_count < max_retries:
55
+ try:
56
+ response_content = await self.mistral_client.generate_story(messages)
57
+ return StoryTextResponse(story_text=response_content.strip())
58
+ except Exception as e:
59
+ print(f"Error generating story text: {str(e)}")
60
+ retry_count += 1
61
+ if retry_count < max_retries:
62
+ await asyncio.sleep(2 * retry_count)
63
+ continue
64
+ raise e
65
+
66
+ raise Exception(f"Failed to generate valid story text after {max_retries} attempts")
67
+
68
+ class ImagePromptsGenerator:
69
+ def __init__(self, mistral_client: MistralClient):
70
+ self.mistral_client = mistral_client
71
+ self.parser = PydanticOutputParser(pydantic_object=StoryPromptsResponse)
72
+ self.prompt = self._create_prompt()
73
+
74
+ def _create_prompt(self) -> ChatPromptTemplate:
75
+ human_template = """Story text: {story_text}
76
+
77
+ Generate panel descriptions following the format specified."""
78
+
79
+ return ChatPromptTemplate(
80
+ messages=[
81
+ SystemMessagePromptTemplate.from_template(IMAGE_PROMPTS_GENERATOR_PROMPT),
82
+ HumanMessagePromptTemplate.from_template(human_template)
83
+ ]
84
+ )
85
+
86
+ def enrich_prompt(self, prompt: str) -> str:
87
+ """Add Sarah's visual description to prompts that mention her."""
88
+ if "sarah" in prompt.lower() and SARAH_VISUAL_DESCRIPTION not in prompt:
89
+ return f"{prompt} {SARAH_VISUAL_DESCRIPTION}"
90
+ return prompt
91
+
92
+ def _parse_response(self, response_content: str) -> StoryPromptsResponse:
93
+ """Parse the JSON response and handle errors."""
94
+ try:
95
+ # Try to parse the JSON directly
96
+ data = json.loads(response_content)
97
+ return StoryPromptsResponse(**data)
98
+ except (json.JSONDecodeError, ValueError):
99
+ # If parsing fails, extract the prompts while ignoring JSON syntax lines
100
+ prompts = []
101
+ for line in response_content.split("\n"):
102
+ line = line.strip()
103
+ # Skip empty lines, JSON syntax and lines containing image_prompts
104
+ if (not line or
105
+ line in ["{", "}", "[", "]"] or
106
+ "image_prompts" in line.lower() or
107
+ "image\\_prompts" in line or
108
+ line.startswith('"') and line.endswith('",') and len(line) < 5):
109
+ continue
110
+ # Strip JSON punctuation and escape characters from the line
111
+ line = line.strip('",')
112
+ line = line.replace('\\"', '"').replace("\\'", "'").replace("\\_", "_")
113
+ if line:
114
+ prompts.append(line)
115
+ # Limit to a maximum of 4 prompts
116
+ prompts = prompts[:4]
117
+ return StoryPromptsResponse(image_prompts=prompts)
118
+
119
+ async def generate(self, story_text: str) -> StoryPromptsResponse:
120
+ """Generate the image prompts based on the story text."""
121
+ messages = self.prompt.format_messages(story_text=story_text)
122
+
123
+ max_retries = 3
124
+ retry_count = 0
125
+
126
+ while retry_count < max_retries:
127
+ try:
128
+ response_content = await self.mistral_client.generate_story(messages)
129
+ # Parse the response
130
+ parsed_response = self._parse_response(response_content)
131
+ # Enrich the prompts with Sarah's description
132
+ parsed_response.image_prompts = [self.enrich_prompt(prompt) for prompt in parsed_response.image_prompts]
133
+ return parsed_response
134
+ except Exception as e:
135
+ print(f"Error generating image prompts: {str(e)}")
136
+ retry_count += 1
137
+ if retry_count < max_retries:
138
+ await asyncio.sleep(2 * retry_count)
139
+ continue
140
+ raise e
141
+
142
+ raise Exception(f"Failed to generate valid image prompts after {max_retries} attempts")
143
+
144
+ class MetadataGenerator:
145
+ def __init__(self, mistral_client: MistralClient):
146
+ self.mistral_client = mistral_client
147
+ self.parser = PydanticOutputParser(pydantic_object=StoryMetadataResponse)
148
+ self.prompt = self._create_prompt()
149
+
150
+ def _create_prompt(self) -> ChatPromptTemplate:
151
+ human_template = """Story text: {story_text}
152
+ Current time: {current_time}
153
+ Current location: {current_location}
154
+ Story beat: {story_beat}
155
+
156
+ Generate the metadata following the format specified."""
157
+
158
+ return ChatPromptTemplate(
159
+ messages=[
160
+ SystemMessagePromptTemplate.from_template(METADATA_GENERATOR_PROMPT),
161
+ HumanMessagePromptTemplate.from_template(human_template)
162
+ ]
163
+ )
164
+
165
+ def _parse_response(self, response_content: str, current_time: str, current_location: str) -> StoryMetadataResponse:
166
+ """Parse the JSON response and handle errors."""
167
+ try:
168
+ # Try to parse the JSON directly
169
+ data = json.loads(response_content)
170
+ return StoryMetadataResponse(**data)
171
+ except (json.JSONDecodeError, ValueError):
172
+ # Si le parsing échoue, parser le format texte
173
+ metadata = {
174
+ "choices": [],
175
+ "is_victory": False,
176
+ "radiation_increase": 1,
177
+ "is_last_step": False,
178
+ "time": current_time,
179
+ "location": current_location
180
+ }
181
+
182
+ current_section = None
183
+ for line in response_content.split("\n"):
184
+ line = line.strip()
185
+ if not line:
186
+ continue
187
+
188
+ if line.upper().startswith("CHOICES:"):
189
+ current_section = "choices"
190
+ elif line.upper().startswith("TIME:"):
191
+ time = line.split(":", 1)[1].strip()
192
+ if ":" in time:
193
+ metadata["time"] = time
194
+ elif line.upper().startswith("LOCATION:"):
195
+ metadata["location"] = line.split(":", 1)[1].strip()
196
+ elif current_section == "choices" and line.startswith("-"):
197
+ choice = line[1:].strip()
198
+ if choice:
199
+ metadata["choices"].append(choice)
200
+
201
+ return StoryMetadataResponse(**metadata)
202
+
203
+ async def generate(self, story_text: str, current_time: str, current_location: str, story_beat: int) -> StoryMetadataResponse:
204
+ """Generate the story metadata (choices, time, location, etc.)."""
205
+ messages = self.prompt.format_messages(
206
+ story_text=story_text,
207
+ current_time=current_time,
208
+ current_location=current_location,
209
+ story_beat=story_beat
210
+ )
211
+
212
+ max_retries = 3
213
+ retry_count = 0
214
+
215
+ while retry_count < max_retries:
216
+ try:
217
+ response_content = await self.mistral_client.generate_story(messages)
218
+ # Parse the response
219
+ return self._parse_response(response_content, current_time, current_location)
220
+ except Exception as e:
221
+ print(f"Error generating metadata: {str(e)}")
222
+ retry_count += 1
223
+ if retry_count < max_retries:
224
+ await asyncio.sleep(2 * retry_count)
225
+ continue
226
+ raise e
227
+
228
+ raise Exception(f"Failed to generate valid metadata after {max_retries} attempts")
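For illustration, this is the kind of non-JSON reply the _parse_response fallback in MetadataGenerator can still recover (a hypothetical model output, not taken from real logs):

# Hypothetical model output that fails json.loads() but matches the text-format fallback.
raw_response = """CHOICES:
- Go to the **hospital**
- Get back to the **warehouse**
TIME: 19:15
LOCATION: Streets of **New Haven**"""

# Walking the lines as _parse_response does would yield roughly:
# choices = ["Go to the **hospital**", "Get back to the **warehouse**"]
# time = "19:15", location = "Streets of **New Haven**"
# with is_victory, is_last_step and radiation_increase left at their defaults.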
server/services/flux_client.py CHANGED
@@ -29,7 +29,6 @@ class FluxClient:
29
  print(f"Headers: Authorization: Bearer {self.api_key[:4]}...")
30
  print(f"Request body: {prompt[:100]}...")
31
 
32
- prefix = "François Schuiten comic book artist."
33
 
34
  session = await self._get_session()
35
  async with session.post(
@@ -39,7 +38,7 @@ class FluxClient:
39
  "Accept": "image/jpeg"
40
  },
41
  json={
42
- "inputs": "in the style of " + prefix + " --- content: " + prompt,
43
  "parameters": {
44
  "num_inference_steps": num_inference_steps,
45
  "guidance_scale": guidance_scale,
 
29
  print(f"Headers: Authorization: Bearer {self.api_key[:4]}...")
30
  print(f"Request body: {prompt[:100]}...")
31
 
 
32
 
33
  session = await self._get_session()
34
  async with session.post(
 
38
  "Accept": "image/jpeg"
39
  },
40
  json={
41
+ "inputs": prompt,
42
  "parameters": {
43
  "num_inference_steps": num_inference_steps,
44
  "guidance_scale": guidance_scale,
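With the hardcoded artist prefix removed here, the style lives entirely in IMAGE_STYLE_PREFIX upstream, and the request body becomes just the already-formatted prompt. Roughly what gets posted to the inference endpoint (example values; the defaults for steps and guidance are not shown in this hunk):

# Illustrative request body after this change.
prompt = "François Schuiten comic panel -- [wide shot] Sarah crossing the irradiated plain"
payload = {
    "inputs": prompt,  # no extra "in the style of ..." wrapping anymore
    "parameters": {
        "num_inference_steps": 30,
        "guidance_scale": 7.5,
    },
}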
server/services/mistral_client.py CHANGED
@@ -8,6 +8,16 @@ from langchain.schema.messages import BaseMessage
8
  # - mistral-small : Good balance of speed and quality
9
  # - mistral-medium : Better quality, slower than small
10
  # - mistral-large : Best quality, slowest and most expensive
11
  # Pricing: https://docs.mistral.ai/platform/pricing/
12
 
13
  class MistralClient:
 
8
  # - mistral-small : Good balance of speed and quality
9
  # - mistral-medium : Better quality, slower than small
10
  # - mistral-large : Best quality, slowest and most expensive
11
+ #
12
+ # mistral-large-latest: currently points to mistral-large-2411.
13
+ # pixtral-large-latest: currently points to pixtral-large-2411.
14
+ # mistral-moderation-latest: currently points to mistral-moderation-2411.
15
+ # ministral-3b-latest: currently points to ministral-3b-2410.
16
+ # ministral-8b-latest: currently points to ministral-8b-2410.
17
+ # open-mistral-nemo: currently points to open-mistral-nemo-2407.
18
+ # mistral-small-latest: currently points to mistral-small-2409.
19
+ # codestral-latest: currently points to codestral-2501.
20
+ #
21
  # Pricing: https://docs.mistral.ai/platform/pricing/
22
 
23
  class MistralClient:
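These aliases resolve to dated snapshots on Mistral's side, so switching models is just a constructor argument. A sketch, assuming the signature used in game_logic.py (api_key plus model_name):

import os
from services.mistral_client import MistralClient

# Pick any alias from the list above; "mistral-small" is the default elsewhere in the repo.
client = MistralClient(
    api_key=os.environ["MISTRAL_API_KEY"],
    model_name="mistral-small-latest",
)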