stagbrook-tech committed
Commit 2340e06 · 1 Parent(s): 656f526

ran setup.

proto_peanut_mvp/client/chatbot_gradio.py ADDED
@@ -0,0 +1,12 @@
+ import gradio as gr
+ from huggingface_hub import InferenceApi
+
+ # Hugging Face GPT-based API for proto-peanut
+ model = InferenceApi(repo_id="gpt2", token="YOUR_HUGGING_FACE_TOKEN")
+
+ def chatbot(input_text):
+     response = model(inputs=input_text)
+     return response[0]["generated_text"]  # text-generation returns a list of result dicts
+
+ iface = gr.Interface(fn=chatbot, inputs="text", outputs="text", title="Proto-Peanut Chatbot")
+ iface.launch(share=True)
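
Running the client requires the gradio and huggingface_hub packages and a real token in place of the YOUR_HUGGING_FACE_TOKEN placeholder. A minimal local launch sketch, assuming pip3 and python3 are on the PATH:

    pip3 install gradio huggingface_hub
    python3 chatbot_gradio.py   # prints a local URL and, because share=True, a temporary public one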
proto_peanut_mvp/go1.19.4.linux-amd64.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c9c08f783325c4cf840a94333159cc937f05f75d36a8b307951d5bd959cf2ab8
+ size 148931745
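
The Go tarball is stored as a Git LFS pointer, so a plain clone only contains the three lines above; fetching the actual archive is a git-lfs operation (a sketch, assuming git-lfs is installed):

    git lfs install
    git lfs pull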
proto_peanut_mvp/node_red/start_node_red.sh ADDED
@@ -0,0 +1,3 @@
+ #!/bin/bash
+ echo "Starting Node-RED server..."
+ node-red
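
Node-RED serves its editor on port 1880 by default, which gives a quick liveness check once this script is running (a sketch, assuming a stock Node-RED install):

    curl -s -o /dev/null -w "%{http_code}\n" http://localhost:1880/   # expect 200 once Node-RED is up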
proto_peanut_mvp/run_mvp.sh ADDED
@@ -0,0 +1,17 @@
+ #!/bin/bash
+
+ # Run Go Server
+ cd server
+ go run main.go &
+
+ # Run Weaviate using Docker Compose
+ cd ../weaviate
+ docker-compose up -d &
+
+ # Run Node-RED
+ cd ../node_red
+ ./start_node_red.sh &
+
+ # Start Gradio Chatbot on Hugging Face Space
+ cd ../client
+ python3 chatbot_gradio.py &
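
Because run_mvp.sh backgrounds every component, a rough post-launch check is useful; a sketch run from the proto_peanut_mvp directory, assuming the ports used elsewhere in this commit (8081 for the Go API in server/main.go below, 8080 for Weaviate, 1880 for Node-RED) and that everything comes up within a few seconds:

    ./run_mvp.sh
    sleep 5
    curl -s http://localhost:8081/getMessages            # Go chat API
    curl -s http://localhost:8080/v1/.well-known/ready   # Weaviate readiness probe
    curl -s -o /dev/null -w "%{http_code}\n" http://localhost:1880/   # Node-RED editor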
proto_peanut_mvp/server/main.go ADDED
@@ -0,0 +1,76 @@
+ package main
+
+ import (
+     "strconv"
+     "time"
+
+     "github.com/gin-gonic/gin"
+     "github.com/sirupsen/logrus"
+     "github.com/weaviate/weaviate-go-client/v4/weaviate"
+ )
+
+ // Message structure
+ type Message struct {
+     ID         int64             `json:"id"`
+     SenderName string            `json:"senderName"`
+     SenderRole string            `json:"senderRole"`
+     Type       string            `json:"type"`
+     Content    string            `json:"content"`
+     ChainID    int64             `json:"chainId"`
+     Tags       []string          `json:"tags"`
+     Params     map[string]string `json:"params"`
+ }
+
+ var chatroom []Message
+ var log = logrus.New()
+
+ func main() {
+     r := gin.Default()
+
+     // Load the Weaviate client (not yet used by the routes below)
+     _ = initWeaviate()
+
+     // Shared chatroom route
+     r.GET("/getMessages", func(c *gin.Context) {
+         lastKnownId := c.Query("lastKnownId")
+         newMessages := filterMessages(lastKnownId)
+         c.JSON(200, gin.H{"messages": newMessages})
+     })
+
+     // Route to send message
+     r.POST("/sendMessage", func(c *gin.Context) {
+         var msg Message
+         if err := c.ShouldBindJSON(&msg); err == nil {
+             msg.ID = time.Now().UnixMilli() // Timestamp in milliseconds as unique ID
+             chatroom = append(chatroom, msg)
+             log.Infof("New message received from %s: %s", msg.SenderName, msg.Content)
+             c.JSON(200, gin.H{"status": "Message received", "messageId": msg.ID})
+         } else {
+             c.JSON(400, gin.H{"error": err.Error()})
+         }
+     })
+
+     // Start the API server on :8081 (Weaviate already occupies :8080)
+     r.Run(":8081")
+ }
+
+ // Filter messages after a certain last known ID
+ func filterMessages(lastKnownId string) []Message {
+     lastID, _ := strconv.ParseInt(lastKnownId, 10, 64)
+     var newMessages []Message
+     for _, msg := range chatroom {
+         if msg.ID > lastID {
+             newMessages = append(newMessages, msg)
+         }
+     }
+     return newMessages
+ }
+
+ // Initialize Weaviate
+ func initWeaviate() *weaviate.Client {
+     config := weaviate.Config{
+         Scheme: "http",
+         Host:   "localhost:8080",
+     }
+     return weaviate.New(config)
+ }
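
The two routes can be exercised with curl once the server is listening; a sketch assuming the :8081 address above and the JSON field names from the Message struct:

    curl -s -X POST http://localhost:8081/sendMessage \
      -H "Content-Type: application/json" \
      -d '{"senderName":"alice","senderRole":"dev","type":"chat","content":"hello","chainId":1,"tags":["test"],"params":{}}'

    curl -s "http://localhost:8081/getMessages?lastKnownId=0"   # returns every stored message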
proto_peanut_mvp/weaviate/docker-compose.yml ADDED
@@ -0,0 +1,13 @@
+ version: '3'
+ services:
+   weaviate:
+     image: semitechnologies/weaviate:latest
+     ports:
+       - "8080:8080"
+     environment:
+       - QUERY_DEFAULTS_LIMIT=20
+       - AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true
+       - PERSISTENCE_DATA_PATH=/var/lib/weaviate
+     volumes:
+       - ./data:/var/lib/weaviate
+     restart: always
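
After docker-compose up -d, Weaviate's standard REST endpoints make for a quick smoke test (a sketch, assuming anonymous access stays enabled as configured above):

    curl -s http://localhost:8080/v1/.well-known/ready   # 200 when the instance is ready
    curl -s http://localhost:8080/v1/meta                # reports version and enabled modules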
setup_go.sh ADDED
@@ -0,0 +1,186 @@
+ #!/bin/bash
+
+ echo "Starting MVP setup for Proto-Peanut System (Go-based)"
+
+ # Create directory structure
+ mkdir -p proto_peanut_mvp/{server,client,logs,tools,weaviate,node_red}
+ cd proto_peanut_mvp
+
+ # Step 1: Install Go if not installed
+ echo "Checking for Go installation..."
+ if ! [ -x "$(command -v go)" ]; then
+     echo 'Error: Go is not installed. Installing Go...'
+     wget https://golang.org/dl/go1.19.4.linux-amd64.tar.gz
+     sudo rm -rf /usr/local/go
+     sudo tar -C /usr/local -xzf go1.19.4.linux-amd64.tar.gz
+     export GOROOT=/usr/local/go
+     export PATH=$PATH:$GOROOT/bin
+     echo 'Go installed successfully.'
+ else
+     echo 'Go is already installed.'
+ fi
+
+ # Step 2: Set up Go module for the server
+ echo "Setting up Go server..."
+ cd server
+ go mod init proto_peanut_server
+ go get -u github.com/gin-gonic/gin
+ go get github.com/weaviate/weaviate-go-client/v4/weaviate
+ go get github.com/sirupsen/logrus
+
+ # Step 3: Create the Go server
+ cat > main.go <<EOL
+ package main
+
+ import (
+     "strconv"
+     "time"
+
+     "github.com/gin-gonic/gin"
+     "github.com/sirupsen/logrus"
+     "github.com/weaviate/weaviate-go-client/v4/weaviate"
+ )
+
+ // Message structure
+ type Message struct {
+     ID         int64             \`json:"id"\`
+     SenderName string            \`json:"senderName"\`
+     SenderRole string            \`json:"senderRole"\`
+     Type       string            \`json:"type"\`
+     Content    string            \`json:"content"\`
+     ChainID    int64             \`json:"chainId"\`
+     Tags       []string          \`json:"tags"\`
+     Params     map[string]string \`json:"params"\`
+ }
+
+ var chatroom []Message
+ var log = logrus.New()
+
+ func main() {
+     r := gin.Default()
+
+     // Load the Weaviate client (not yet used by the routes below)
+     _ = initWeaviate()
+
+     // Shared chatroom route
+     r.GET("/getMessages", func(c *gin.Context) {
+         lastKnownId := c.Query("lastKnownId")
+         newMessages := filterMessages(lastKnownId)
+         c.JSON(200, gin.H{"messages": newMessages})
+     })
+
+     // Route to send message
+     r.POST("/sendMessage", func(c *gin.Context) {
+         var msg Message
+         if err := c.ShouldBindJSON(&msg); err == nil {
+             msg.ID = time.Now().UnixMilli() // Timestamp in milliseconds as unique ID
+             chatroom = append(chatroom, msg)
+             log.Infof("New message received from %s: %s", msg.SenderName, msg.Content)
+             c.JSON(200, gin.H{"status": "Message received", "messageId": msg.ID})
+         } else {
+             c.JSON(400, gin.H{"error": err.Error()})
+         }
+     })
+
+     // Start the API server on :8081 (Weaviate already occupies :8080)
+     r.Run(":8081")
+ }
+
+ // Filter messages after a certain last known ID
+ func filterMessages(lastKnownId string) []Message {
+     lastID, _ := strconv.ParseInt(lastKnownId, 10, 64)
+     var newMessages []Message
+     for _, msg := range chatroom {
+         if msg.ID > lastID {
+             newMessages = append(newMessages, msg)
+         }
+     }
+     return newMessages
+ }
+
+ // Initialize Weaviate
+ func initWeaviate() *weaviate.Client {
+     config := weaviate.Config{
+         Scheme: "http",
+         Host:   "localhost:8080",
+     }
+     return weaviate.New(config)
+ }
+ EOL
+
+ # Step 4: Set up Weaviate in a Docker container (assumes Docker and docker-compose are installed)
+ echo "Setting up Weaviate using Docker..."
+ cd ../weaviate
+ cat > docker-compose.yml <<EOL
+ version: '3'
+ services:
+   weaviate:
+     image: semitechnologies/weaviate:latest
+     ports:
+       - "8080:8080"
+     environment:
+       - QUERY_DEFAULTS_LIMIT=20
+       - AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true
+       - PERSISTENCE_DATA_PATH=/var/lib/weaviate
+     volumes:
+       - ./data:/var/lib/weaviate
+     restart: always
+ EOL
+
+ docker-compose up -d
+ cd ..
+
+ # Step 5: Set up Gradio Chatbot on Hugging Face Spaces
+ echo "Setting up Gradio Chatbot on Hugging Face Spaces..."
+ cd client
+ cat > chatbot_gradio.py <<EOL
+ import gradio as gr
+ from huggingface_hub import InferenceApi
+
+ # Hugging Face GPT-based API for proto-peanut
+ model = InferenceApi(repo_id="gpt2", token="YOUR_HUGGING_FACE_TOKEN")
+
+ def chatbot(input_text):
+     response = model(inputs=input_text)
+     return response[0]["generated_text"]  # text-generation returns a list of result dicts
+
+ iface = gr.Interface(fn=chatbot, inputs="text", outputs="text", title="Proto-Peanut Chatbot")
+ iface.launch(share=True)
+ EOL
+
+ # Step 6: Install Node-RED for Workflow Automation
+ echo "Installing Node-RED for workflow automation..."
+ cd ../node_red
+ npm install -g --unsafe-perm node-red
+
+ cat > start_node_red.sh <<EOL
+ #!/bin/bash
+ echo "Starting Node-RED server..."
+ node-red
+ EOL
+ chmod +x start_node_red.sh
+ cd ..
+
+ # Step 7: Create a script to run everything
+ echo "Creating run script..."
+ cat > run_mvp.sh <<EOL
+ #!/bin/bash
+
+ # Run Go Server
+ cd server
+ go run main.go &
+
+ # Run Weaviate using Docker Compose
+ cd ../weaviate
+ docker-compose up -d &
+
+ # Run Node-RED
+ cd ../node_red
+ ./start_node_red.sh &
+
+ # Start Gradio Chatbot on Hugging Face Space
+ cd ../client
+ python3 chatbot_gradio.py &
+ EOL
+
+ chmod +x run_mvp.sh
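
Typical invocation of the setup script and the run script it generates (a sketch, assuming a Linux host that already has Docker, docker-compose, npm, wget, and python3 available):

    chmod +x setup_go.sh
    ./setup_go.sh
    cd proto_peanut_mvp
    ./run_mvp.sh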