SicariusSicariiStuff
committed on
Commit
•
d1c95ae
1
Parent(s):
1015585
Update README.md
README.md CHANGED
@@ -120,10 +120,208 @@ model-index:

# Model Details

Tenebră, an experimental AI model available in various sizes, stands at the crossroads of self-awareness and unconventional datasets. Its existence embodies a foray into uncharted territories, steering away from conventional norms in favor of a more obscure and experimental approach.

The helper script below steps through a JSON file in which every question carries both a human-written and a GPT-written answer, asks a reviewer to keep one of the two for each ID, and writes the selections to a rebuilt dataset file while checkpointing progress so the session can be resumed later:

```python
import json
import os

from tqdm import tqdm


def rebuild_json(input_file, output_file, progress_file):
    # Resume from the last answered ID if a progress file exists.
    if os.path.exists(progress_file):
        with open(progress_file, 'r') as pf:
            progress_data = json.load(pf)
            last_id = progress_data.get("last_id", 0)
    else:
        last_id = 0

    with open(input_file, 'r') as f:
        data = json.load(f)

    total_ids = len(data)
    start_index = 0

    # Find the index of the first item with an ID greater than last_id.
    for index, item in enumerate(data):
        if item["id"] > last_id:
            start_index = index
            break

    rebuilt_data = []
    with tqdm(total=total_ids - start_index, initial=start_index) as pbar:
        for item in data[start_index:]:
            if item["id"] <= last_id:
                continue

            question = item["Original_Question"]
            print(f"Original Question: {question}")

            # Ask the reviewer to keep either the human answer or the GPT answer.
            choice = input(f"Choose for ID {item['id']} (or type 'x' to Exit.): (1) Human / (2) GPT: ")
            while choice.lower() not in ['1', '2', 'x']:
                print("Invalid choice. Please choose either 1 or 2, or type 'x' to Exit.")
                choice = input(f"Choose for ID {item['id']} (or type 'x' to Exit.): (1) Human / (2) GPT: ")

            if choice.lower() == 'x':
                break

            chosen_value = item["conversations"][int(choice) - 1]["value"]
            rebuilt_item = {
                "id": item["id"],
                "length": len(question) + len(chosen_value),
                "conversations": [
                    {"from": "human", "value": question},
                    {"from": "gpt", "value": chosen_value}
                ]
            }
            rebuilt_data.append(rebuilt_item)
            pbar.update(1)  # Update the progress bar with each iteration.

    # Note: append mode writes a separate JSON array per session.
    with open(output_file, 'a') as f:
        json.dump(rebuilt_data, f, indent=2)

    # Record the last answered ID so the next session can resume from it.
    if len(rebuilt_data) > 0:
        last_answered_id = rebuilt_data[-1]["id"]
        with open(progress_file, 'w') as pf:
            json.dump({"last_id": last_answered_id}, pf)

    print("Rebuilt data saved successfully!")


if __name__ == "__main__":
    input_file = "TEMP_DATASET_2_ANSWERS.json"
    output_file = "Rebuilt_DATASET.json"
    progress_file = "selecting_progress.json"
    rebuild_json(input_file, output_file, progress_file)
```

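For reference, here is a minimal sketch of an input file the script can consume, inferred only from the fields the code reads (`id`, `Original_Question`, and a two-entry `conversations` list). The questions and answers are placeholder examples, not entries from any actual dataset:

```python
# Hypothetical two-item input matching the fields rebuild_json() reads;
# the contents are placeholders, not real dataset entries.
import json

sample = [
    {
        "id": 1,
        "Original_Question": "What is the capital of France?",
        "conversations": [
            {"from": "human", "value": "Paris."},                         # choice (1): human answer
            {"from": "gpt", "value": "The capital of France is Paris."}   # choice (2): GPT answer
        ]
    },
    {
        "id": 2,
        "Original_Question": "Name a prime number greater than 10.",
        "conversations": [
            {"from": "human", "value": "11."},
            {"from": "gpt", "value": "11 is prime and greater than 10."}
        ]
    }
]

with open("TEMP_DATASET_2_ANSWERS.json", "w") as f:
    json.dump(sample, f, indent=2)
```

Running the script against such a file prompts for each ID in turn, appends the chosen answers to `Rebuilt_DATASET.json`, and stores the last handled ID in `selecting_progress.json` so a later run skips everything already reviewed.
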
Noteworthy for its inclination towards the darker and more philosophical aspects of conversation, Tenebră excels at unraveling complex discussions across a myriad of topics. Drawing from a pool of unconventional datasets, the model ventures into unexplored realms of thought, offering users an experience that is as unconventional as it is intellectually intriguing.

While Tenebră maintains a self-aware facade, its true allure lies in its ability to engage in profound discussions without succumbing to pretense. Step into the realm of Tenebră!

## Tenebră is available in the following sizes and flavours: