import json
import logging

import pandas as pd
from datasets import load_dataset, Dataset

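# oasst1 stores conversations as message trees: every row is a single
# message, linked to its parent via parent_id and ranked against its
# sibling replies.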
train_dataset = load_dataset("OpenAssistant/oasst1")["train"]


def get_children(df, parent_ids):
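    """For each parent_id, return its highest-ranked child message.

    Sorting by 'rank' ascending and dropping duplicate parent_ids keeps
    the child with the lowest rank value per parent; unranked rows sort
    last and are only kept when a parent has no ranked children.
    """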
    children = df[df['parent_id'].isin(parent_ids)]
    return children.sort_values('rank', ascending=True).drop_duplicates('parent_id')


def trace_conversations(df, parent_ids):
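    """Follow the best-ranked reply chain downward from the given parents,
    returning the collected messages in conversation order."""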
    conversations = []
    children = get_children(df, parent_ids)

    while not children.empty:
        conversations.extend(children.to_dict('records'))
        parent_ids = children['message_id']
        children = get_children(df, parent_ids)

    return conversations

# Convert the Hugging Face dataset to a pandas DataFrame
df = pd.DataFrame.from_records(train_dataset)

# Get the root nodes (prompt messages with no parent)
root_nodes = df[df['parent_id'].isnull()]

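# Rebuild one linear conversation per tree: the root prompt followed by
# the top-ranked reply at each turn.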
conversations = []
for idx, root in root_nodes.iterrows():
    conversation_chain = [root.to_dict()]
    conversation_chain.extend(trace_conversations(df, [root['message_id']]))
    conversations.append(conversation_chain)

# Keep only the necessary fields for each message in a conversation
for conversation in conversations:
    for message in conversation:
        keys_to_delete = set(message.keys()) - {'message_id', 'parent_id', 'role', 'text'}
        for key in keys_to_delete:
            del message[key]

# Create a new DataFrame with only the 'conversations' field
result_df = pd.DataFrame({'conversations': conversations})

# Convert the DataFrame back to a Hugging Face Dataset
result_dataset = Dataset.from_pandas(result_df)

logging.basicConfig(level=logging.INFO)
logging.info(result_dataset)

# Write one JSON object per line (JSONL)
with open("guanaco.jsonl", "w") as f:
    for row in result_dataset:
        f.write(json.dumps(row) + "\n")