Datasets:

Modalities: Image, Text
Formats: parquet
Languages: English
Libraries: Datasets, Dask
License:
import csv
import gzip

def load_and_process_csv(input_file_path, output_file_path):
    """Read a gzipped CSV, replace embedded newlines inside fields with
    spaces, and write the result back out as a gzipped CSV."""
    # Read all rows from the compressed input file.
    with gzip.open(input_file_path, 'rt', newline='', encoding='utf-8') as infile:
        reader = csv.reader(infile)
        rows = list(reader)

    # csv.reader has already stripped the surrounding quotes from quoted
    # fields, so look for embedded newlines directly rather than for
    # leading/trailing quote characters.
    processed_rows = []
    for row in rows:
        processed_row = []
        for field in row:
            if '\n' in field:
                field = field.replace('\n', ' ')
            processed_row.append(field)
        processed_rows.append(processed_row)

    # Write the cleaned rows to the compressed output file.
    with gzip.open(output_file_path, 'wt', newline='', encoding='utf-8') as outfile:
        writer = csv.writer(outfile)
        writer.writerows(processed_rows)

# Example usage
input_file_path = 'MetObjects.csv.gz'
output_file_path = 'metadata.csv.gz'
load_and_process_csv(input_file_path, output_file_path)
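
As a quick sanity check (not part of the original script, and assuming the metadata.csv.gz path from the example usage above), the processed file can be re-read to confirm that no field still contains an embedded newline:

import csv
import gzip

# Verify the output: every field should now be newline-free.
with gzip.open('metadata.csv.gz', 'rt', newline='', encoding='utf-8') as f:
    for row in csv.reader(f):
        assert all('\n' not in field for field in row), row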