bang_prepare_dataset.py
"""Dataset for Bangla CLIP :: https://github.com/zabir-nabil/bangla-CLIP"""
import json
from pathlib import Path
from tqdm import tqdm
import pandas as pd
train_dataframe = {'caption': [], 'image': []}
valid_dataframe = {'caption': [], 'image': []}
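# Each row pairs one caption string with the path of the image it describes;
# the two lists stay index-aligned and are turned into DataFrames at the end.
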
# https://data.mendeley.com/datasets/rxxch9vw59/2
with open('/data/captions.json', encoding='utf-8') as fh:
    data = json.load(fh)

trn_split = int(0.8 * len(data))

# first 80% of the samples go to the training split
for sample in tqdm(data[:trn_split]):
    fn = sample['filename']
    cp = sample['caption']
    my_file = Path(f"/data/images/{fn}")
    if my_file.is_file():  # keep only samples whose image exists on disk
        for tc in cp:  # one row per caption of this image
            tc = tc.replace(',', ' ')
            train_dataframe['caption'].append(tc)
            train_dataframe['image'].append(f"/data/images/{fn}")

# remaining 20% go to the validation split
for sample in tqdm(data[trn_split:]):
    fn = sample['filename']
    cp = sample['caption']
    my_file = Path(f"/data/images/{fn}")
    if my_file.is_file():
        for vc in cp:
            vc = vc.replace(',', ' ')
            valid_dataframe['caption'].append(vc)
            valid_dataframe['image'].append(f"/data/images/{fn}")

# https://www.kaggle.com/datasets/almominfaruk/bnaturebengali-image-captioning-dataset?resource=download
# BNature dataset
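# Each line of caption.txt holds an image filename followed by its caption words.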
with open("/data/caption/caption.txt", "r", encoding="utf-8") as fh:
    lines = fh.readlines()

trn_split = int(0.8 * len(lines))

for line in tqdm(lines[:trn_split]):
    parts = [x.strip() for x in line.split()]
    fn = parts[0]
    cp = ' '.join(parts[1:])
    my_file = Path(f"/data/Pictures/{fn}")
    if my_file.is_file():
        cp = cp.replace(',', ' ')
        train_dataframe['caption'].append(cp)
        train_dataframe['image'].append(f"/data/Pictures/{fn}")

for line in tqdm(lines[trn_split:]):
    parts = [x.strip() for x in line.split()]
    fn = parts[0]
    cp = ' '.join(parts[1:])
    my_file = Path(f"/data/Pictures/{fn}")
    if my_file.is_file():
        cp = cp.replace(',', ' ')
        valid_dataframe['caption'].append(cp)
        valid_dataframe['image'].append(f"/data/Pictures/{fn}")

# BAN-Cap: Bangla translations of the Flickr8k captions
ban_caps = pd.read_csv("./../BAN-Cap_captiondata.csv")
cap_ids = list(ban_caps['caption_id'])
ban_trans = list(ban_caps['bengali_caption'])

trn_split = int(0.8 * len(cap_ids))

for j in tqdm(range(trn_split)):
    # caption ids look like "<image filename>#<caption index>"
    fn = cap_ids[j].split("#")[0]
    cp = ban_trans[j]
    my_file = Path(f"/data/flickr8k_images/{fn}")
    if my_file.is_file():
        cp = cp.replace(',', ' ')
        train_dataframe['caption'].append(cp)
        train_dataframe['image'].append(f"/data/flickr8k_images/{fn}")

for j in tqdm(range(trn_split, len(cap_ids))):
    fn = cap_ids[j].split("#")[0]
    cp = ban_trans[j]
    my_file = Path(f"/data/flickr8k_images/{fn}")
    if my_file.is_file():
        cp = cp.replace(',', ' ')
        valid_dataframe['caption'].append(cp)
        valid_dataframe['image'].append(f"/data/flickr8k_images/{fn}")

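# The three dataset blocks above repeat the same "check image exists, strip
# commas, append" pattern. A minimal sketch of a helper that could replace the
# inner bodies (not part of the original script; `add_pair` is a hypothetical name):
#
#     def add_pair(frame, image_dir, filename, caption):
#         path = Path(f"{image_dir}/{filename}")
#         if path.is_file():
#             frame['caption'].append(caption.replace(',', ' '))
#             frame['image'].append(str(path))
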
train_dataframe = pd.DataFrame(train_dataframe)
valid_dataframe = pd.DataFrame(valid_dataframe)

print(train_dataframe.head())
print(valid_dataframe.head())

train_dataframe.to_csv('train_df_bang.csv', index=False)
valid_dataframe.to_csv('valid_df_bang.csv', index=False)
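
# A minimal sketch (not part of the original script) of how the exported CSVs
# could be read back when building a CLIP-style dataloader; the column names
# match the dataframes built above:
#
#     train_df = pd.read_csv('train_df_bang.csv')
#     valid_df = pd.read_csv('valid_df_bang.csv')
#     for image_path, caption in zip(train_df['image'], train_df['caption']):
#         ...  # load the image and tokenize the Bangla caption here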