Wikipedia Dataset Preprocessing
Notes
The Wikipedia dataset [1] is a cross-modal retrieval dataset with 2866 samples in 10 classes and two modalities, image and text.
The goal is to process the data following the setup of [2], which in turn appears to come from [3]: image features are the 4096-dim fc7 activations [5] of CaffeNet [4], and text features are obtained by extracting a 100-dim word2vec [6] vector for each word and averaging them over the document.
For now, the Keras pre-trained VGG16 [7, 8] is used in place of CaffeNet (see [12]); the word2vec features are generated with the gensim library [9] (see [13, 14]).
Data
Download from [10]. After unpacking there are two files, trainset_txt_img_cat.list and testset_txt_img_cat.list. Each line is one sample, with 3 columns: text file name, image file name, class id.
The text data lives under texts/ as .xml files. I originally wanted to parse them with minidom [11], but it fails on some special characters (e.g., a bare &). Lacking a better solution for now, the files are parsed by hand.
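One possible workaround (a sketch, not what is used below; parse_with_minidom is a hypothetical helper) is to escape bare & characters before handing the document to minidom:
import re
from xml.dom import minidom

def parse_with_minidom(fn):
    """Sketch: escape '&' that does not start an entity, then parse with minidom."""
    with open(fn, "r", encoding="utf-8") as f:
        raw = f.read()
    raw = re.sub(r"&(?!\w+;|#\d+;)", "&amp;", raw)  # leave &amp; / &#38; etc. untouched
    node = minidom.parseString(raw).getElementsByTagName("text")[0]
    return node.firstChild.data if node.firstChild else ""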
The image data lives under images/, organized into one folder per class.
Code
import os
from os.path import join
import numpy as np
from gensim.models import Word2Vec
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.keras.models import Model
P = "wikipedia_dataset"
IMG_P = "images"
TXT_P = "texts"
TRAIN_LIST = "trainset_txt_img_cat.list"
TEST_LIST = "testset_txt_img_cat.list"
os.chdir(P)  # change into the unpacked dataset directory
print(os.getcwd())
sample list
Read the sample lists first, so that images, texts, and labels can all be processed in the same order.
ls_img = []
ls_txt = []
ls_lab = []
for fname in (TRAIN_LIST, TEST_LIST):
    with open(fname, "r") as f:
        for line in f:
            txt_f, img_f, lab = line.split()
            # txt_f = join(TXT_P, txt_f, ".xml")
            # img_f = join(IMG_P, img_f, ".jpg")
            ls_img.append(img_f)
            ls_txt.append(txt_f)
            ls_lab.append(int(lab))
print(len(ls_img), len(ls_txt), len(ls_lab))
labels
The labels can be saved directly once read.
labels = np.asarray(ls_lab)
print(labels.shape)
np.save("labels.npy", labels)
texts
Parse the .xml files by hand and strip some extraneous punctuation.
def parse(fn):
    """Manually parse the xml: keep only the part between <text> and </text>"""
    res = ""
    flag = False
    with open(fn, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            if line == "</text>":
                break
            if flag:
                res += " " + line
            if line == "<text>":
                flag = True
    return res
def clean(strings, pattern):
    """Remove a punctuation pattern from every token"""
    return [s.replace(pattern, "") for s in strings]
"""解析 xml"""
sentences = []
for txt_f in ls_txt:
txt_f = join(TXT_P, "{}.xml".format(txt_f))
# print(txt_f)
doc = parse(txt_f) # 手动解析
# doc = minidom.parse(txt_f).documentElement.getElementsByTagName("text")[0].childNodes[0].data
words = doc.split()
# 清除多余符号
for pat in (",", ".", "!", "?", "''", "(", ")", "\"", ":", ";", "{", "}", "[", "]"):
words = clean(words, pat)
sentences.append(words)
print(len(sentences))
"""训练 word2vec 模型"""
# [3] 说用 skip-gram
w2v = Word2Vec(sentences, size=100, min_count=5, iter=50, sg=1) # sg = skip-gram
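With gensim >= 4.0 the parameters were renamed (size -> vector_size, iter -> epochs), so the equivalent call would be:
w2v = Word2Vec(sentences, vector_size=100, min_count=5, epochs=50, sg=1)  # gensim >= 4.0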
"""提取文本特征"""
texts = np.zeros([len(sentences), 100])
for i, s in enumerate(sentences):
cnt = 0
for w in s:
if w in w2v:
cnt += 1
texts[i] += w2v[w]
# 取平均词向量
texts[i] /= cnt
# 保存
np.save("texts.w2v.100.npy", texts)
images
Copy all the images into a single directory to simplify the file paths, then extract features with VGG16.
ALL_IMG_P = "images_all"
if not os.path.exists(ALL_IMG_P):
    os.makedirs(ALL_IMG_P)
"""copy everything into ALL_IMG_P"""
for cls in os.listdir(IMG_P):
    cls_d = join(IMG_P, cls)
    # print(os.listdir(cls_d))
    for img in os.listdir(cls_d):
        # os.system("cp {} {}".format(join(cls_d, img), ALL_IMG_P))  # linux
        os.system("copy {} {}".format(join(cls_d, img), ALL_IMG_P))  # windows
print(len(os.listdir(ALL_IMG_P)))
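As a cross-platform alternative to shelling out to cp/copy, the same copy can be done with shutil (a sketch):
import shutil

for cls in os.listdir(IMG_P):
    cls_d = join(IMG_P, cls)
    for img in os.listdir(cls_d):
        shutil.copy(join(cls_d, img), ALL_IMG_P)  # works on both linux and windows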
"""提特征"""
base_model = VGG16(weights='imagenet')
# print(base_model.summary())
model = Model(inputs=base_model.input, outputs=base_model.get_layer('fc2').output)  # fc2 is 4096-dim, the analogue of CaffeNet's fc7
# print(model.summary())
images = []
for i_name in ls_img:
    img_f = join(ALL_IMG_P, "{}.jpg".format(i_name))
    img = image.load_img(img_f, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    images.append(model.predict(x))
images = np.vstack(images)
print(images.shape)
# save
np.save("images.vgg16.npy", images)
Processed Data
Both the raw data and the processed features are on Baidu Netdisk: https://pan.baidu.com/s/19pjYO5Uxsq2aiGFqofp-CQ (extraction code: gr9m).
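A minimal sketch for loading the processed files (all 2866 samples, in list order: train first, then test):
import numpy as np

images = np.load("images.vgg16.npy")   # (2866, 4096) VGG16 fc2 features
texts = np.load("texts.w2v.100.npy")   # (2866, 100) averaged word2vec features
labels = np.load("labels.npy")         # (2866,) class ids
print(images.shape, texts.shape, labels.shape)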
References
- [1] A New Approach to Cross-Modal Multimedia Retrieval
- [2] Semi-Supervised Cross-Modal Retrieval with Label Prediction
- [3] Generalized Semi-supervised and Structured Subspace Learning for Cross-Modal Retrieval
- [4] Caffe: Convolutional Architecture for Fast Feature Embedding
- [5] caffe/models/bvlc_reference_caffenet/train_val.prototxt
- [6] Distributed Representations of Words and Phrases and their Compositionality
- [7] Very Deep Convolutional Networks for Large-Scale Image Recognition
- [8] VGG16
- [9] gensim
- [10] Cross-Modal Multimedia Retrieval
- [11] xml.dom.minidom
- [12] Applying Keras pre-trained models (3): extracting features from an arbitrary layer of VGG19
- [13] Word2Vec practice with Gensim
- [14] Learning word2vec with gensim