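"""End-to-end handwriting recognition pipeline: a detectron2 Mask R-CNN
segments word regions, each crop is recognized by a CRNN (ResNet-34 backbone
with a BiLSTM head) decoded by a KenLM-scored CTC beam search, and the
predictions are written to a single JSON file."""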
import json
import os
import sys
import warnings
from ctcdecode import CTCBeamDecoder
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from tqdm import tqdm
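# Suppress all warnings (including FutureWarnings) before importing
# torch and detectron2.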
warnings.simplefilter(action="ignore", category=FutureWarning)
warnings.filterwarnings("ignore")
import logging
import torch
import torch.nn as nn
import torchvision
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
logger = logging.getLogger("detectron2")
logger.setLevel(logging.CRITICAL)
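# Usage: python run.py <test_images_dir> <output_json_path>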
TEST_IMAGES_PATH, SAVE_PATH = sys.argv[1:]
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
SEGM_MODEL_PATH = "model_final 17k 2160 3130.pth"
OCR_MODEL_PATH = "a model-0-0.0374.ckpt"
CONFIG_JSON = {
"alphabet": """@ !"%'()+,-./0123456789:;=?AEFIMNOSTW[]abcdefghiklmnopqrstuvwxyАБВГДЕЖЗИКЛМНОПРСТУХЦЧШЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё№""",
"image": {"width": 256, "height": 64},
}
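# Keep only the mask contours with an area of at least min_area pixels.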
def get_contours_from_mask(mask, min_area=5):
contours, hierarchy = cv2.findContours(
mask.astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE
)
contour_list = []
for contour in contours:
if cv2.contourArea(contour) >= min_area:
contour_list.append(contour)
return contour_list
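# Return the contour with the largest area, or None for an empty list.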
def get_larger_contour(contours):
larger_area = 0
larger_contour = None
for contour in contours:
area = cv2.contourArea(contour)
if area > larger_area:
larger_contour = contour
larger_area = area
return larger_contour
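# Turn pure-black pixels white; crop_img_by_polygon fills everything outside
# the polygon with black, which this maps back to a white background.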
def black2white(image):
    lo = np.array([0, 0, 0])
    hi = np.array([0, 0, 0])
    mask = cv2.inRange(image, lo, hi)
    image[mask > 0] = (255, 255, 255)
    return image
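# Word-instance segmentation with a detectron2 Mask R-CNN (X-101-FPN config
# from the model zoo, fine-tuned weights). The SOLVER settings below are
# training-time leftovers and are ignored by DefaultPredictor.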
class SEGMpredictor:
def __init__(self, model_path):
cfg = get_cfg()
cfg.merge_from_file(
model_zoo.get_config_file(
"COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml"
)
)
cfg.MODEL.WEIGHTS = model_path
cfg.TEST.EVAL_PERIOD = 1000
cfg.INPUT.MIN_SIZE_TRAIN = 2160
cfg.INPUT.MAX_SIZE_TRAIN = 3130
cfg.INPUT.MIN_SIZE_TEST = 2160
cfg.INPUT.MAX_SIZE_TEST = 3130
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.1
cfg.INPUT.FORMAT = 'BGR'
cfg.DATALOADER.NUM_WORKERS = 4
cfg.SOLVER.IMS_PER_BATCH = 3
cfg.SOLVER.BASE_LR = 0.01
cfg.SOLVER.GAMMA = 0.1
cfg.SOLVER.STEPS = (1500,)
cfg.SOLVER.MAX_ITER = 17000
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
cfg.SOLVER.CHECKPOINT_PERIOD = cfg.TEST.EVAL_PERIOD
cfg.TEST.DETECTIONS_PER_IMAGE = 1000
cfg.OUTPUT_DIR = './output'
self.predictor = DefaultPredictor(cfg)
def __call__(self, img):
outputs = self.predictor(img)
prediction = outputs["instances"].pred_masks.cpu().numpy()
contours = []
for pred in prediction:
contour_list = get_contours_from_mask(pred)
contours.append(get_larger_contour(contour_list))
return contours
OOV_TOKEN = "<OOV>"
CTC_BLANK = "<BLANK>"
def get_char_map(alphabet):
    """Build a char -> int map from the alphabet string, reserving index 0
    for the CTC blank token (out-of-vocabulary characters are handled in
    Tokenizer.encode)."""
    char_map = {value: idx + 1 for (idx, value) in enumerate(alphabet)}
    char_map[CTC_BLANK] = 0
    return char_map
class Tokenizer:
    """Encodes words (str) to sequences of ints and decodes them back
    using the alphabet."""
def __init__(self, alphabet):
self.char_map = get_char_map(alphabet)
self.rev_char_map = {val: key for key, val in self.char_map.items()}
    def encode(self, word_list):
        """Returns a list of encoded words (int). Characters outside the
        alphabet fall back to index 1."""
        enc_words = []
        for word in word_list:
            enc_words.append(
                [self.char_map[char] if char in self.char_map
                 else 1  # fallback for out-of-vocabulary characters
                 for char in word]
            )
        return enc_words
def get_num_chars(self):
return len(self.char_map)
    def decode(self, enc_word_list):
        """Returns a list of words (str) after removing blanks and collapsing
        repeated characters."""
dec_words = []
for word in enc_word_list:
word_chars = ''
for idx, char_enc in enumerate(word):
                # skip blank symbols and repeated characters
if (
char_enc != self.char_map[CTC_BLANK]
# idx > 0 to avoid selecting [-1] item
and not (idx > 0 and char_enc == word[idx - 1])
):
word_chars += self.rev_char_map[char_enc]
dec_words.append(word_chars)
return dec_words
    def decode_after_beam(self, enc_word_list):
        """Maps beam-search output indices straight back to characters;
        the beam decoder has already removed blanks and collapsed repeats."""
dec_words = []
for word in enc_word_list:
word_chars = ''
for idx, char_enc in enumerate(word):
word_chars += self.rev_char_map[char_enc]
dec_words.append(word_chars)
return dec_words
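# Lightweight inference-time transforms: scale pixels to [0, 1], move to
# channels-first layout and convert to a torch tensor.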
class Normalize:
def __call__(self, img):
img = img.astype(np.float32) / 255
return img
class ToTensor:
def __call__(self, arr):
arr = torch.from_numpy(arr)
return arr
class MoveChannels:
    """Move the channel axis to position zero (channels-first), as PyTorch expects."""
def __init__(self, to_channels_first=True):
self.to_channels_first = to_channels_first
def __call__(self, image):
if self.to_channels_first:
return np.moveaxis(image, -1, 0)
else:
return np.moveaxis(image, 0, -1)
class ImageResize:
def __init__(self, height, width):
self.height = height
self.width = width
def __call__(self, image):
image = cv2.resize(
image, (self.width, self.height), interpolation=cv2.INTER_LINEAR
)
return image
def get_val_transforms(height, width):
    # height/width are unused here; resizing is done in process_image
transforms = torchvision.transforms.Compose(
[
MoveChannels(to_channels_first=True),
Normalize(),
ToTensor(),
]
)
return transforms
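# ResNet-34 trunk cut after layer3; the stock stride-2 stem convolution is
# replaced by a stride-1 one to preserve spatial resolution.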
def get_resnet34_backbone(pretrained=True):
m = torchvision.models.resnet34(pretrained=pretrained)
input_conv = nn.Conv2d(3, 64, 7, 1, 3)
blocks = [input_conv, m.bn1, m.relu,
m.maxpool, m.layer1, m.layer2, m.layer3]
return nn.Sequential(*blocks)
class BiLSTM(nn.Module):
def __init__(self, input_size, hidden_size, num_layers, dropout=0.1):
super().__init__()
self.lstm = nn.LSTM(
input_size, hidden_size, num_layers,
dropout=dropout, batch_first=True, bidirectional=True)
def forward(self, x):
out, _ = self.lstm(x)
return out
class CRNN(nn.Module):
def __init__(
self, number_class_symbols
):
super().__init__()
self.feature_extractor = get_resnet34_backbone(pretrained=False)
self.avg_pool = nn.AdaptiveAvgPool2d(
(512, 32))
self.bilstm = BiLSTM(512, 256, 2)
self.classifier = nn.Sequential(
nn.Linear(512, 256),
nn.GELU(),
nn.Dropout(0.1),
nn.Linear(256, number_class_symbols)
)
    def forward(self, x):
        x = self.feature_extractor(x)  # (B, C, H, W) feature map
        b, c, h, w = x.size()
        x = x.view(b, c * h, w)  # flatten channels and height into one axis
        x = self.avg_pool(x)  # pool down to a fixed (512, 32) map
        x = x.transpose(1, 2)  # (B, T=32, F=512) sequence for the BiLSTM
        x = self.bilstm(x)
        x = self.classifier(x)
        # (T, B, num_classes) log-probabilities, the layout CTC decoders expect
        x = nn.functional.log_softmax(x, dim=2).permute(1, 0, 2)
        return x
def predict(images, model, tokenizer, device):
    # tokenizer is unused here; decoding happens in OcrPredictor.__call__
    model.eval()
images = images.to(device)
with torch.no_grad():
output = model(images)
return output
class InferenceTransform:
def __init__(self, height, width):
self.transforms = get_val_transforms(height, width)
def __call__(self, images):
transformed_images = []
for image in images:
image = self.transforms(image)
transformed_images.append(image)
transformed_tensor = torch.stack(transformed_images, 0)
return transformed_tensor
def process_image(img, n_w=256, n_h=64):
    """Resize to the target height preserving aspect ratio, then zero-pad
    (or shrink) the result to exactly n_h x n_w."""
    h, w, _ = img.shape
    new_h = n_h
    new_w = int(w * (new_h / h))
    img = cv2.resize(img, (new_w, new_h))
    h, w, _ = img.shape
    if h < n_h:
        add_zeros = np.full((n_h - h, w, 3), 0)
        img = np.concatenate((img, add_zeros))
        h, w, _ = img.shape
    if w < n_w:
        add_zeros = np.full((h, n_w - w, 3), 0)
        img = np.concatenate((img, add_zeros), axis=1)
        h, w, _ = img.shape
    if w > n_w or h > n_h:
        img = cv2.resize(img, (n_w, n_h))
    return img
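# CRNN inference plus CTC beam-search decoding scored by a KenLM language
# model. labels_for_bs mirrors the tokenizer alphabet with "_" for the CTC
# blank at index 0 and "|" standing in for the space character.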
class OcrPredictor:
    def __init__(self, model_path, config, device=None):
        self.tokenizer = Tokenizer(config["alphabet"])
        # fall back to the global DEVICE so CPU-only machines still work
        self.device = torch.device(device) if device is not None else DEVICE
# load model
self.model = CRNN(number_class_symbols=self.tokenizer.get_num_chars())
        self.model.load_state_dict(torch.load(model_path, map_location=self.device))
self.model.to(self.device)
self.transforms = InferenceTransform(
height=config["image"]["height"],
width=config["image"]["width"],
)
labels_for_bs = """_@|!"%'()+,-./0123456789:;=?AEFIMNOSTW[]abcdefghiklmnopqrstuvwxyАБВГДЕЖЗИКЛМНОПРСТУХЦЧШЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё№"""
self.decoder = CTCBeamDecoder(
list(labels_for_bs),
model_path='nto_kenlm_model10.arpa',
alpha=0.22,
beta=1.1,
cutoff_top_n=5,
cutoff_prob=1,
beam_width=10,
num_processes=4,
blank_id=0,
log_probs_input=True)
    def __call__(self, image):
        # black2white and process_image operate on a single image, so only
        # an np.ndarray input is supported
        if not isinstance(image, np.ndarray):
            raise TypeError(f"Input must be an np.ndarray, found {type(image)}.")
        image = black2white(image)
        images = [process_image(image)]
images = self.transforms(images)
output = predict(images, self.model, self.tokenizer, self.device)
beam_results, beam_scores, timesteps, out_lens = self.decoder.decode(output.permute(1, 0, 2))
encoded_text = beam_results[0][0][:out_lens[0][0]]
text_pred = self.tokenizer.decode_after_beam([encoded_text.numpy()])[0]
return text_pred
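# Debug helper (not called from main): draws the predicted polygons on the
# image and renders the recognized text next to it.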
def get_image_visualization(img, pred_data, fontpath, font_koef=50):
h, w = img.shape[:2]
font = ImageFont.truetype(fontpath, int(h / font_koef))
empty_img = Image.new("RGB", (w, h), (255, 255, 255))
draw = ImageDraw.Draw(empty_img)
for prediction in pred_data["predictions"]:
polygon = prediction["polygon"]
pred_text = prediction["text"]
cv2.drawContours(img, np.array([polygon]), -1, (0, 255, 0), 2)
x, y, w, h = cv2.boundingRect(np.array([polygon]))
draw.text((x, y), pred_text, fill=0, font=font)
vis_img = np.array(empty_img)
vis = np.concatenate((img, vis_img), axis=1)
return vis
def crop_img_by_polygon(img, polygon):
    # https://stackoverflow.com/questions/48301186/cropping-concave-polygon-from-image-using-opencv-python
    pts = np.array(polygon)
    x, y, w, h = cv2.boundingRect(pts)
    cropped = img[y : y + h, x : x + w].copy()
    pts = pts - pts.min(axis=0)
    mask = np.zeros(cropped.shape[:2], np.uint8)
    cv2.drawContours(mask, [pts], -1, (255, 255, 255), -1, cv2.LINE_AA)
    dst = cv2.bitwise_and(cropped, cropped, mask=mask)
    return dst
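# Full pipeline: segment word polygons, crop each polygon from the page and
# run OCR on the crop.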
class PipelinePredictor:
def __init__(self, segm_model_path, ocr_model_path, ocr_config):
self.segm_predictor = SEGMpredictor(model_path=segm_model_path)
self.ocr_predictor = OcrPredictor(model_path=ocr_model_path, config=ocr_config)
def __call__(self, img):
output = {"predictions": []}
contours = self.segm_predictor(img)
for contour in contours:
if contour is not None:
crop = crop_img_by_polygon(img, contour)
pred_text = self.ocr_predictor(crop)
output["predictions"].append(
{
"polygon": [[int(i[0][0]), int(i[0][1])] for i in contour],
"text": pred_text,
}
)
return output
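# Iterate over the test images, run the pipeline on each and dump all
# predictions to a single JSON file.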
def main():
    pipeline_predictor = PipelinePredictor(
segm_model_path=SEGM_MODEL_PATH,
ocr_model_path=OCR_MODEL_PATH,
ocr_config=CONFIG_JSON,
)
pred_data = {}
for img_name in tqdm(os.listdir(TEST_IMAGES_PATH)):
image = cv2.imread(os.path.join(TEST_IMAGES_PATH, img_name))
pred_data[img_name] = pipeline_predictor(image)
with open(SAVE_PATH, "w") as f:
json.dump(pred_data, f)
if __name__ == "__main__":
main()