import numpy as np
import itertools as it
import operator as op  # used below for op.and_ / op.add (may also be re-exported by helpers)
import os
from PIL import Image
from colour import Color
import aggdraw

from helpers import *
from mobject import Mobject, PMobject, VMobject, ImageMobject, Group

class Camera(object):
    CONFIG = {
        "background_image" : None,
        "pixel_shape" : (DEFAULT_HEIGHT, DEFAULT_WIDTH),
        # this will be resized to match pixel_shape
        "space_shape" : (SPACE_HEIGHT, SPACE_WIDTH),
        "space_center" : ORIGIN,
        "background_color" : BLACK,
        # Points in vectorized mobjects with norm greater
        # than this value will be rescaled.
        "max_allowable_norm" : 2*SPACE_WIDTH,
        "image_mode" : "RGBA",
        "n_rgb_coords" : 4,
        "background_alpha" : 0,  # out of 255
        "pixel_array_dtype" : 'uint8',
    }

    def __init__(self, background = None, **kwargs):
        digest_config(self, kwargs, locals())
        self.init_background()
        self.resize_space_shape()
        self.reset()
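
    # A minimal usage sketch (illustrative only; the mobjects come from the
    # rest of the library and the constants above from helpers):
    #   camera = Camera()
    #   camera.capture_mobjects(mobjects)
    #   frame = camera.get_pixel_array()       # (height, width, 4) uint8 array
    #   camera.get_image().save("frame.png")   # hypothetical output path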

    def resize_space_shape(self, fixed_dimension = 0):
        """
        Changes space_shape to match the aspect ratio
        of pixel_shape, where fixed_dimension determines
        whether space_shape[0] (height) or space_shape[1] (width)
        remains fixed while the other changes accordingly.
        """
        aspect_ratio = float(self.pixel_shape[1])/self.pixel_shape[0]
        space_height, space_width = self.space_shape
        if fixed_dimension == 0:
            space_width = aspect_ratio*space_height
        else:
            space_height = space_width/aspect_ratio
        self.space_shape = (space_height, space_width)

    def init_background(self):
        if self.background_image is not None:
            path = get_full_image_path(self.background_image)
            image = Image.open(path).convert(self.image_mode)
            height, width = self.pixel_shape
            # TODO, how to gracefully handle backgrounds
            # with different sizes?
            self.background = np.array(image)[:height, :width]
            self.background = self.background.astype(self.pixel_array_dtype)
        else:
            background_rgba = color_to_int_rgba(
                self.background_color, alpha = self.background_alpha
            )
            self.background = np.zeros(
                list(self.pixel_shape)+[self.n_rgb_coords],
                dtype = self.pixel_array_dtype
            )
            self.background[:,:] = background_rgba

    def get_image(self):
        return Image.fromarray(
            self.pixel_array,
            mode = self.image_mode
        )

    def get_pixel_array(self):
        return self.pixel_array

    def set_pixel_array(self, pixel_array):
        self.pixel_array = np.array(pixel_array)

    def set_background(self, pixel_array):
        self.background = np.array(pixel_array)

    def reset(self):
        self.set_pixel_array(np.array(self.background))

    def capture_mobject(self, mobject):
        return self.capture_mobjects([mobject])

    def capture_mobjects(self, mobjects, include_submobjects = True):
        if include_submobjects:
            mobjects = it.chain(*[
                mob.family_members_with_points()
                for mob in mobjects
            ])
        vmobjects = []
        for mobject in mobjects:
            if isinstance(mobject, VMobject):
                vmobjects.append(mobject)
            elif len(vmobjects) > 0:
                self.display_multiple_vectorized_mobjects(vmobjects)
                vmobjects = []

            if isinstance(mobject, PMobject):
                self.display_point_cloud(
                    mobject.points, mobject.rgbas,
                    self.adjusted_thickness(mobject.stroke_width)
                )
            elif isinstance(mobject, ImageMobject):
                self.display_image_mobject(mobject)
            elif isinstance(mobject, Mobject):
                pass  # Remainder of loop will handle submobjects
            else:
                raise Exception(
                    "Unknown mobject type: " + mobject.__class__.__name__
                )
            # TODO, more? Call out if it's unknown?
        self.display_multiple_vectorized_mobjects(vmobjects)

    def display_multiple_vectorized_mobjects(self, vmobjects):
        if len(vmobjects) == 0:
            return
        # More efficient to bundle together in one "canvas"
        image = Image.fromarray(self.pixel_array, mode = self.image_mode)
        canvas = aggdraw.Draw(image)
        for vmobject in vmobjects:
            self.display_vectorized(vmobject, canvas)
        canvas.flush()
        self.pixel_array[:,:] = image

    def display_vectorized(self, vmobject, canvas):
        if vmobject.is_subpath:
            # Subpath vectorized mobjects are taken care
            # of by their parent
            return
        pen, fill = self.get_pen_and_fill(vmobject)
        pathstring = self.get_pathstring(vmobject)
        symbol = aggdraw.Symbol(pathstring)
        canvas.symbol((0, 0), symbol, pen, fill)

    def get_pen_and_fill(self, vmobject):
        pen = aggdraw.Pen(
            self.color_to_hex_l(self.get_stroke_color(vmobject)),
            max(vmobject.stroke_width, 0)
        )
        fill = aggdraw.Brush(
            self.color_to_hex_l(self.get_fill_color(vmobject)),
            opacity = int(255*vmobject.get_fill_opacity())
        )
        return (pen, fill)

    def color_to_hex_l(self, color):
        try:
            return color.get_hex_l()
        except:
            return Color(BLACK).get_hex_l()

    def get_stroke_color(self, vmobject):
        return vmobject.get_stroke_color()

    def get_fill_color(self, vmobject):
        return vmobject.get_fill_color()

    def get_pathstring(self, vmobject):
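        # The string built below uses the SVG-style path syntax that
        # aggdraw.Symbol parses: "M x y" moves to the first anchor point,
        # each "C x1 y1 x2 y2 x3 y3" is a cubic Bezier through two handles
        # to the next anchor, and "Z" closes the path.  A made-up example:
        #   "M 10 20 C 15 25 20 25 25 20 Z"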
result = ""
for mob in [vmobject]+vmobject.get_subpath_mobjects():
points = mob.points
# points = self.adjust_out_of_range_points(points)
if len(points) == 0:
continue
points = self.align_points_to_camera(points)
coords = self.points_to_pixel_coords(points)
start = "M%d %d"%tuple(coords[0])
#(handle1, handle2, anchor) tripletes
triplets = zip(*[
coords[i+1::3]
for i in range(3)
])
cubics = [
"C" + " ".join(map(str, it.chain(*triplet)))
for triplet in triplets
]
end = "Z" if vmobject.mark_paths_closed else ""
result += " ".join([start] + cubics + [end])
return result

    def display_point_cloud(self, points, rgbas, thickness):
        if len(points) == 0:
            return
        points = self.align_points_to_camera(points)
        pixel_coords = self.points_to_pixel_coords(points)
        pixel_coords = self.thickened_coordinates(
            pixel_coords, thickness
        )
        rgb_len = self.pixel_array.shape[2]

        rgbas = (255*rgbas).astype('uint8')
        target_len = len(pixel_coords)
        factor = target_len/len(rgbas)
        rgbas = np.array([rgbas]*factor).reshape((target_len, rgb_len))

        on_screen_indices = self.on_screen_pixels(pixel_coords)
        pixel_coords = pixel_coords[on_screen_indices]
        rgbas = rgbas[on_screen_indices]
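        # Flatten each (x, y) pixel coordinate into an index of a
        # (ph*pw, rgb_len) view of the pixel array: rows are stored
        # contiguously with pw pixels each, so index = x + pw*y.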
        ph, pw = self.pixel_shape
        flattener = np.array([1, pw], dtype = 'int')
        flattener = flattener.reshape((2, 1))
        indices = np.dot(pixel_coords, flattener)[:,0]
        indices = indices.astype('int')

        new_pa = self.pixel_array.reshape((ph*pw, rgb_len))
        new_pa[indices] = rgbas
        self.pixel_array = new_pa.reshape((ph, pw, rgb_len))

    def display_image_mobject(self, image_mobject):
        corner_coords = self.points_to_pixel_coords(image_mobject.points)
        ul_coords, ur_coords, dl_coords = corner_coords
        right_vect = ur_coords - ul_coords
        down_vect = dl_coords - ul_coords

        impa = image_mobject.pixel_array
        oh, ow = self.pixel_array.shape[:2]  # Outer height and width
        ih, iw = impa.shape[:2]  # Inner height and width
        rgb_len = self.pixel_array.shape[2]

        image = np.zeros((oh, ow, rgb_len), dtype = self.pixel_array_dtype)
        if right_vect[1] == 0 and down_vect[0] == 0:
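            # Axis-aligned case: nearest-neighbour resample the image to the
            # size of the target rectangle with integer index arrays, then
            # paste only the on-screen portion into the frame.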
            rv0 = right_vect[0]
            dv1 = down_vect[1]
            x_indices = np.arange(rv0, dtype = 'int')*iw/rv0
            y_indices = np.arange(dv1, dtype = 'int')*ih/dv1
            stretched_impa = impa[y_indices][:,x_indices]

            x0, x1 = ul_coords[0], ur_coords[0]
            y0, y1 = ul_coords[1], dl_coords[1]
            if x0 >= ow or x1 < 0 or y0 >= oh or y1 < 0:
                return
            siy0 = max(-y0, 0)  # stretched_impa y0
            siy1 = dv1 - max(y1-oh, 0)
            six0 = max(-x0, 0)
            six1 = rv0 - max(x1-ow, 0)
            x0 = max(x0, 0)
            y0 = max(y0, 0)
            image[y0:y1, x0:x1] = stretched_impa[siy0:siy1, six0:six1]
        else:
            # Alternate (slower) tactic if the image is tilted
            # List of all coordinates of pixels, given as (x, y),
            # which matches the return type of points_to_pixel_coords,
            # even though np.array indexing naturally happens as (y, x)
            all_pixel_coords = np.zeros((oh*ow, 2), dtype = 'int')
            a = np.arange(oh*ow, dtype = 'int')
            all_pixel_coords[:,0] = a%ow
            all_pixel_coords[:,1] = a/ow

            recentered_coords = all_pixel_coords - ul_coords
            coord_norms = np.linalg.norm(recentered_coords, axis = 1)

            with np.errstate(divide = 'ignore'):
                ix_coords, iy_coords = [
                    np.divide(
                        dim*np.dot(recentered_coords, vect),
                        np.dot(vect, vect),
                    )
                    for vect, dim in (right_vect, iw), (down_vect, ih)
                ]
            to_change = reduce(op.and_, [
                ix_coords >= 0, ix_coords < iw,
                iy_coords >= 0, iy_coords < ih,
            ])
            n_to_change = np.sum(to_change)
            inner_flat_coords = iw*iy_coords[to_change] + ix_coords[to_change]
            flat_impa = impa.reshape((iw*ih, rgb_len))
            target_rgbas = flat_impa[inner_flat_coords, :]

            image = image.reshape((ow*oh, rgb_len))
            image[to_change] = target_rgbas
            image = image.reshape((oh, ow, rgb_len))
        self.overlay_rgba_array(image)

    def overlay_rgba_array(self, arr):
        """Overlays arr onto self.pixel_array with relevant alphas"""
        bg, fg = self.pixel_array/255.0, arr/255.0
        bga, fga = [arr[:,:,3:] for arr in bg, fg]
        alpha_sum = fga + (1-fga)*bga
        with np.errstate(divide = 'ignore', invalid = 'ignore'):
            bg[:,:,:3] = reduce(op.add, [
                np.divide(fg[:,:,:3]*fga, alpha_sum),
                np.divide(bg[:,:,:3]*(1-fga)*bga, alpha_sum),
            ])
        bg[:,:,3:] = 1 - (1 - bga)*(1 - fga)
        self.pixel_array = (255*bg).astype(self.pixel_array_dtype)

    def align_points_to_camera(self, points):
        ## This is where projection should live
        return points - self.space_center

    def adjust_out_of_range_points(self, points):
        if not np.any(points > self.max_allowable_norm):
            return points
        norms = np.apply_along_axis(np.linalg.norm, 1, points)
        violator_indices = norms > self.max_allowable_norm
        violators = points[violator_indices,:]
        violator_norms = norms[violator_indices]
        reshaped_norms = np.repeat(
            violator_norms.reshape((len(violator_norms), 1)),
            points.shape[1], 1
        )
        rescaled = self.max_allowable_norm * violators / reshaped_norms
        points[violator_indices] = rescaled
        return points

    def points_to_pixel_coords(self, points):
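        # Map space coordinates to pixel coordinates: x is scaled by
        # pw/(2*sw) and shifted by pw/2; y is scaled by ph/(2*sh), negated
        # (pixel rows grow downward while space y grows upward) and shifted
        # by ph/2.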
        result = np.zeros((len(points), 2))
        ph, pw = self.pixel_shape
        sh, sw = self.space_shape
        width_mult  = pw/sw/2
        width_add   = pw/2
        height_mult = ph/sh/2
        height_add  = ph/2
        # Flip on y-axis as you go
        height_mult *= -1

        result[:,0] = points[:,0]*width_mult + width_add
        result[:,1] = points[:,1]*height_mult + height_add
        return result.astype('int')

    def on_screen_pixels(self, pixel_coords):
        return reduce(op.and_, [
            pixel_coords[:,0] >= 0,
            pixel_coords[:,0] < self.pixel_shape[1],
            pixel_coords[:,1] >= 0,
            pixel_coords[:,1] < self.pixel_shape[0],
        ])

    def adjusted_thickness(self, thickness):
        big_shape = PRODUCTION_QUALITY_CAMERA_CONFIG["pixel_shape"]
        factor = sum(big_shape)/sum(self.pixel_shape)
        return 1 + (thickness-1)/factor

    def get_thickening_nudges(self, thickness):
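        # Offsets of the form (0, dy) and (dx, 0) with dx, dy spanning roughly
        # [-thickness/2, thickness/2], so each point is drawn as a small
        # plus-shaped cluster of pixels.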
        _range = range(-thickness/2+1, thickness/2+1)
        return np.array(
            list(it.product([0], _range)) +
            list(it.product(_range, [0]))
        )

    def thickened_coordinates(self, pixel_coords, thickness):
        nudges = self.get_thickening_nudges(thickness)
        pixel_coords = np.array([
            pixel_coords + nudge
            for nudge in nudges
        ])
        size = pixel_coords.size
        return pixel_coords.reshape((size/2, 2))


class MovingCamera(Camera):
    """
    Stays in line with the height, width and position
    of a given mobject
    """
    CONFIG = {
        "aligned_dimension" : "width"  # or height
    }
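
    # Sketch (illustrative): MovingCamera(some_mobject, aligned_dimension = "width")
    # re-centers the frame on some_mobject and rescales space_shape to its
    # width (or height) before every capture.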

    def __init__(self, mobject, **kwargs):
        digest_locals(self)
        Camera.__init__(self, **kwargs)

    def capture_mobjects(self, *args, **kwargs):
        self.space_center = self.mobject.get_center()
        self.realign_space_shape()
        Camera.capture_mobjects(self, *args, **kwargs)

    def realign_space_shape(self):
        height, width = self.space_shape
        if self.aligned_dimension == "height":
            self.space_shape = (self.mobject.get_height()/2, width)
        else:
            self.space_shape = (height, self.mobject.get_width()/2)
        self.resize_space_shape(
            0 if self.aligned_dimension == "height" else 1
        )