-
Notifications
You must be signed in to change notification settings - Fork 4
/
Copy pathimage_previewer.py
216 lines (191 loc) · 8.64 KB
/
image_previewer.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
# Core / IO
import pathlib
import configparser
import time
import shutil
import os
from io import BytesIO
import time
# Image processing (PIL)
import PIL
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
from PIL import ImageOps
PIL.Image.MAX_IMAGE_PIXELS = None
# Scipy stack
import numpy as np
#============================ Image Processing ============================#
def to1(x):
    """Squashing map from (-inf, inf) into (0, 1), centred so that to1(1) == 0.5.

    Parameters:
        x: scalar or numpy array.

    Returns:
        0.5 + t / (2*sqrt(t^2 + 1)) with t = 2x - 2; smooth and monotonic.
    """
    t = 2.0 * x - 2.0
    return 0.5 + t / (2.0 * np.sqrt(t * t + 1.0))
def trim_ends(data, cutoff):
    """Drop values outside the quantile band (1-cutoff, cutoff).

    Note: `cutoff` is interpreted as the *upper* quantile (e.g. 0.98), and the
    lower bound is taken at 1 - cutoff; passing a small value like 0.02 would
    invert the band and return an empty array.

    Parameters:
        data: 1D numpy array.
        cutoff: upper quantile in (0.5, 1).

    Returns:
        The elements of `data` strictly between the two quantile bounds.
    """
    upper = np.quantile(data, cutoff)
    lower = np.quantile(data, 1.0 - cutoff)
    below_upper = data[data < upper]
    return below_upper[below_upper > lower]
def expand_highs(x):
    """Piecewise contrast curve that expands values between 0.9 and 1.

    Below 0.9 the mapping is linear (slope 0.8/0.9); above 0.9 a quadratic
    term is added so the top of the range gets extra contrast. Continuous at
    x = 0.9 (both branches give 0.8) and maps 1.0 to exactly 1.0.
    """
    linear_part = lambda v: v * 0.8 / 0.9
    boosted_part = lambda v: 100.0 / 9.0 * (v - 0.9) ** 2 + 0.8 * v / 0.9
    return np.piecewise(x, [x <= 0.9, x > 0.9], [linear_part, boosted_part])
def image_histogram_equalization(image, number_bins=10000):
# from http://www.janeriksolem.net/histogram-equalization-with-python-and.html
# get image histogram
r = image.flatten()
r = r[~np.isnan(r)]
r = r[np.isfinite(r)]
try:
image_histogram, bins = np.histogram(image.flatten(), number_bins, density=True)
except ValueError:
return -1
cdf = image_histogram.cumsum() # cumulative distribution function
cdf = cdf / cdf[-1] # normalize
# use linear interpolation of cdf to find new pixel values
image_equalized = np.interp(image.flatten(), bins[:-1], cdf)
return image_equalized.reshape(image.shape)
def level_adjust(fits_arr):
    """
    Tone-maps a .fits image from the JWST using a robust combination of techniques.

    Parameters:
        fits_arr: a 2D numpy float64 array obtained from a .fits file.

    Returns:
        A float array in [0, 1] with the same shape as fits_arr, or the int -1
        when the input has no finite non-zero values (or equalization fails).
    """
    hist_dat = fits_arr.flatten()
    # Don't consider zero or infinite values when histogramming
    hist_dat = hist_dat[np.isfinite(hist_dat)]
    hist_dat = hist_dat[np.nonzero(hist_dat)]
    if len(hist_dat) == 0:
        return -1
    # Mask that is 1 where the input is non-zero; applied at the end so
    # zero-valued pixels stay exactly black.
    zeros = np.abs(np.sign(fits_arr))
    # Robust black/white points from the 3rd and 98th percentiles of the data.
    minval = np.quantile(hist_dat, 0.03)
    maxval = np.quantile(hist_dat, 0.98)
    rescaled = (fits_arr-minval)/(maxval-minval)
    # Clamp the extreme 0.2% tails on each end to suppress outlier pixels.
    rescaled_no_outliers = np.maximum(rescaled, np.quantile(rescaled, 0.002))
    rescaled_no_outliers = np.minimum(rescaled_no_outliers, np.quantile(rescaled_no_outliers, 1.0-0.002))
    img_eqd = image_histogram_equalization(rescaled_no_outliers)
    # image_histogram_equalization returns the int -1 on failure.
    if isinstance(img_eqd, int):
        return -1
    # Average of increasing powers of the equalized image: pushes midtones
    # down while keeping the brightest regions near 1.
    img_eqd = (pow(img_eqd, 4.0) + pow(img_eqd, 8.0) + pow(img_eqd, 16.0))/3.0
    # Blend 50/50 with the sigmoid-mapped linear data, then expand the highs.
    adjusted = expand_highs((img_eqd + to1(rescaled))*0.5)
    return np.clip(adjusted*zeros, 0.0, 1.0)
font = ImageFont.truetype("PTMono-Regular.ttf", 14)
def add_text(path_in, path_out, text):
    """
    Adds text to an image.

    Renders `text` in a 160-pixel-high footer below the image, widening the
    canvas if the text is wider than the image, and writes the result (as a
    greyscale image) to path_out.

    Parameters:
        path_in: Input image path
        path_out: Output photo path (removed first if it already exists)
        text: A string containing the text to add
    """
    try:
        os.remove(path_out)
    except FileNotFoundError:
        pass
    with Image.open(path_in) as source:
        # Measure the rendered text width so the canvas can grow to fit it.
        measurer = ImageDraw.Draw(source)
        text_width = measurer.textbbox((5, 5), text, font=font)[2] + 5  # bbox is (x0, y0, x1, y1)
        canvas_size = (max(source.size[0], text_width),
                       np.round(source.size[1]+160).astype(np.int64))
        canvas = Image.new(mode="L", size=canvas_size, color=(0))
        canvas.paste(source, (0, 0))
        writer = ImageDraw.Draw(canvas)
        writer.text((5, source.size[1] + 5),text,(255),font=font)
        canvas.save(path_out)
# scales to a set number of bytes
def scale_image_to_size(path, path_out, max_size_b, iterations):
    """
    Write a copy of the image at `path` to `path_out`, resized so that its
    PNG file size stays under `max_size_b` bytes and its pixel count stays
    within the bot's posting limits.

    Parameters:
        path: Input image path.
        path_out: Output image path.
        max_size_b: Maximum allowed output file size, in bytes.
        iterations: Number of binary-search steps when shrinking.

    Returns:
        The output dimensions: a length-2 int array, or the original
        (width, height) tuple when the file is copied through unchanged.
    """
    if os.path.getsize(path) < max_size_b:
        with Image.open(path) as img_in:
            dims_out = np.array(img_in.size)
        # .PNG compression means that a very homogeneous, non-noisy photo can have a small file size
        # while still being absolutely huge (in terms of its dimensions).
        #
        # I don't want this bot posting images with too many pixels, because Twitter doesn't like it.
        # So I set the cap at 3840x2160=8294400 pixels (4K resolution).
        if dims_out[0]*dims_out[1] > 8294400:
            # NOTE(review): dividing BOTH dimensions by the area ratio
            # over-shrinks (area scales by sf^2); dividing by sqrt(sf) would
            # land on the cap exactly. Left as-is: conservative but safe.
            sf = dims_out[0]*dims_out[1]/8294400.0
            last_good_size = np.round(dims_out/sf).astype(np.int64)
            print("Over ~8m pixels, shrinking...")
            with Image.open(path) as img:
                img = img.resize(last_good_size, resample=Image.Resampling.LANCZOS)
                img.save(path_out)
            print("Rescaled to dimensions " + str(last_good_size))
            return last_good_size
        # Already small enough in bytes and pixels: pass through unchanged.
        shutil.copyfile(path, path_out)
        with Image.open(path) as img_in:
            return img_in.size
    else:
        print("Image file size is too large (" + str(os.path.getsize(path)) + " bytes). Resizing...")
        with Image.open(path) as img_in:
            dims_jump = np.array(img_in.size)
            dims_out = np.array(img_in.size)
            # Fallback if no candidate ever fits (int array for a consistent return type).
            last_good_size = np.array(img_in.size)
        # Binary search for largest possible image dimensions
        for i in range(0, iterations):
            dims_jump = np.ceil(dims_jump/2).astype(np.int64)
            # Compress the candidate size in memory (NEAREST for speed during
            # the search; the final output below uses LANCZOS) and measure it.
            with Image.open(path) as img:
                img = img.resize(dims_out, resample=Image.Resampling.NEAREST)
                img_file = BytesIO()
                img.save(img_file, 'png')
                sz = img_file.tell()
            if sz < max_size_b:
                # BUG FIX: copy, and avoid in-place arithmetic. Previously
                # `last_good_size = dims_out` aliased the same ndarray and the
                # in-place += / -= below mutated it, so the recorded "last
                # good" size silently tracked dims_out instead of being kept.
                last_good_size = dims_out.copy()
                dims_out = dims_out + dims_jump
            else:
                dims_out = dims_out - dims_jump
        # Not sure why I chose 6144, there's probably a reason.
        # NOTE(review): the caps below are computed from dims_out rather than
        # last_good_size (original behavior, preserved).
        if max(dims_out[0], dims_out[1]) > 6144:
            sf = max(dims_out[0], dims_out[1]) / 6144.0
            last_good_size = np.round(dims_out/sf).astype(np.int64)
        # We need to do the 4K check here, too
        if dims_out[0]*dims_out[1] > 8000000:
            sf = dims_out[0]*dims_out[1]/8000000.0
            last_good_size = np.round(dims_out/sf).astype(np.int64)
            print("Over ~8m pixels, shrinking...")
        # <8 pixels is too small! Upscale it to 8 pixels if this happens.
        # (still too small, but not invisible, at least)
        if min(dims_out[0], dims_out[1]) <= 8:
            sf = min(dims_out[0], dims_out[1]) / 8.0
            last_good_size = np.round(dims_out/sf).astype(np.int64)
        print("Total pixels:", dims_out[0]*dims_out[1])
        with Image.open(path) as img:
            img = img.resize(last_good_size, resample=Image.Resampling.LANCZOS)
            img.save(path_out)
        print("Rescaled to dimensions " + str(last_good_size))
        return last_good_size
#============================ Main ============================#
def to_photo_BW(description, caption, data, path):
    """Render a 2D data array as a tone-mapped greyscale PNG with a text footer.

    Pipeline: level_adjust() tone-maps the data, the result is flipped
    vertically and saved, scale_image_to_size() shrinks it under the 3.5 MB
    posting limit, and add_text() appends the description footer. The two
    intermediate files are deleted afterwards.

    Parameters:
        description: Text rendered below the image.
        caption: Accepted for interface compatibility; not used here.
        data: 2D numpy float array to render.
        path: Output path prefix (extensions are appended).

    Returns:
        Path of the final "<path>_scaled_text.png" file.
    """
    raw_path = path + ".png"
    resized_path = path + "_scaled.png"
    final_path = path + "_scaled_text.png"
    levels = level_adjust(data)
    image = ImageOps.flip(Image.fromarray((levels*255).astype(np.uint8)))
    image.save(raw_path, format="PNG")
    scale_image_to_size(raw_path, resized_path, 3500000, 10)
    add_text(resized_path, final_path, description)
    # Clean up the intermediate renders, keeping only the final image.
    os.remove(raw_path)
    os.remove(resized_path)
    return final_path
if __name__ == "__main__":
    # Ensure the output directory exists before the processing loop starts.
    if not os.path.exists("./preview"):
        os.makedirs("./preview")
    # Poll ./data_queue forever: each .txt metadata file is paired with a
    # .npy data file sharing the same base name; render each pair into
    # ./preview and then delete the queue files.
    while True:
        metadata = list(filter(lambda s : ".txt" in s, os.listdir('./data_queue')))
        for txt_path in metadata:
            with open("./data_queue/" + txt_path, 'r') as f:
                # Metadata layout: "~"-separated sections; section 1 holds the
                # description (first and last lines dropped), section 2 the
                # caption (first line dropped).
                sections = f.read().split("~")
                description = '\n'.join(sections[1].split("\n")[1:-1])
                caption = '\n'.join(sections[2].split("\n")[1:])
                # Object ID: second token of the caption's second-to-last line.
                objid = int(caption.split("\n")[-2].split(" ")[1])
                print("---------------------")
                print("OBJ ID: " + str(objid))
                print("Caption: ")
                print(caption)
                print("Description: ")
                print(description)
                print("---------------------")
                # The .npy data file shares the first two dot-separated name
                # parts with the .txt file.
                data_path = "./data_queue/" + ".".join(txt_path.split(".")[0:2]) + ".npy"
                data = np.load(data_path)
                output_path = to_photo_BW(description, caption, data, "./preview/" + ".".join(txt_path.split(".")[0:2]))
                # Remove the processed queue entries so they are not re-rendered.
                os.remove("./data_queue/" + txt_path)
                os.remove(data_path)
        time.sleep(1)