# Sid_Dithers.py
# =======================
# Imports
# =======================
import os, sys, time, re
from datetime import datetime as dt
from itertools import chain, filterfalse
from urllib.request import urlopen
from urllib.request import Request
import urllib.error
import numpy as np
from PIL import Image
# =======================
# Functions
# =======================
# Internet Functions
def check_site_redirected(starturl):
    # Follow any redirects and return the final URL that starturl resolves to
    res = urlopen(starturl)
    finalurl = res.geturl()
    return finalurl
def get_links(start_html, c_regex):
html_links = []
try:
# Spoof Header to look like regular user browser
req = Request(start_html)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.130 AOL/9.8 AOLBuild/4346.2019.US Safari/537.36')
start_file = urlopen(req)
except urllib.error.HTTPError as e:
print('Download Failed for',start_html)
print('Status', e.code,' - Reason', e.reason,' - Url', e.url)
return html_links
    start_text = str(start_file.read(), encoding="utf-8", errors="ignore")  # decode as UTF-8; errors="ignore" drops undecodable bytes (errors="replace" would turn them into replacement characters instead)
start_file.close()
#print("DEBUG - sleep on link download")
#time.sleep(0.5) # DEBUG
if start_text != "": # If content exists
html_links = c_regex.findall(start_text)
return html_links
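# A minimal usage sketch for get_links (the URL and the regex below are hypothetical,
# purely to illustrate the calling convention):
##jpg_links = get_links("https://example.com/gallery.html", re.compile(r'href="([^"]+\.jpg)"'))
##print(jpg_links)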
# Data Pipe functions
def pipe_inspector(link_input):
    # Pass-through pipe stage that prints each link in quoted-list form (handy for debugging a pipe)
    print("'", link_input, "',")
    yield link_input
# ----------------
def link_from_list(link_list):
for link_input in link_list:
yield link_input
def get_test_links(link_input, c_regex):
html_links = c_regex.findall(str(link_input))
return html_links
def link_text_replace_filter(file_link):
return [file_link.replace("vids","highres")]
def link_prefix_adder(file_link):
    # Prepend a module-level prefix_adder string to the link; prefix_adder must be defined
    # before this stage is used (the link_prefix class below is the self-contained version)
    return [prefix_adder + file_link]
def file_name_filter(file_link, c_regex):
    # Keep the link only if its base file name (extension stripped) matches c_regex
    file_name = file_link[file_link.rfind("/")+1:]
    file_name = file_name[:-4]  # drop the 4-character extension, e.g. ".jpg"
    #print(file_name)
    link_filter = c_regex.findall(str(file_name))
    if link_filter == []:
        return []
    else:
        return [file_link]
def file_name_generator(file_link):
str_template = "%2d"
file_list = []
for c in range(1,10+1):
file_name = str_template % (c)
file_list.append(file_name)
return file_list
# ========================
class link_prefix():
def __init__(self, file_prefix):
self.file_prefix = file_prefix
return
def add(self, file_link):
#print("'" + self.file_prefix + file_link + "',")
return [self.file_prefix + file_link]
class link_two_string():
    # Build one string from string_format, filling its two slots with the link and/or a transformed copy of it
    def __init__(self, string_format, func1=None, func2=None):
        self.string_format = string_format
        self.func1 = func1
        self.func2 = func2
        return
    def add(self, file_link):
        if self.func1 is None and self.func2 is not None:
            return [self.string_format % (file_link, self.func2(file_link))]
        elif self.func1 is not None and self.func2 is None:
            return [self.string_format % (self.func1(file_link), file_link)]
        elif self.func1 is not None and self.func2 is not None:
            return [self.string_format % (self.func1(file_link), self.func2(file_link))]
        else:
            return [self.string_format % (file_link, file_link)]
class link_generator():
def __init__(self, file_ext,header,start,end):
self.file_ext = file_ext
self.header = header
self.start = start
self.end = end
self.filelist = []
return
def add(self, file_link):
self.filelist = []
for c in range(self.start,self.end+1):
#filename = file_link + "/" + self.header % (c) + self.file_ext
filename = file_link + self.header % (c) + self.file_ext
self.filelist.append(filename)
return self.filelist
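# A minimal sketch of how the link_* wrapper classes are used as pipe stages (the URLs
# below are hypothetical; only the calling pattern matters):
##gen = link_generator(".jpg", "img%02d", 1, 3)
##gen.add("https://example.com/album/")   # -> ['https://example.com/album/img01.jpg', ..., '.../img03.jpg']
##pre = link_prefix("https://example.com")
##pre.add("/album/img01.jpg")             # -> ['https://example.com/album/img01.jpg']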
# Iterator Functions
def html_cal_iter(html_str,start_year,end_year,end_month):
for tgt_year in range(start_year, end_year+1):
for tgt_month in range(1,12+1):
if tgt_year < end_year or (tgt_year == end_year and tgt_month <= end_month):
yield html_str % (tgt_year,tgt_month)
def html_num_iter(html_str,start_page,end_page):
for tgt_page in range(start_page, end_page+1):
yield html_str % (tgt_page)
def html_list_iter(iter_list):
for tgt_page in iter_list:
yield tgt_page
def html_alpha_iter(html_str,start_letter=0,end_letter=26,use_upper=True):
alpha = "abcdefghijklmnopqrstuvwxyz"
alpha = alpha[start_letter:end_letter]
if use_upper:
alpha = alpha.upper()
for tgt_page in alpha:
yield html_str % (tgt_page)
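# A minimal sketch of the iterator helpers (URLs are hypothetical); each yields one
# formatted page URL per step, ready to seed a data pipe:
##for url in html_num_iter("https://example.com/page/%d", 1, 3):
##    print(url)   # .../page/1  .../page/2  .../page/3
##for url in html_cal_iter("https://example.com/%d/%02d/", 2024, 2025, 3):
##    print(url)   # 2024/01 ... 2024/12, then 2025/01 ... 2025/03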
# =========================
# Classes
# =========================
# Wrapper classes used for data pipes
class cDownloader:
def __init__(self, file_namer, file_extension, file_prefix="", file_start_counter=0, pause=0.2, dead_link_kill=3, snap_page=False):
# Parameters to set for processing by the data pipes
self.file_namer = file_namer
self.file_ext = file_extension
self.prefix = file_prefix
#self.html_level_prefix = html_level_prefix
self.file_count = file_start_counter
self.pause = pause
self.dead_link_kill = dead_link_kill
self.dead_links = 0
self.last_dead_link = ""
self.snap_page = snap_page
        if self.snap_page:
            # Selenium is only needed for page snapshots, so import it lazily here
            from selenium import webdriver
            options = webdriver.ChromeOptions()
            options.add_argument("headless")
            options.add_argument("window-size=2500,1200")
            self.browser = webdriver.Chrome(chrome_options=options)
    def __del__(self):
        if self.snap_page:
            self.browser.quit()  # quit() ends the whole WebDriver session; close() would only close the current window
    def download(self, file_link):  # This gets called externally by the data pipes
        if file_link is None:
            return
        self.file_count += 1
        add_prefix = ""
        if self.file_namer == "FILENAME":
            trim_file_link = file_link[:file_link.rfind(self.file_ext)+len(self.file_ext)]  # Clip name if the link has parameters after the file name
            file_name = self.prefix + add_prefix + trim_file_link[trim_file_link.rfind("/")+1:]  # Extract file name from link
        else:  # defaults to file counter
            file_name = (self.prefix + add_prefix + "%06d" + self.file_ext) % self.file_count
        # Skip the download if we have exceeded the dead link count for this directory
        if self.dead_links >= self.dead_link_kill and self.last_dead_link == file_link[:file_link.rfind("/")]:
            #print('ERROR: cDownloader - REASON: Exceeded dead links')
            return
        # Skip the download if the file already exists locally
        if os.path.exists(file_name):
            print("WARNING: File exists", file_name, "skipping")
            return
        # Begin download
        if self.snap_page:  # Take a snapshot of the web page instead of downloading a file
            self.browser.get(file_link)
            self.browser.save_screenshot(file_name + ".png")  # always a .png extension
            time.sleep(self.pause)
        else:  # download the file
            try:
                req = Request(file_link)
                req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.130 AOL/9.8 AOLBuild/4346.2019.US Safari/537.36')
                remotefile = urlopen(req)
                localfile = open(file_name, 'wb')
                localfile.write(remotefile.read())
                localfile.close()
                remotefile.close()
                #print("DEBUG - sleep on file download")
                #time.sleep(0.5) # DEBUG
                self.dead_links = 0  # successful download, reset dead link count to zero
                self.last_dead_link = ""
            except urllib.error.HTTPError as e:
                print('ERROR:', e.code, ' REASON:', e.reason, " => ", file_link)  # Url = e.url
                self.dead_links += 1  # unsuccessful download, record dead link data
                self.last_dead_link = file_link[:file_link.rfind("/")]  # root of the attempted file
## END class cDownloader
# Data Pipe Class
class cDataPipe:
def __init__(self):
self.nLayers = 0
self.DataPipes = []
def new_pipe(self, data_pump):
self.DataPipes.append([data_input for data_input in data_pump])
def end_pipe(self, data_downloader):
self.nLayers += 1
self.DataPipes.append([data_downloader(data_input) for data_input in self.DataPipes[self.nLayers-1]])
def add_pipe(self, data_processor, c_regex=""):
self.nLayers += 1
if c_regex == "":
self.DataPipes.append(self.unique_items(self.flatten([data_processor(data_input) for data_input in self.DataPipes[self.nLayers-1]])))
else:
self.DataPipes.append(self.unique_items(self.flatten([data_processor(data_input,c_regex) for data_input in self.DataPipes[self.nLayers-1]])))
def output(self):
return list(self.DataPipes[self.nLayers])
def flow(self):
yield self.DataPipes[self.nLayers]
    def flatten(self, list_of_lists):
        flat_list = []
        try:
            flat_list = list(chain.from_iterable(list_of_lists))  # Note -- this will fail if any item is not an iterable
        except TypeError:
            print("ERROR - Non-iterable in flatten chain")
            return []
        return flat_list
    def unique_items(self, iterable):
        # Yield the items in order, skipping any already seen (the itertools unique_everseen recipe)
        seen = set()
        for element in filterfalse(seen.__contains__, iterable):
            seen.add(element)
            yield element
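# A minimal end-to-end sketch of a data pipe (the start URL and both regexes are
# hypothetical; only the calling pattern matters):
##pipe = cDataPipe()
##pipe.new_pipe(html_list_iter([r"https://example.com/gallery/"]))                 # seed the pipe with one page
##pipe.add_pipe(get_links, re.compile(r'href="(/album/[^"]+)"'))                   # scrape candidate links from it
##pipe.add_pipe(link_prefix("https://example.com").add)                            # make the links absolute
##pipe.end_pipe(cDownloader(file_namer="COUNTER", file_extension=".jpg").download) # download each one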
# ===================
# Image Processing Functions
# ===================
'''
Floyd-Steinberg Dithering Example
https://scipython.com/blog/floyd-steinberg-dithering/
Also see:
https://tannerhelland.com/2012/12/28/dithering-eleven-algorithms-source-code.html
https://en.wikipedia.org/wiki/Ordered_dithering
https://archive.ph/71e9G
Adjusted for the ePaper palette, which has 7 colours.
Supported colours
    Brown   R 255  G 128  B 0
    Green   R 0    G 255  B 0
    White   R 255  G 255  B 255
    Black   R 0    G 0    B 0
    Red     R 255  G 0    B 0
    Blue    R 0    G 0    B 255
    Yellow  R 255  G 255  B 0
Not supported (but would be the remaining normal two-channel colours)
    Purple  R 255  G 0    B 255
    Turq    R 0    G 255  B 255
'''
# =================================
# Constants
# =================================
colour_map = [np.array([0,0,0]), # Black
np.array([255,0,0]), # Red
np.array([0,255,0]), # Green
np.array([0,0,255]), # Blue
np.array([255,255,0]), # Yellow
np.array([255,128,0]), # Brown
np.array([255,255,255]), # White
]
NUMBER_COLOURS = len(colour_map)
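# Example of how a pixel gets matched to this palette: a mid-grey pixel (128, 128, 128) is
# nearest to Brown (255, 128, 0) - Euclidean distance sqrt(127^2 + 0^2 + 128^2) ~= 180, versus
# roughly 220 to White or Black - so fs_dither() below clamps it to Brown and, because 180
# exceeds the error threshold of 88, spreads the remaining (-127, 0, 128) error onto its neighbours.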
# =================================
# FUNCTIONS
# =================================
def get_new_val(old_val, nc):
"""
Get the "closest" colour to old_val in the range [0,1] per channel divided
into nc values.
"""
return np.round(old_val * (nc - 1)) / (nc - 1)
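# e.g. get_new_val(0.7, nc=2) -> 1.0 and get_new_val(0.3, nc=2) -> 0.0, while with nc=4 the
# value 0.7 snaps to the nearest of {0, 1/3, 2/3, 1}, i.e. round(0.7*3)/3 = 2/3.
# (Only the commented-out fs_dither_old() at the bottom of this file uses this helper.)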
'''
The Floyd-Steinberg algorithm dithers the image into a palette with fixed colours per channel

    F = 1/16 | _ * 7 |
             | 3 5 1 |

    Stucki
             | _ _ * 8 4 |
    S = 1/42 | 2 4 8 4 2 |
             | 1 2 4 2 1 |

where * is the current pixel
'''
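# Worked example of the spread (illustrative numbers only): if clamping the current pixel
# leaves an error of (32, -16, 8), the pixel to the right receives 7/16 of it, i.e. (14, -7, 3.5),
# the lower-left neighbour 3/16, the one directly below 5/16 and the lower-right 1/16, so the
# whole error is redistributed to unprocessed neighbours instead of being discarded.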
def fs_dither(img, height=448, width=600):
arr = np.array(img, dtype=int)
    for ir in range(height-1):  # the last row and column are never quantised; they only receive spilled error
        for ic in range(width-1):
matches = colour_map - np.tile(arr[ir, ic],(NUMBER_COLOURS,1))
dist_match = np.apply_along_axis(np.linalg.norm,1,matches)
best_match = np.argmin(dist_match)
best_colour = colour_map[best_match]
diff_best = arr[ir,ic] - best_colour
dist_best = np.linalg.norm(diff_best)
# Clamp to best colour
arr[ir,ic] = best_colour
            # Error spread to adjacent pixels
            if dist_best > 88:  # Error threshold to prevent unnecessary dithering - 88 = ~20% error, 45 ~ 10%, 66 ~ 15%
                for factor, ir_d, ic_d in [(7, 0, 1), (3, 1, -1), (5, 1, 0), (1, 1, 1)]:
                    if ic + ic_d < 0:  # do not wrap the lower-left share onto the far edge when ic == 0
                        continue
                    for ch in range(3):  # clamp each channel to 0..255 so the later uint8 conversion cannot wrap
                        arr[ir+ir_d, ic+ic_d][ch] = max(0, min(arr[ir+ir_d, ic+ic_d][ch] + round(diff_best[ch] * factor/16, 0), 255))
return Image.fromarray(np.uint8(arr))
# load_image: create a new white 600x448 canvas, load the JPEG image, rescale it to a height
# of 448 pixels while maintaining its aspect ratio, and paste it onto the canvas aligned to the left.
def load_image(input_image):
new_image = Image.new("RGB", (600, 448), "white") # Create a new white image with a width of 600 pixels and a height of 448 pixels
jpeg_image = Image.open(input_image) # Load the JPEG image
aspect_ratio = jpeg_image.width / jpeg_image.height # Rescale the JPEG image to have a height of 448 pixels and calculate the new width to maintain the aspect ratio
new_height = 448
new_width = int(aspect_ratio * new_height)
rescaled_image = jpeg_image.resize((new_width, new_height)) #, Image.ANTIALIAS)
new_image.paste(rescaled_image, (0, 0)) # Paste the rescaled image onto the white image, aligning it to the left
return new_image
# Save or display the result
# new_image.show() # To display the image
# new_image.save("output.jpg") # To save the image
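# Example of the scaling arithmetic: a 4000x3000 JPEG has aspect ratio 4/3, so it becomes
# int(4/3 * 448) = 597 pixels wide and fits the 600x448 canvas with a thin white strip on the
# right; a 6000x3000 JPEG would come out 896 pixels wide, so anything beyond the canvas's
# 600-pixel width is lost when pasted.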
# =================================
# MAIN PROGRAM
# =================================
'''
The display dimensions of the ePaper device are
    width  - 600
    height - 448
We need to scale the images accordingly.
TODO - rotate the image based on best fit
'''
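# A minimal sketch of the best-fit rotation mentioned in the TODO above; the assumption is
# that a portrait source fills more of the 600x448 landscape panel after a 90 degree turn.
# It is not wired into the pipeline below - load_image() would need to call it first.
##def rotate_to_best_fit(jpeg_image):
##    if jpeg_image.height > jpeg_image.width:   # portrait source on a landscape display
##        return jpeg_image.rotate(90, expand=True)  # expand=True keeps the full rotated frame
##    return jpeg_image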
# rename files
##input_files = [file.name for file in os.scandir() if file.name[-3:]=="jpg"]
##
##for idx,input_file in enumerate(input_files):
## day_of_year = (idx % 365) + 1
## file_number = idx // 365
## new_file_name = "%03d%03d.jpg" % (day_of_year, file_number)
## print(new_file_name)
##
##sys.exit()  # early exit belonged to the disabled rename pass above; left commented out so the dithering loop below runs
start_time = dt.now()
print("Running",start_time)
input_files = [file.name for file in os.scandir() if file.name[-3:]=="jpg" and not os.path.exists(file.name[:-3]+"bmp")]
for input_image in input_files:
print("Processing:",input_image)
img = load_image(input_image)
dim = fs_dither(img)
if dim is not None:
dim.save(input_image[:-3]+"bmp")
else:
print("ERROR: No image returned for",input_image)
print("Finished",dt.now()-start_time)
'''
from PIL import Image
# Open the larger calendar image
calendar = Image.open("calendar.bmp")
# Open the smaller weather image
weather = Image.open("weather.bmp")
# Get the dimensions of the calendar image
calendar_width, calendar_height = calendar.size
# Get the dimensions of the weather image
weather_width, weather_height = weather.size
# Calculate the position to paste the weather image
position = (calendar_width - weather_width, calendar_height - weather_height)
# Paste the weather image onto the calendar image at the calculated position
calendar.paste(weather, position)
# Save the modified calendar image
calendar.save("calendar_with_weather.bmp")
# Optionally, show the result
calendar.show()
# test calendar code
from datetime import datetime
# Get the current date
current_date = datetime.now()
# Extract the current year and month as integers
current_year = current_date.year
current_month = current_date.month
# Print the year and month
print("Year:", current_year)
print("Month:", current_month)
current_date = datetime.now()
print(current_date.strftime("%A"))
print(current_date.strftime("%b %d, %Y"))
import calendar
yy = 2025
mm = 1
cal = calendar.month(yy, mm)
cal_lines = cal.split(chr(10))
for cal_line in cal_lines[1:]:
print(cal_line)
#print(cal)
#print(cal_lines)
#for char in cal:
# print(ord(char))
# Downloader
##pipe = cDataPipe()
##pipe.new_pipe(html_list_iter([r"https://creator.nightcafe.studio/"]))
##pipe.add_pipe(get_links,re.compile(r"(https:\/\/images\.nightcafe\.studio\/(?!\/assets)[A-z0-9\/\.\-]*jpg)"))
##pipe.add_pipe(pipe_inspector)
###pipe.end_pipe(cDownloader(file_namer="COUNTER",file_extension=".jpg").download)
##pipe.flow()
##
##sys.exit()
# width, height = img.size
#print(img.__dict__) #, width, height)
#new_width = 600
#new_height = int(height * new_width / width)
#img = img.resize((new_width, new_height), Image.ANTIALIAS)
def fs_dither_old(img, nc=2):
    arr = np.array(img, dtype=float) / 255
    new_height, new_width = arr.shape[0], arr.shape[1]  # derive the dimensions from the image itself
    for ir in range(new_height):
        for ic in range(new_width):
# NB need to copy here for RGB arrays otherwise err will be (0,0,0)!
old_val = arr[ir, ic].copy()
new_val = get_new_val(old_val, nc)
arr[ir, ic] = new_val
err = old_val - new_val
# In this simple example, we will just ignore the border pixels.
if ic < new_width - 1:
arr[ir, ic+1] += (err * 7/16)
if ir < new_height - 1:
if ic > 0:
arr[ir+1, ic-1] += (err * 3/16)
arr[ir+1, ic] += (err * 5/16)
if ic < new_width - 1:
arr[ir+1, ic+1] += (err / 16)
carr = np.array(arr/np.max(arr, axis=(0,1)) * 255, dtype=np.uint8)
return Image.fromarray(carr)
'''