delete unnecessary files

Samuel Lazareanu
2023-04-16 23:46:27 +03:00
parent 702f9fdf13
commit 3a2d9fac85
6 changed files with 0 additions and 372 deletions


@@ -1,9 +0,0 @@
class Fonts:
    fonts_path: str
    fonts_size: int
    fonts_chars_limit: int

    def __init__(self, fonts_path, fonts_size, fonts_chars_limit):
        self.fonts_path = fonts_path
        self.fonts_size = fonts_size
        self.fonts_chars_limit = fonts_chars_limit
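Given how the (now-deleted) ffmpeg.py below indexes into these attributes, they were used as parallel lists despite the scalar annotations; a minimal usage sketch with hypothetical font paths and values:

# Hypothetical values: one entry per font, kept in parallel lists.
fonts = Fonts(
    fonts_path=["fonts/FontA.ttf", "fonts/FlowersSunday.otf"],  # hypothetical paths
    fonts_size=[60, 72],
    fonts_chars_limit=[28, 24],
)
# The same index selects a font's path, size and character limit together.
font_file = fonts.fonts_path[0]
font_size = fonts.fonts_size[0]
font_chars = fonts.fonts_chars_limit[0]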

Binary file not shown (deleted image, 227 KiB).

Binary file not shown (deleted image, 231 KiB).


@@ -1,45 +0,0 @@
import moviepy.video.io.VideoFileClip as vfc
import os
from moviepy import video


def generate_darken_video(video_file, output_path):
    video_clip = (vfc.VideoFileClip(video_file, audio=False)
                  .without_audio())
    # Save the final video
    darken_clip = video_clip.fl_image(darken)
    darken_clip.write_videofile(output_path,
                                threads=8,
                                codec="libx264")
    # Clean up the temporary files
    darken_clip.close()


# A defined function to darken the frames
def darken(frame):
    return frame * DARK


def generate_darken_videos(video_folder, output_folder):
    # Get a list of video files in the specified folder
    video_files = [f"{video_folder}/{file}" for file in os.listdir(video_folder) if file.endswith(".mp4")]
    for video_file in video_files:
        video_num = video_file.split('/')
        video_num = video_num[len(video_num) - 1].split('.')
        video_num = video_num[0]
        generate_darken_video(video_file, f"{output_folder}/{video_num}.mp4")


video_folder = "E:/Bots/VideoMaker/videos/original/new ones"
output_folder = "E:/Bots/VideoMaker/videos"
DARK = 0.8

generate_darken_videos(video_folder, output_folder)

# Specific video
# video_file = "E:/Bots/VideoMaker/videos/original/7.mp4"
# output_path = "E:/Bots/VideoMaker/videos/darken 40%/7.mp4"
# generate_darken_video(video_file, output_path)
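For context, moviepy's fl_image passes each frame to darken as a NumPy array of RGB values, so multiplying by DARK = 0.8 scales every pixel to 80% brightness; a minimal sketch of an equivalent frame function (the uint8 cast is an assumption, since frame * DARK alone yields a float array):

import numpy as np

def darken_frame(frame, dark=0.8):
    # Scale every RGB value down, then cast back to uint8 for the video writer.
    return (frame * dark).astype(np.uint8)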

ffmpeg.py

@@ -1,201 +0,0 @@
import os
import random
import subprocess
import re
import time

import json_handler
import verse_handler
import Fonts
import cv2


def create_dirs(output_folder, customer_name, posts=True):
    # create a folder for this customer if it doesn't exist
    output_path = f"{output_folder}/{customer_name}"
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    # Create folder inside for images
    if not os.path.exists(f"{output_path}/verse_images"):
        os.makedirs(f"{output_path}/verse_images")
    if posts and not os.path.exists(f"{output_path}/post_images"):
        os.makedirs(f"{output_path}/post_images")
    return output_path


def create_videos(video_folder, audio_folder, json_file, fonts_dir, output_folder, text_source_font, image_file,
                  customer_name, number_of_videos, fonts: Fonts, posts=True):
    run_time_average = 0
    if number_of_videos > 1:
        start_time_total = time.time()
    json_data = json_handler.get_data(json_file)
    verses: str = json_data[0]
    refs: str = json_data[1]
    videos_num = list()
    audios_num = list()
    fonts_num = list()
    # Get lists of video and audio files in the specified folders
    video_files = [f"{video_folder}/{file}" for file in os.listdir(video_folder) if file.endswith(".mp4")]
    audio_files = [f"{audio_folder}/{file}" for file in os.listdir(audio_folder) if file.endswith(".mp3")]
    random_for_video = random.randint(0, len(video_files) - 1)
    random_for_audio = random.randint(0, len(audio_files) - 1)
    random_for_font = random.randint(0, len(fonts.fonts_path) - 1)
    for i in range(number_of_videos):
        videos_num.append((random_for_video + i) % len(video_files))
        audios_num.append((random_for_audio + i) % len(audio_files))
        fonts_num.append((random_for_font + i) % len(fonts.fonts_path))
    random.shuffle(videos_num)
    random.shuffle(audios_num)
    random.shuffle(fonts_num)
    # Creating folder for customer
    output_path = create_dirs(output_folder, customer_name, posts)
    for i in range(number_of_videos):
        start_time = time.time()
        print(f"Creating Video #{i}")
        text_verse = verses[i]
        text_source = refs[i]
        # Choose a random video file from the list
        random_video_num = videos_num[0]
        del videos_num[0]
        video_file = video_files[random_video_num]
        # video_file = f"{video_folder}/30.mp4"
        # Choose a random font from list
        random_font_num = fonts_num[0]
        del fonts_num[0]
        font_file = fonts.fonts_path[random_font_num]
        font_size = fonts.fonts_size[random_font_num]
        font_chars = fonts.fonts_chars_limit[random_font_num]
        # Choose a random audio file from the list
        random_audio_num = audios_num[0]
        del audios_num[0]
        audio_file = audio_files[random_audio_num]
        # remove chars from verse source for the name
        text_source_for_image = text_source.replace(":", "").rstrip('\n')
        text_source_for_name = text_source_for_image.replace(' ', '')
        file_name = f"/{i}-{text_source_for_name}_{random_video_num}_{random_audio_num}_{random_font_num}.mp4"
        create_video(text_verse=text_verse, text_source=text_source, text_source_font=text_source_font,
                     text_source_for_image=text_source_for_image,
                     video_file=video_file, audio_file=audio_file, image_file=image_file,
                     font_file=font_file, font_size=font_size, font_chars=font_chars,
                     posts=posts,
                     output_path=output_path, file_name=file_name)
        end_time = time.time()
        run_time = end_time - start_time
        run_time_average += run_time
        print(f"\033[0;34m DONE #{i}, Run time:", round(run_time, 2), "seconds! \033[0m", output_path)
    if number_of_videos > 1:
        run_time_average /= number_of_videos
        end_time_total = time.time()
        run_time_total = end_time_total - start_time_total
        print(f"\n\033[0;32mDone making {number_of_videos} videos for {customer_name}!"
              f"\nTotal run time:", round(run_time_total, 2), "seconds!"
              f"\nAverage run time:", round(run_time_average, 2),
              "seconds = ", round(run_time_average / 60, 2), " minutes! \033[0m")


def create_video(text_verse, text_source, text_source_font, text_source_for_image, video_file, audio_file, image_file,
                 font_file, font_size, font_chars, output_path, file_name, posts=True):
    # Coordinates of logo image and text2 clips
    image_y = 1600
    text2_y = 1300
    # Get the video size
    result = subprocess.run(
        ['ffprobe', '-v', 'error', '-show_entries', 'stream=width,height', '-of', 'csv=p=0:s=x', video_file],
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    video_size = re.findall('\d+', result.stdout.decode())[0:2]
    video_width, video_height = map(int, video_size)
    # Get video duration
    ffprobe_command = f'ffprobe -i "{video_file}" -show_entries format=duration -v quiet -of csv="p=0"'
    video_duration = subprocess.check_output(ffprobe_command, shell=True)
    video_duration = float(video_duration.decode('utf-8').strip())
    # Set the start time of text
    text_start_time = 1
    # Create image of verse
    created_verse_image = verse_handler.create_image(text_verse, font_file, font_size, font_chars,
                                                     (int(video_width), int(video_height / 2)), output_path,
                                                     text_source_for_image)
    # fix bug that ':' and beyond wasn't showing on screen
    text_source = text_source.replace(':', '\:')
    output_path += f"/{file_name}"
    # FFMPEG command to overlay images and text onto input video
    ffmpeg_command = (f'ffmpeg -y -loop 1 -i "{image_file}" -i "{audio_file}" '
                      f'-i "{video_file}" -i "{created_verse_image}" -r 24 -filter_complex '
                      f'"[2:v][0:v]overlay=(W-w)/2:{image_y}[v1]; '
                      # f'[v1]drawtext=fontfile={selected_font}:text=\'{text_verse}\':x=(w-text_w)/2:y=(h-text_h)/2:fontsize=60:fontcolor=white:'
                      # f'enable=\'between(t,{text_start_time},{video_duration})\'[v2]; '
                      f'[v1]drawtext=fontfile=\'{text_source_font}\':text=\'{text_source}\':x=(w-text_w)/2:y={text2_y}:fontsize=42:fontcolor=white:'
                      f'enable=\'between(t,{text_start_time},{video_duration})\'[v2]; '
                      f'[v2][3:v]overlay=(W-w)/2:{video_height}/4:enable=\'between(t,{text_start_time},{video_duration})\'[v3]" '
                      f'-t {video_duration} -map "[v3]" -map 1:a -c:v libx264 -preset veryfast -crf 18 -c:a copy "{output_path}"')
    # WITHOUT LOGO
    # ffmpeg_command = (f'ffmpeg -y -i "{audio_file}" '
    #                   f'-i "{video_file}" -i "{created_verse_image}" -r 24 -filter_complex '
    #                   # f'[v1]drawtext=fontfile={selected_font}:text=\'{text_verse}\':x=(w-text_w)/2:y=(h-text_h)/2:fontsize=60:fontcolor=white:'
    #                   # f'enable=\'between(t,{text_start_time},{video_duration})\'[v2]; '
    #                   f'"drawtext=fontfile=\'{text_source_font}\':text=\'{text_source}\':x=(w-text_w)/2:y={text2_y}:fontsize=42:fontcolor=white:'
    #                   f'enable=\'between(t,{text_start_time},{video_duration})\'[v1]; '
    #                   f'[v1][2:v]overlay=(W-w)/2:{video_height}/4:enable=\'between(t,{text_start_time},{video_duration})\'[v2]" '
    #                   f'-t {video_duration} -map "[v2]" -map 0:a -c:v libx264 -preset veryfast -crf 18 -c:a copy "{output_path}"')
    # Run FFMPEG command
    subprocess.call(ffmpeg_command, shell=True)
    # if posts:
    #     verse_handler.create_post_images(video_path=output_path, output_folder=output_path.strip(f"/{file_name}"),
    #                                      verse_image_path=created_verse_image, text_source=text_source)


def create_post_images(video_path: str, verse_image_path, text_source, output_folder):
    # Open the video file
    video = cv2.VideoCapture(video_path)
    # Get the frame rate of the video
    fps = int(video.get(cv2.CAP_PROP_FPS))
    # Set the time in seconds to extract a frame from
    time_in_seconds = 2
    # Calculate the frame index to extract
    frame_index = time_in_seconds * fps
    # Set the output image size
    output_size = (1080, 1080)
    # Loop through the video frames until we reach the desired frame
    for i in range(frame_index):
        ret, frame = video.read()
    # Crop the middle square of the frame
    height, width, channels = frame.shape
    y = 325
    cropped_frame = frame[y:y + 1440, 0:width]
    # Resize the cropped frame to the output size
    # resized_frame = cv2.resize(cropped_frame, output_size)
    # Save the frame as an image
    output_name = video_path.split('/')
    output_name = output_name[len(output_name) - 1].strip(".mp4")
    cv2.imwrite(f"{output_folder}/post_images/{output_name}.jpg", cropped_frame)
    # Release the video file
    video.release()
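For reference, the two ffprobe calls in create_video can be reproduced on their own as below; a minimal sketch assuming ffprobe is on PATH and using a hypothetical input file sample.mp4:

import re
import subprocess

# Width and height, printed as WIDTHxHEIGHT by ffprobe's csv writer.
probe = subprocess.run(
    ['ffprobe', '-v', 'error', '-show_entries', 'stream=width,height',
     '-of', 'csv=p=0:s=x', 'sample.mp4'],  # hypothetical file
    stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
width, height = map(int, re.findall(r'\d+', probe.stdout.decode())[:2])

# Duration in seconds, as a single csv field.
duration = float(subprocess.check_output(
    ['ffprobe', '-i', 'sample.mp4', '-show_entries', 'format=duration',
     '-v', 'quiet', '-of', 'csv=p=0']).decode().strip())
print(width, height, duration)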


@@ -1,117 +0,0 @@
import os
from string import ascii_letters

import cv2
from PIL import Image, ImageDraw, ImageFont, ImageFilter
import textwrap


def create_image(text, font_path, font_size, max_char_count, image_size, save_path, text_source):
    save_path += "/verse_images"
    text = fix_fonts(text, font_path)
    # Open a blank image
    img = Image.new('RGBA', image_size, color=(190, 190, 190, 0))
    # Load selected font
    font = ImageFont.truetype(font=f'{font_path}', size=font_size)
    # Create DrawText object
    draw = ImageDraw.Draw(im=img)
    # Define our text:
    # Calculate the average length of a single character of our font.
    # Note: this takes into account the specific font and font size.
    avg_char_width = sum(font.getsize(char)[0] for char in ascii_letters) / len(ascii_letters)
    # Translate this average length into a character count
    max_char_count = max(int(img.size[0] * .718 / avg_char_width), max_char_count)
    # Create a wrapped text object using scaled character count
    new_text = textwrap.fill(text=text, width=max_char_count)
    # Draw the shadow text
    shadow_image = Image.new('RGBA', img.size, color=(255, 255, 255, 0))
    shadow_draw = ImageDraw.Draw(im=shadow_image)
    shadow_draw.text(xy=(img.size[0] / 2 - 1, img.size[1] / 2 + 4), text=new_text, font=font, fill=(0, 0, 0, 80),
                     anchor='mm', align='center')
    # Add main text to the image
    draw.text(xy=(img.size[0] / 2, img.size[1] / 2), text=new_text, font=font, fill=(255, 255, 255, 255), anchor='mm',
              align='center')
    # combine shadow and main
    combined = Image.alpha_composite(shadow_image, img)
    # check if image of this source (bible reference) exists already
    path_to_check = f"{save_path}/{text_source}.png"
    i = 1
    while os.path.exists(path_to_check):
        path_to_check = f"{save_path}/{text_source}-{i}.png"
        i += 1
    # Save the image
    combined.save(f"{path_to_check}")
    # combined.show()
    return f"{path_to_check}"


def create_post_images(video_path: str, verse_image_path, text_source, output_folder):
    # Open the video file
    video = cv2.VideoCapture(video_path)
    # Get the frame rate of the video
    fps = int(video.get(cv2.CAP_PROP_FPS))
    # Set the time in seconds to extract a frame from
    time_in_seconds = 2
    # Calculate the frame index to extract
    frame_index = time_in_seconds * fps
    # Set the output image size
    output_size = (1080, 1080)
    # Loop through the video frames until we reach the desired frame
    for i in range(frame_index):
        ret, frame = video.read()
    # Crop the middle square of the frame
    height, width, channels = frame.shape
    y = int((height - width) / 2)
    cropped_frame = frame[y:y + 1080, 0:width]
    background = Image.fromarray(cropped_frame)
    verse = Image.open(verse_image_path)
    combined = Image.blend(background, verse, 1)
    # Create a drawing object
    draw = ImageDraw.Draw(combined)
    # Define the text to add and the font to use
    text = 'Hello, World!'
    font = ImageFont.truetype(r"C\:/Users/Samurai/AppData/Local/Microsoft/Windows/Fonts/Aloevera-OVoWO.ttf", size=36)
    # Determine the position to place the text
    text_width, text_height = draw.textsize(text, font=font)
    x = (combined.width - text_width) / 2
    y = 1300
    # Add the text to the image
    draw.text((x, y), text, font=font, fill=(255, 255, 255))
    output_name = video_path.split('/')
    output_name = output_name[len(output_name) - 1].strip(".mp4")
    combined.save(f"{output_folder}/post_images/{output_name}.jpg")
    # Save the frame as an image
    # output_name = video_path.split('/')
    # output_name = output_name[len(output_name) - 1].strip(".mp4")
    # cv2.imwrite(f"{output_folder}/post_images/{output_name}.jpg", cropped_frame)
    #
    # Release the video file
    video.release()


def fix_fonts(text, font):
    # Font 6 can't display '
    if (font.__contains__("FlowersSunday")):
        return text.replace("'", "")
    return text
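The wrapping in create_image works by measuring an average glyph width over ascii_letters and converting the image width into a character budget for textwrap; a minimal sketch of that calculation (the font path and sample verse are hypothetical, and font.getsize assumes a Pillow version before 10, matching the code above):

from string import ascii_letters
import textwrap
from PIL import ImageFont

font = ImageFont.truetype("fonts/FontA.ttf", size=60)  # hypothetical font path
image_width = 1080
# Average pixel width of one character in this font at this size.
avg_char_width = sum(font.getsize(ch)[0] for ch in ascii_letters) / len(ascii_letters)
# Convert ~72% of the image width into a character count, with a floor of 28.
width_in_chars = max(int(image_width * .718 / avg_char_width), 28)
wrapped = textwrap.fill("For God so loved the world, that he gave his only Son...", width=width_in_chars)
print(wrapped)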