diff --git a/main.py b/main.py index 64fcb1e..69afd37 100644 --- a/main.py +++ b/main.py @@ -8,7 +8,7 @@ from bot.bot import Bot from playground import build_pdf_extractor, build_googletranslator, build_unstable_diffusion, build_sketcher, \ build_dalle, \ build_whisperx, build_libretranslator, build_external_dvm, build_media_converter, build_inactive_follows_finder, \ - build_image_converter + build_image_converter, build_googletranscribe from utils.definitions import EventDefinitions from utils.dvmconfig import DVMConfig from utils.nostr_utils import check_and_set_private_key @@ -70,6 +70,10 @@ def run_nostr_dvm_with_local_config(): bot_config.SUPPORTED_DVMS.append(whisperer) # We also add Sketcher to the bot whisperer.run() + transcriptor = build_googletranscribe("Transcriptor", "speech_recognition") + bot_config.SUPPORTED_DVMS.append(transcriptor) # We also add the Transcriptor to the bot + transcriptor.run() + # Spawn DVM6, this one requires an OPENAI API Key and balance with OpenAI, you will move the task to them and pay # per call. Make sure you have enough balance and the DVM's cost is set higher than what you pay yourself, except, you know, # you're being generous. 
diff --git a/playground.py b/playground.py index 791b236..36938a8 100644 --- a/playground.py +++ b/playground.py @@ -9,6 +9,7 @@ from tasks.discovery_inactive_follows import DiscoverInactiveFollows from tasks.imagegeneration_openai_dalle import ImageGenerationDALLE from tasks.imagegeneration_sdxl import ImageGenerationSDXL from tasks.imagegeneration_sdxlimg2img import ImageGenerationSDXLIMG2IMG +from tasks.textextraction_google import SpeechToTextGoogle from tasks.textextraction_whisperx import SpeechToTextWhisperX from tasks.textextraction_pdf import TextExtractionPDF from tasks.translation_google import TranslationGoogle @@ -203,7 +204,7 @@ def build_whisperx(name, identifier): nip89info = { "name": name, "image": "https://image.nostr.build/c33ca6fc4cc038ca4adb46fdfdfda34951656f87ee364ef59095bae1495ce669.jpg", - "about": "I am a test dvm to extract text from media files (very beta)", + "about": "I extract text from media files with WhisperX", "nip90Params": nip90params } nip89config = NIP89Config() @@ -214,6 +215,34 @@ def build_whisperx(name, identifier): admin_config=admin_config, options=options) +def build_googletranscribe(name, identifier): + dvm_config = DVMConfig() + dvm_config.PRIVATE_KEY = check_and_set_private_key(identifier) + dvm_config.LNBITS_INVOICE_KEY = os.getenv("LNBITS_INVOICE_KEY") + dvm_config.LNBITS_URL = os.getenv("LNBITS_HOST") + options = {'api_key': None} + # A module might have options it can be initialized with, here we set an optional Google API key + # (None falls back to the library's default key). These parameters can be freely defined in the task component + + nip90params = { + "language": { + "required": False, + "values": ["en-US"] + } + } + nip89info = { + "name": name, + "image": "https://image.nostr.build/c33ca6fc4cc038ca4adb46fdfdfda34951656f87ee364ef59095bae1495ce669.jpg", + "about": "I extract text from media files with the Google API. 
I understand English by default", + "nip90Params": nip90params + } + nip89config = NIP89Config() + nip89config.DTAG = check_and_set_d_tag(identifier, name, dvm_config.PRIVATE_KEY, + nip89info["image"]) + nip89config.CONTENT = json.dumps(nip89info) + return SpeechToTextGoogle(name=name, dvm_config=dvm_config, nip89config=nip89config, + admin_config=admin_config, options=options) + def build_sketcher(name, identifier): dvm_config = DVMConfig() dvm_config.PRIVATE_KEY = check_and_set_private_key(identifier) diff --git a/tasks/README.md b/tasks/README.md index 54c1dc6..682cf73 100644 --- a/tasks/README.md +++ b/tasks/README.md @@ -6,13 +6,14 @@ Reusable backend functions can be defined in backends (e.g. API calls) Current List of Tasks: -| Module | Kind | Description | Backend | |--------------------------|------|------------------------------------------------|-------------| -| TextExtractionPDF | 5000 | Extracts Text from a PDF file | local | -| SpeechToTextWhisperX | 5000 | Extracts Speech from Media files | nova-server | -| TranslationGoogle | 5002 | Translates Inputs to another language | googleAPI | -| TranslationLibre | 5002 | Translates Inputs to another language | libreAPI | -| ImageGenerationSDXL | 5100 | Generates an Image with StableDiffusionXL | nova-server | -| ImageGenerationDALLE | 5100 | Generates an Image with Dall-E | openAI | -| MediaConverter | 5200 | Converts a link of a media file and uploads it | openAI | -| DiscoverInactiveFollows | 5301 | Find inactive Nostr users | local | \ No newline at end of file +| Module | Kind | Description | Backend | +|-------------------------|------|------------------------------------------------|-------------| +| TextExtractionPDF | 5000 | Extracts Text from a PDF file | local | +| SpeechToTextWhisperX | 5000 | Extracts Speech from Media files | nova-server | +| SpeechToTextGoogle | 5000 | Extracts Speech from Media files via Google | googleAPI | +| TranslationGoogle | 5002 | Translates Inputs 
to another language | googleAPI | +| TranslationLibre | 5002 | Translates Inputs to another language | libreAPI | +| ImageGenerationSDXL | 5100 | Generates an Image with StableDiffusionXL | nova-server | +| ImageGenerationDALLE | 5100 | Generates an Image with Dall-E | openAI | +| MediaConverter | 5200 | Converts a link of a media file and uploads it | openAI | +| DiscoverInactiveFollows | 5301 | Find inactive Nostr users | local | \ No newline at end of file diff --git a/tasks/textextraction_google.py b/tasks/textextraction_google.py new file mode 100644 index 0000000..b454cd3 --- /dev/null +++ b/tasks/textextraction_google.py @@ -0,0 +1,125 @@ +import json +import os +import time +from multiprocessing.pool import ThreadPool +from pathlib import Path + +from backends.nova_server import check_nova_server_status, send_request_to_nova_server, send_file_to_nova_server +from interfaces.dvmtaskinterface import DVMTaskInterface +from utils.admin_utils import AdminConfig +from utils.dvmconfig import DVMConfig +from utils.mediasource_utils import organize_input_media_data +from utils.nip89_utils import NIP89Config +from utils.definitions import EventDefinitions + +""" +This File contains a Module to transform a media file input on Google Cloud + +Accepted Inputs: Url to media file (url) +Outputs: Transcribed text + +""" + + +class SpeechToTextGoogle(DVMTaskInterface): + KIND: int = EventDefinitions.KIND_NIP90_EXTRACT_TEXT + TASK: str = "speech-to-text" + FIX_COST: float = 10 + PER_UNIT_COST: float = 0.1 + + def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config, + admin_config: AdminConfig = None, options=None): + super().__init__(name, dvm_config, nip89config, admin_config, options) + if options is None: + options = {} + + def is_input_supported(self, tags): + for tag in tags: + if tag.as_vec()[0] == 'i': + input_value = tag.as_vec()[1] + input_type = tag.as_vec()[2] + if input_type != "url": + return False + + elif tag.as_vec()[0] == 'output': + output 
= tag.as_vec()[1] + if output == "" or not (output == "text/plain"): + print("Output format not supported, skipping..") + return False + + return True + + def create_request_form_from_nostr_event(self, event, client=None, dvm_config=None): + request_form = {"jobID": event.id().to_hex() + "_" + self.NAME.replace(" ", "")} + + url = "" + input_type = "url" + start_time = 0 + end_time = 0 + media_format = "audio/wav" + language = "en-US" + + for tag in event.tags(): + if tag.as_vec()[0] == 'i': + input_type = tag.as_vec()[2] + if input_type == "url": + url = tag.as_vec()[1] + + elif tag.as_vec()[0] == 'param': + print("Param: " + tag.as_vec()[1] + ": " + tag.as_vec()[2]) + if tag.as_vec()[1] == "language": + language = tag.as_vec()[2] + elif tag.as_vec()[1] == "range": + try: + t = time.strptime(tag.as_vec()[2], "%H:%M:%S") + seconds = t.tm_hour * 60 * 60 + t.tm_min * 60 + t.tm_sec + start_time = float(seconds) + except: + try: + t = time.strptime(tag.as_vec()[2], "%M:%S") + seconds = t.tm_min * 60 + t.tm_sec + start_time = float(seconds) + except: + start_time = float(tag.as_vec()[2]) + try: + t = time.strptime(tag.as_vec()[3], "%H:%M:%S") + seconds = t.tm_hour * 60 * 60 + t.tm_min * 60 + t.tm_sec + end_time = float(seconds) + except: + try: + t = time.strptime(tag.as_vec()[3], "%M:%S") + seconds = t.tm_min * 60 + t.tm_sec + end_time = float(seconds) + except: + end_time = float(tag.as_vec()[3]) + + filepath = organize_input_media_data(url, input_type, start_time, end_time, dvm_config, client, True, + media_format) + options = { + "filepath": filepath, + "language": language, + } + request_form['options'] = json.dumps(options) + return request_form + + def process(self, request_form): + import speech_recognition as sr + if self.options.get("api_key"): + api_key = self.options['api_key'] + else: + api_key = None + options = DVMTaskInterface.set_options(request_form) + # Speech recognition instance + asr = sr.Recognizer() + with sr.AudioFile(options["filepath"]) as source: + 
audio = asr.record(source) # read the entire audio file + + try: + # Use Google Web Speech API to recognize speech from audio data + result = asr.recognize_google(audio, language=options["language"], key=api_key) + except Exception as e: + print(e) + # If an error occurs during speech recognition, log it and return the string "error" + return "error" + + return result