# Agent tools: Wikipedia page browsing and audio transcription (smolagents).
from typing import Dict
from transformers import pipeline
from smolagents.tools import Tool
import torchcodec
class VisitWikiPageTool(Tool):
    """Fetch a Wikipedia page over HTTP and return its content converted to Markdown.

    The result is truncated to ``max_length`` characters so it stays within a
    model's context budget. Network errors are returned as strings (not raised)
    so the calling agent can read them and react.
    """

    name = "visit_wikipage"
    # Fixed typo: "wepages" -> "webpages" (this text is read by the agent/LLM).
    description = (
        "Visits a Wikipedia page at the given url and reads its content as a markdown string."
        " Use this to browse Wikipedia webpages and get their full content."
    )
    inputs = {
        "url": {
            "type": "string",
            "description": "The url of the webpage to visit.",
        },
        "max_length": {
            "type": "integer",
            "description": "Maximum number of characters to include in the response. Default 40000.",
            "nullable": True,
        },
    }
    output_type = "string"

    def __init__(self, user_agent: str):
        """Store the User-Agent header sent with every request.

        Wikipedia's API etiquette asks clients to identify themselves, hence a
        caller-supplied agent string rather than the requests default.
        """
        super().__init__()
        self.headers = {"User-Agent": user_agent}

    def _truncate_content(self, content: str, max_length: int) -> str:
        """Return *content* unchanged if it fits, else cut at *max_length* and append a truncation notice."""
        if len(content) <= max_length:
            return content
        return (
            content[:max_length]
            + f"\n..._This content has been truncated to stay below {max_length} characters_...\n"
        )

    def forward(self, url: str, max_length: int = 40000) -> str:
        """Fetch *url*, convert its HTML to Markdown, and return it (possibly truncated).

        Returns an error description string on timeout, HTTP failure, or any
        unexpected exception instead of raising.
        """
        try:
            import re
            import requests
            from markdownify import markdownify
            from requests.exceptions import RequestException
        except ImportError as e:
            raise ImportError(
                "You must install packages `markdownify` and `requests` to run this tool: for instance run `pip install markdownify requests`."
            ) from e
        # The tool schema marks max_length nullable: None means "use the default".
        # Normalized up front, before any network work.
        if max_length is None:
            max_length = 40000
        try:
            # 20-second timeout so a slow server cannot hang the agent.
            response = requests.get(url, timeout=20, headers=self.headers)
            response.raise_for_status()  # Raise for 4xx/5xx status codes.
            # Convert the HTML content to Markdown.
            markdown_content = markdownify(response.text).strip()
            # Collapse runs of 3+ newlines into a single blank line.
            markdown_content = re.sub(r"\n{3,}", "\n\n", markdown_content)
            return self._truncate_content(markdown_content, max_length)
        except requests.exceptions.Timeout:
            # Must precede RequestException: Timeout is one of its subclasses.
            return "The request timed out. Please try again later or check the URL."
        except RequestException as e:
            return f"Error fetching the webpage: {str(e)}"
        except Exception as e:
            return f"An unexpected error occurred: {str(e)}"
# NOTE(review): a second class named `SpeechToTextTool` is defined later in this
# file (file-path input instead of bytes). At import time the later definition
# rebinds the name, so this bytes-based variant is shadowed and effectively dead
# unless something references it before the redefinition — consider renaming one.
class SpeechToTextTool(Tool):
    # Tool identifier and description exposed to the agent framework.
    name = "transcriber"
    description = "This is a tool that transcribes an audio into text. It returns the transcribed text."
    inputs = {
        "audio": {
            "type": "audio",
            "description": "The audio to transcribe it should be bytes.",
        },
        "sample_rate": {
            "type": "integer",
            "description": "The sampling rate to use to decode the audio, defaults to 16000",
            "nullable": True
        }
    }
    output_type = "string"
    def __init__(self, model: str = "openai/whisper-small"):
        """Build the Hugging Face automatic-speech-recognition pipeline for *model*."""
        super().__init__()
        self.pipe = pipeline("automatic-speech-recognition", model=model)
    def forward(self, audio: bytes, sample_rate: int=16000) -> str:
        """Transcribe raw *audio* bytes and return the recognized text."""
        # The tool schema marks sample_rate nullable: None means "use 16000".
        sample_rate = sample_rate if sample_rate is not None else 16000
        # Decode the raw bytes at the requested sample rate.
        # assumes AudioDecoder accepts a bytes object as its source — TODO confirm
        decoder = torchcodec.decoders.AudioDecoder(audio, sample_rate=sample_rate)
        # NOTE(review): the decoder object is passed straight to the ASR pipeline;
        # presumably the pipeline accepts torchcodec decoders — verify.
        out = self.pipe(decoder)
        return out["text"]
class SpeechToTextTool(Tool):
    """Transcribe an audio file on disk to text via a Whisper ASR pipeline."""

    name = "transcriber"
    description = "This is a tool that transcribes an audio into text. It returns the transcribed text."
    inputs = {
        "audio_file": {
            "type": "string",
            "description": "The path to the audio file to transcribe.",
        },
        "sample_rate": {
            "type": "integer",
            "description": "The sampling rate to use to decode the audio, defaults to 16000",
            "nullable": True
        }
    }
    output_type = "string"

    def __init__(self, model: str = "openai/whisper-small"):
        """Create the automatic-speech-recognition pipeline backed by *model*."""
        super().__init__()
        self.pipe = pipeline("automatic-speech-recognition", model=model)

    def forward(self, audio_file: str, sample_rate: int = 16000) -> str:
        """Decode *audio_file* at *sample_rate* and return the transcribed text."""
        # A null sample_rate coming through the tool schema falls back to 16 kHz.
        if sample_rate is None:
            sample_rate = 16000
        # Keep the decode + inference inside the `with` so the file handle stays
        # open while the pipeline consumes the decoder.
        with open(audio_file, "rb") as fh:
            audio_decoder = torchcodec.decoders.AudioDecoder(fh, sample_rate=sample_rate)
            transcription = self.pipe(audio_decoder)
        return transcription["text"]