import streamlit as st
import PyPDF2
import io
import os
import re
import nltk
from nltk.corpus import words
# Download the words corpus if not already downloaded
nltk.download('words', quiet=True)
# Create a set of English words for quick lookup
english_words_set = set(words.words())
# Preeti-to-Unicode mapping tables: lowercase a-z, uppercase A-Z, digits 0-9, and symbols
unicodeatoz = ["ब", "द", "अ", "म", "भ", "ा", "न", "ज", "ष्", "व", "प", "ि", "फ", "ल", "य", "उ", "त्र", "च", "क", "त", "ग", "ख", "ध", "ह", "थ", "श"]
unicodeAtoZ = ["ब्", "ध", "ऋ", "म्", "भ्", "ँ", "न्", "ज्", "क्ष्", "व्", "प्", "ी", "ः", "ल्", "इ", "ए", "त्त", "च्", "क्", "त्", "ग्", "ख्", "ध्", "ह्", "थ्", "श्"]
unicode0to9 = ["ण्", "ज्ञ", "द्द", "घ", "द्ध", "छ", "ट", "ठ", "ड", "ढ"]
symbolsDict = {
    "~": "ञ्",
    "`": "ञ",
    "!": "१",
    "@": "२",
    "#": "३",
    "$": "४",
    "%": "५",
    "^": "६",
    "&": "७",
    "*": "८",
    "(": "९",
    ")": "०",
    "-": "(",
    "_": ")",
    "+": "ं",
    "[": "ृ",
    "{": "र्",
    "]": "े",
    "}": "ै",
    "\\": "्",
    "|": "्र",
    ";": "स",
    ":": "स्",
    "'": "ु",
    "\"": "ू",
    ",": ",",
    "<": "?",
    ".": "।",
    ">": "श्र",
    "/": "र",
    "?": "रु",
    "=": ".",
    "ˆ": "फ्",
    "Î": "ङ्ख",
    "å": "द्व",
    "÷": "/"
}
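# Example: with these tables, the Preeti keystroke 's' maps to 'क' and 'g' to 'न'
# (see unicodeatoz above); symbols such as ']' map to the matra 'े' via symbolsDict.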
def normalizePreeti(preetitxt):
    # Rewrite common Preeti composed keystrokes into single forms, then reorder
    # characters that sit in a different position in Unicode (the reph '{' and
    # the i-matra 'l').
    normalized = ''
    previoussymbol = ''
    preetitxt = preetitxt.replace('qm', 's|')
    preetitxt = preetitxt.replace('f]', 'ो')
    preetitxt = preetitxt.replace('km', 'फ')
    preetitxt = preetitxt.replace('0f', 'ण')
    preetitxt = preetitxt.replace('If', 'क्ष')
    preetitxt = preetitxt.replace('if', 'ष')
    preetitxt = preetitxt.replace('cf', 'आ')
    index = -1
    while index + 1 < len(preetitxt):
        index += 1
        character = preetitxt[index]
        try:
            # Move a reph '{' that follows a consonant (and optional vowel sign)
            # in front of it, matching Unicode ordering.
            if preetitxt[index + 2] == '{':
                if preetitxt[index + 1] == 'f' or preetitxt[index + 1] == 'ो':
                    normalized += '{' + character + preetitxt[index + 1]
                    index += 2
                    continue
            if preetitxt[index + 1] == '{':
                if character != 'f':
                    normalized += '{' + character
                    index += 1
                    continue
        except IndexError:
            pass
        if character == 'l':
            # In Preeti the i-matra ('l') is typed before the consonant;
            # defer it so it follows the consonant, as Unicode expects.
            previoussymbol = 'l'
            continue
        else:
            normalized += character + previoussymbol
            previoussymbol = ''
    return normalized
def is_english_word(word):
    # Remove punctuation and convert to lowercase
    word_clean = re.sub(r'\W+', '', word).lower()
    return word_clean in english_words_set
def convert(preeti):
    converted = ''
    normalizedpreeti = normalizePreeti(preeti)
    # Split the text into tokens (words and non-words)
    tokens = re.findall(r'\w+|\W+', normalizedpreeti)
    for token in tokens:
        if re.match(r'\w+', token):
            # This is a word
            if is_english_word(token):
                # English word, skip conversion
                converted += token
            else:
                # Convert the word
                converted_word = ''
                for index, character in enumerate(token):
                    try:
                        if ord(character) >= 97 and ord(character) <= 122:
                            converted_word += unicodeatoz[ord(character) - 97]
                        elif ord(character) >= 65 and ord(character) <= 90:
                            converted_word += unicodeAtoZ[ord(character) - 65]
                        elif ord(character) >= 48 and ord(character) <= 57:
                            converted_word += unicode0to9[ord(character) - 48]
                        else:
                            converted_word += symbolsDict[character]
                    except KeyError:
                        converted_word += character
                converted += converted_word
        else:
            # Non-word token (punctuation, whitespace)
            converted += token
    return converted
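# Typical use from other Python code (the Streamlit UI below calls this on the
# text extracted from the uploaded file):
#   unicode_text = convert(preeti_string)
# Tokens recognised as English words are passed through unchanged; everything
# else is mapped character by character using the tables above.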
def extract_text_from_pdf(pdf_file):
    text = ''
    with open(pdf_file, 'rb') as file:
        reader = PyPDF2.PdfReader(file)
        for page in reader.pages:
            # extract_text() can return None for pages with no extractable text
            text += page.extract_text() or ''
    return text
def process_file(inputfile):
    # Helper for converting a file on disk (not used by the Streamlit UI below)
    ext = os.path.splitext(inputfile)[1].lower()
    if ext == '.pdf':
        preeti = extract_text_from_pdf(inputfile)
    else:
        with open(inputfile, "r", encoding="utf-8") as fp:
            preeti = fp.read()
    return convert(preeti)
def main():
    st.title("PDF/TXT to Unicode Converter")
    uploaded_file = st.file_uploader("Choose a PDF or TXT file", type=["pdf", "txt"])
    if uploaded_file is not None:
        file_extension = os.path.splitext(uploaded_file.name)[1].lower()
        if file_extension == ".pdf":
            pdf_reader = PyPDF2.PdfReader(io.BytesIO(uploaded_file.read()))
            text = ""
            for page in pdf_reader.pages:
                text += page.extract_text() or ""
        else:  # .txt file
            text = uploaded_file.getvalue().decode("utf-8")
        converted_text = convert(text)
        st.subheader("Original Text")
        st.text_area("", value=text, height=200)
        st.subheader("Converted Text")
        st.text_area("", value=converted_text, height=200)
        # Create a download button for the converted text
        st.download_button(
            label="Download Converted Text",
            data=converted_text.encode("utf-8"),
            file_name="converted_text.txt",
            mime="text/plain"
        )
if __name__ == "__main__":
    main()
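# To try the app locally (assuming this file is saved as app.py and that
# streamlit, PyPDF2, and nltk are installed):
#
#   streamlit run app.py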