mohamedrady committed on
Commit 049a2eb · verified
1 Parent(s): 8af0561

Delete alf.py

Files changed (1)
  1. alf.py +0 -212
alf.py DELETED
@@ -1,212 +0,0 @@
- import os
- import re
- import torch
- from collections import Counter
- from transformers import pipeline, AutoModel, AutoTokenizer, AutoModelForCausalLM
- import PyPDF2
- import openai
- import docx
- from arabert.preprocess import ArabertPreprocessor
-
-
-
-
- # Check whether a GPU is available and use it
- device = 0 if torch.cuda.is_available() else -1
-
- # Load the BERT and GPT-2 models
- arabic_bert_tokenizer = AutoTokenizer.from_pretrained("asafaya/bert-base-arabic")
- arabic_bert_model = AutoModel.from_pretrained("asafaya/bert-base-arabic")
-
- arabert_tokenizer = AutoTokenizer.from_pretrained("aubmindlab/bert-base-arabertv02")
- arabert_model = AutoModel.from_pretrained("aubmindlab/bert-base-arabertv02")
-
- gpt2_tokenizer = AutoTokenizer.from_pretrained("aubmindlab/aragpt2-large", trust_remote_code=True)
- gpt2_model = AutoModelForCausalLM.from_pretrained("aubmindlab/aragpt2-large", trust_remote_code=True)
-
- # Set up the AraBERT text preprocessor
- arabert_prep = ArabertPreprocessor("aubmindlab/bert-base-arabertv02")
-
- # Function to split text into chunks based on token count
- def split_text_into_chunks(text, tokenizer, max_length):
-     tokens = tokenizer.tokenize(text)
-     chunks = []
-     for i in range(0, len(tokens), max_length):
-         chunk_tokens = tokens[i:i + max_length]
-         chunk_text = tokenizer.convert_tokens_to_string(chunk_tokens)
-         chunks.append(chunk_text)
-     return chunks
-
- # Function to split text into sentences using regular expressions
- def extract_sentences(text):
-     sentences = re.split(r'(?<=[.!؟]) +', text)
-     return sentences
-
- # Function to extract quotations from the text
- def extract_quotes(text):
-     quotes = re.findall(r'[“"«](.*?)[”"»]', text)
-     return quotes
-
- # Function to count the tokens in a text
- def count_tokens(text, tokenizer):
-     tokens = tokenizer.tokenize(text)
-     return len(tokens)
-
- # Function to extract text from PDF files
- def extract_pdf_text(file_path):
-     with open(file_path, "rb") as pdf_file:
-         pdf_reader = PyPDF2.PdfReader(pdf_file)
-         text = ""
-         for page_num in range(len(pdf_reader.pages)):
-             page = pdf_reader.pages[page_num]
-             text += page.extract_text()
-     return text
-
- # Function to extract text from DOCX files
- def extract_docx_text(file_path):
-     doc = docx.Document(file_path)
-     text = "\n".join([para.text for para in doc.paragraphs])
-     return text
-
- # Function to read text from a file while handling encoding problems
- def read_text_file(file_path):
-     try:
-         with open(file_path, "r", encoding="utf-8") as file:
-             return file.read()
-     except UnicodeDecodeError:
-         try:
-             with open(file_path, "r", encoding="cp1252") as file:
-                 return file.read()
-         except UnicodeDecodeError:
-             with open(file_path, "r", encoding="latin-1") as file:  # latin-1 accepts any byte sequence, so this last fallback cannot fail
-                 return file.read()
-
- # Function to extract scenes from the text
- def extract_scenes(text):
-     scenes = re.split(r'داخلي|خارجي', text)  # split on the Arabic scene headings "interior" / "exterior"
-     scenes = [scene.strip() for scene in scenes if scene.strip()]
-     return scenes
-
- # Function to extract scene details (location and time)
- def extract_scene_details(scene):
-     details = {}
-     location_match = re.search(r'(داخلي|خارجي)', scene)  # interior / exterior
-     time_match = re.search(r'(ليلاً|نهاراً|شروق|غروب)', scene)  # night / day / sunrise / sunset
-
-     if location_match:
-         details['location'] = location_match.group()
-     if time_match:
-         details['time'] = time_match.group()
-
-     return details
-
- # Function to extract character ages
- def extract_ages(text):
-     ages = re.findall(r'\b(\d{1,2})\s*(?:عام|سنة|سنوات)\s*(?:من العمر|عمره|عمرها)', text)
-     return ages
-
- # Function to extract character descriptions
- def extract_character_descriptions(text):
-     descriptions = re.findall(r'شخصية\s*(.*?)\s*:\s*وصف\s*(.*?)\s*(?:\.|،)', text, re.DOTALL)
-     return descriptions
-
- # Function to count how often each character appears
- def extract_character_frequency(entities):
-     persons = [ent[0] for ent in entities['PERSON']]
-     frequency = Counter(persons)
-     return frequency
-
- # Function to extract dialogue lines and identify the speakers
- def extract_dialogues(text):
-     dialogues = re.findall(r'(.*?)(?:\s*:\s*)(.*?)(?=\n|$)', text, re.DOTALL)
-     return dialogues
-
- # Function to process the files and split them by token count
- def process_files(input_directory, output_directory_950):
-     for file_name in os.listdir(input_directory):
-         file_path = os.path.join(input_directory, file_name)
-
-         if os.path.isdir(file_path):  # skip directories
-             continue
-
-         if file_path.endswith(".pdf"):
-             text = extract_pdf_text(file_path)
-         elif file_path.endswith(".docx"):
-             text = extract_docx_text(file_path)
-         else:
-             text = read_text_file(file_path)
-
-         # Split the text into chunks of at most 950 tokens
-         chunks_950 = split_text_into_chunks(text, gpt2_tokenizer, 950)
-         for i, chunk in enumerate(chunks_950):
-             output_file_950 = os.path.join(output_directory_950, f"{os.path.splitext(file_name)[0]}_part_{i+1}.txt")
-             with open(output_file_950, "w", encoding="utf-8") as file:
-                 file.write(chunk)
-
- # Function to analyze the texts, extract information, and save the results
- def analyze_files(input_directory, output_directory, tokenizer, max_length):
-     for file_name in os.listdir(input_directory):
-         file_path = os.path.join(input_directory, file_name)
-
-         if os.path.isdir(file_path):  # skip directories
-             continue
-
-         with open(file_path, "r", encoding="utf-8") as file:
-             text = file.read()
-
-         chunks = split_text_into_chunks(text, tokenizer, max_length)
-
-         # Run the analysis on the chunked text
-         for chunk in chunks:
-             sentences = extract_sentences(chunk)
-             quotes = extract_quotes(chunk)
-             token_count = count_tokens(chunk, tokenizer)
-             scenes = extract_scenes(chunk)
-             ages = extract_ages(chunk)
-             character_descriptions = extract_character_descriptions(chunk)
-             dialogues = extract_dialogues(chunk)
-             scene_details = [extract_scene_details(scene) for scene in scenes]
-
-             # Save the results, appending one output file per kind of result
-             with open(os.path.join(output_directory, f"{file_name}_sentences.txt"), "a", encoding="utf-8") as file:
-                 file.write("\n".join(sentences))
-
-
-             with open(os.path.join(output_directory, f"{file_name}_quotes.txt"), "a", encoding="utf-8") as file:
-                 file.write("\n".join(quotes))
-
-             with open(os.path.join(output_directory, f"{file_name}_token_count.txt"), "a", encoding="utf-8") as file:
-                 file.write(str(token_count))
-
-             with open(os.path.join(output_directory, f"{file_name}_scenes.txt"), "a", encoding="utf-8") as file:
-                 file.write("\n".join(scenes))
-
-             with open(os.path.join(output_directory, f"{file_name}_scene_details.txt"), "a", encoding="utf-8") as file:
-                 file.write(str(scene_details))
-
-             with open(os.path.join(output_directory, f"{file_name}_ages.txt"), "a", encoding="utf-8") as file:
-                 file.write(str(ages))
-
-             with open(os.path.join(output_directory, f"{file_name}_character_descriptions.txt"), "a", encoding="utf-8") as file:
-                 file.write(str(character_descriptions))
-
-             with open(os.path.join(output_directory, f"{file_name}_dialogues.txt"), "a", encoding="utf-8") as file:
-                 file.write(str(dialogues))
-
- # Define the paths
- input_directory = "/Volumes/CLOCKWORK T/clockworkspace/first pro/in"
- output_directory_950 = "/Volumes/CLOCKWORK T/clockworkspace/first pro/1000"
- input_directory_950 = "/Volumes/CLOCKWORK T/clockworkspace/first pro/1000"
- output_directory_950_out = "/Volumes/CLOCKWORK T/clockworkspace/first pro/out/1000"
-
- # Make sure the output directories exist
- os.makedirs(output_directory_950, exist_ok=True)
- os.makedirs(output_directory_950_out, exist_ok=True)
-
- # Process the files and split them into chunks
- process_files(input_directory, output_directory_950)
-
- # Analyze the files that were split into 950-token chunks
- analyze_files(input_directory_950, output_directory_950_out, gpt2_tokenizer, 950)
-
- print("تمت معالجة الملفات وتحليلها بنجاح.")  # "Files processed and analyzed successfully."