repository_name (stringlengths 5-67) | func_path_in_repository (stringlengths 4-234) | func_name (stringlengths 0-314) | whole_func_string (stringlengths 52-3.87M) | language (stringclasses, 6 values) | func_code_string (stringlengths 52-3.87M) | func_documentation_string (stringlengths 1-47.2k) | func_code_url (stringlengths 85-339)
---|---|---|---|---|---|---|---|
aalireza/SimpleAudioIndexer | SimpleAudioIndexer/__init__.py | SimpleAudioIndexer._split_audio_by_duration | def _split_audio_by_duration(self, audio_abs_path,
results_abs_path, duration_seconds):
"""
Calculates the length of each segment and passes it to
self._audio_segment_extractor
Parameters
----------
audio_abs_path : str
results_abs_path : str
A place for adding digits needs to be added prior to the format
declaration, i.e. name%03.wav. Here, we've added `*` at the staging
step, which we'll replace.
duration_seconds : int
"""
total_seconds = self._get_audio_duration_seconds(audio_abs_path)
current_segment = 0
while current_segment <= total_seconds // duration_seconds + 1:
if current_segment + duration_seconds > total_seconds:
ending_second = total_seconds
else:
ending_second = current_segment + duration_seconds
self._audio_segment_extractor(
audio_abs_path,
results_abs_path.replace("*", "{:03d}".format(
current_segment)),
starting_second=current_segment, duration=(ending_second -
current_segment))
current_segment += 1 | python | def _split_audio_by_duration(self, audio_abs_path,
results_abs_path, duration_seconds):
"""
Calculates the length of each segment and passes it to
self._audio_segment_extractor
Parameters
----------
audio_abs_path : str
results_abs_path : str
A place for adding digits needs to be added prior to the format
declaration, i.e. name%03.wav. Here, we've added `*` at the staging
step, which we'll replace.
duration_seconds : int
"""
total_seconds = self._get_audio_duration_seconds(audio_abs_path)
current_segment = 0
while current_segment <= total_seconds // duration_seconds + 1:
if current_segment + duration_seconds > total_seconds:
ending_second = total_seconds
else:
ending_second = current_segment + duration_seconds
self._audio_segment_extractor(
audio_abs_path,
results_abs_path.replace("*", "{:03d}".format(
current_segment)),
starting_second=current_segment, duration=(ending_second -
current_segment))
current_segment += 1 | Calculates the length of each segment and passes it to
self._audio_segment_extractor
Parameters
----------
audio_abs_path : str
results_abs_path : str
A place for adding digits needs to be added prior to the format
declaration, i.e. name%03.wav. Here, we've added `*` at the staging
step, which we'll replace.
duration_seconds : int | https://github.com/aalireza/SimpleAudioIndexer/blob/73f9d75897d785bdaea9d28dde5fa48104428164/SimpleAudioIndexer/__init__.py#L565-L593 |
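The `*` in `results_abs_path` above is the placeholder that `_split_audio_by_duration` swaps for a zero-padded segment index. A minimal sketch of that naming scheme; the path below is hypothetical:

```python
# Hypothetical staged path; the `*` marks where the segment index goes.
results_abs_path = "/tmp/src/staging/lecture*.wav"

# Mirrors the `results_abs_path.replace("*", "{:03d}".format(current_segment))`
# call in the method above, for the first three segments.
segment_paths = [results_abs_path.replace("*", "{:03d}".format(i))
                 for i in range(3)]
print(segment_paths)
# ['/tmp/src/staging/lecture000.wav', '/tmp/src/staging/lecture001.wav',
#  '/tmp/src/staging/lecture002.wav']
```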
aalireza/SimpleAudioIndexer | SimpleAudioIndexer/__init__.py | SimpleAudioIndexer._split_audio_by_size | def _split_audio_by_size(self, audio_abs_path, results_abs_path,
chunk_size):
"""
Calculates the duration of the name.wav in order for all splits to have
the size of chunk_size except possibly the last split (which will be
smaller) and then passes the duration to `split_audio_by_duration`
Parameters
----------
audio_abs_path : str
results_abs_path : str
A place for adding digits needs to be added prior to the format
declaration, i.e. name%03.wav
chunk_size : int
Should be in bytes
"""
sample_rate = self._get_audio_sample_rate(audio_abs_path)
sample_bit = self._get_audio_sample_bit(audio_abs_path)
channel_num = self._get_audio_channels(audio_abs_path)
duration = 8 * chunk_size / reduce(lambda x, y: int(x) * int(y),
[sample_rate, sample_bit,
channel_num])
self._split_audio_by_duration(audio_abs_path, results_abs_path,
duration) | python | def _split_audio_by_size(self, audio_abs_path, results_abs_path,
chunk_size):
"""
Calculates the duration of the name.wav in order for all splits to have
the size of chunk_size except possibly the last split (which will be
smaller) and then passes the duration to `split_audio_by_duration`
Parameters
----------
audio_abs_path : str
results_abs_path : str
A place for adding digits needs to be added prior to the format
declaration, i.e. name%03.wav
chunk_size : int
Should be in bytes
"""
sample_rate = self._get_audio_sample_rate(audio_abs_path)
sample_bit = self._get_audio_sample_bit(audio_abs_path)
channel_num = self._get_audio_channels(audio_abs_path)
duration = 8 * chunk_size / reduce(lambda x, y: int(x) * int(y),
[sample_rate, sample_bit,
channel_num])
self._split_audio_by_duration(audio_abs_path, results_abs_path,
duration) | Calculates the duration of the name.wav in order for all splits to have
the size of chunk_size except possibly the last split (which will be
smaller) and then passes the duration to `split_audio_by_duration`
Parameters
----------
audio_abs_path : str
results_abs_path : str
A place for adding digits needs to be added prior to the format
declaration, i.e. name%03.wav
chunk_size : int
Should be in bytes | https://github.com/aalireza/SimpleAudioIndexer/blob/73f9d75897d785bdaea9d28dde5fa48104428164/SimpleAudioIndexer/__init__.py#L595-L618 |
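The duration formula above follows from the uncompressed WAV bitrate: a chunk of `chunk_size` bytes holds `8 * chunk_size / (sample_rate * sample_bit * channel_num)` seconds of audio. A worked example with assumed audio properties (not taken from the source):

```python
from functools import reduce

# Assumed properties for illustration: 44.1 kHz, 16-bit, mono.
sample_rate, sample_bit, channel_num = 44100, 16, 1
chunk_size = 95 * 10**6  # bytes, e.g. ~95% of a hypothetical 100 MB limit

bits_per_second = reduce(lambda x, y: int(x) * int(y),
                         [sample_rate, sample_bit, channel_num])
duration = 8 * chunk_size / bits_per_second
print(round(duration, 1))  # roughly 1077.1 seconds per split
```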
aalireza/SimpleAudioIndexer | SimpleAudioIndexer/__init__.py | SimpleAudioIndexer._filtering_step | def _filtering_step(self, basename):
"""
Copies the audio file to the `filtered` directory if its format is `wav`.
Parameters
----------
basename : str
A basename of `/home/random-guy/some-audio-file.wav` is
`some-audio-file.wav`
"""
name = ''.join(basename.split('.')[:-1])
# May cause problems if the wav has 9 or more channels.
if basename.split('.')[-1] == "wav":
if self.get_verbosity():
print("Found wave! Copying to {}/filtered/{}".format(
self.src_dir, basename))
subprocess.Popen(["cp", "{}/{}.wav".format(self.src_dir, name),
"{}/filtered/{}.wav".format(self.src_dir, name)],
universal_newlines=True).communicate() | python | def _filtering_step(self, basename):
"""
Copies the audio file to the `filtered` directory if its format is `wav`.
Parameters
----------
basename : str
A basename of `/home/random-guy/some-audio-file.wav` is
`some-audio-file.wav`
"""
name = ''.join(basename.split('.')[:-1])
# May cause problems if the wav has 9 or more channels.
if basename.split('.')[-1] == "wav":
if self.get_verbosity():
print("Found wave! Copying to {}/filtered/{}".format(
self.src_dir, basename))
subprocess.Popen(["cp", "{}/{}.wav".format(self.src_dir, name),
"{}/filtered/{}.wav".format(self.src_dir, name)],
universal_newlines=True).communicate() | Copies the audio file to the `filtered` directory if its format is `wav`.
Parameters
----------
basename : str
A basename of `/home/random-guy/some-audio-file.wav` is
`some-audio-file.wav` | https://github.com/aalireza/SimpleAudioIndexer/blob/73f9d75897d785bdaea9d28dde5fa48104428164/SimpleAudioIndexer/__init__.py#L620-L638 |
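A minimal standard-library sketch of the same filtering idea (copy a file into a `filtered` subdirectory only when its extension is `wav`), using `shutil.copy` instead of spawning `cp`; the paths are hypothetical:

```python
import os
import shutil

def filter_wav(src_dir, basename):
    # Same extension check as the method above; copy into src_dir/filtered.
    if basename.split('.')[-1] == "wav":
        shutil.copy(os.path.join(src_dir, basename),
                    os.path.join(src_dir, "filtered", basename))

# Hypothetical call, assuming /tmp/src and /tmp/src/filtered already exist.
filter_wav("/tmp/src", "some-audio-file.wav")
```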
aalireza/SimpleAudioIndexer | SimpleAudioIndexer/__init__.py | SimpleAudioIndexer._staging_step | def _staging_step(self, basename):
"""
Checks the size of the audio file, splits it if needed to stay within the
API limit and then moves it to the `staging` directory while appending `*`
to the end of the filename for self._split_audio_by_duration to replace
it with a number.
Parameters
----------
basename : str
A basename of `/home/random-guy/some-audio-file.wav` is
`some-audio-file.wav`
"""
name = ''.join(basename.split('.')[:-1])
if self.get_mode() == "ibm":
# Checks the file size. It's better to use 95% of the allocated
# size per file since the upper limit is not always respected.
total_size = os.path.getsize("{}/filtered/{}.wav".format(
self.src_dir, name))
if total_size >= self.ibm_api_limit_bytes:
if self.get_verbosity():
print(("{}'s size over API limit ({}). Splitting").format(
name, self.ibm_api_limit_bytes))
self._split_audio_by_size(
"{}/filtered/{}.wav".format(self.src_dir, name),
"{}/staging/{}*.wav".format(self.src_dir, name),
self.ibm_api_limit_bytes * 95 / 100)
else:
if self.get_verbosity():
print("{}'s size is fine. Moving to staging dir'".format(
name))
subprocess.Popen((
"mv {}/filtered/{}.wav {}/staging/{}000.wav").format(
self.src_dir, name, self.src_dir, name),
shell=True,
universal_newlines=True).communicate()
elif self.get_mode() == "cmu":
if self.get_verbosity():
print("Converting {} to a readable wav".format(basename))
ffmpeg = os.path.basename(find_executable("ffmpeg") or
find_executable("avconv"))
if ffmpeg is None:
raise Exception(("Either ffmpeg or avconv is needed. "
"Neither is installed or accessible"))
try:
# ffmpeg log levels:
# https://ffmpeg.org/ffmpeg.html#Generic-options
ffmpeg_log_level = "8" # fatal errors.
if self.get_verbosity():
ffmpeg_log_level = "32" # info `default for ffmpeg`
subprocess.check_call([
str(ffmpeg), "-y", "-i", "{}/filtered/{}.wav".format(
self.src_dir, str(name)), "-acodec", "pcm_s16le",
"-ac", "1", "-ar", "16000", "{}/staging/{}000.wav".format(
self.src_dir, name),
"-v", ffmpeg_log_level], universal_newlines=True)
except subprocess.CalledProcessError as e:
print(e)
if os.path.exists("{}/staging/{}000.wav".format(
self.src_dir, name)):
if self.get_verbosity():
print(("{}/filtered/{} was converted to "
"{}/staging/{}000.wav Now removing the copy of "
"{} in filtered sub directory").format(
self.src_dir, basename,
self.src_dir, name, basename))
subprocess.Popen([
"rm", "{}/filtered/{}".format(self.src_dir, basename)],
universal_newlines=True).communicate()
else:
raise Exception("Something went wrong with ffmpeg conversion!") | python | def _staging_step(self, basename):
"""
Checks the size of the audio file, splits it if needed to stay within the
API limit and then moves it to the `staging` directory while appending `*`
to the end of the filename for self._split_audio_by_duration to replace
it with a number.
Parameters
----------
basename : str
A basename of `/home/random-guy/some-audio-file.wav` is
`some-audio-file.wav`
"""
name = ''.join(basename.split('.')[:-1])
if self.get_mode() == "ibm":
# Checks the file size. It's better to use 95% of the allocated
# size per file since the upper limit is not always respected.
total_size = os.path.getsize("{}/filtered/{}.wav".format(
self.src_dir, name))
if total_size >= self.ibm_api_limit_bytes:
if self.get_verbosity():
print(("{}'s size over API limit ({}). Splitting").format(
name, self.ibm_api_limit_bytes))
self._split_audio_by_size(
"{}/filtered/{}.wav".format(self.src_dir, name),
"{}/staging/{}*.wav".format(self.src_dir, name),
self.ibm_api_limit_bytes * 95 / 100)
else:
if self.get_verbosity():
print("{}'s size is fine. Moving to staging dir'".format(
name))
subprocess.Popen((
"mv {}/filtered/{}.wav {}/staging/{}000.wav").format(
self.src_dir, name, self.src_dir, name),
shell=True,
universal_newlines=True).communicate()
elif self.get_mode() == "cmu":
if self.get_verbosity():
print("Converting {} to a readable wav".format(basename))
ffmpeg = os.path.basename(find_executable("ffmpeg") or
find_executable("avconv"))
if ffmpeg is None:
raise Exception(("Either ffmpeg or avconv is needed. "
"Neither is installed or accessible"))
try:
# ffmpeg log levels:
# https://ffmpeg.org/ffmpeg.html#Generic-options
ffmpeg_log_level = "8" # fatal errors.
if self.get_verbosity():
ffmpeg_log_level = "32" # info `default for ffmpeg`
subprocess.check_call([
str(ffmpeg), "-y", "-i", "{}/filtered/{}.wav".format(
self.src_dir, str(name)), "-acodec", "pcm_s16le",
"-ac", "1", "-ar", "16000", "{}/staging/{}000.wav".format(
self.src_dir, name),
"-v", ffmpeg_log_level], universal_newlines=True)
except subprocess.CalledProcessError as e:
print(e)
if os.path.exists("{}/staging/{}000.wav".format(
self.src_dir, name)):
if self.get_verbosity():
print(("{}/filtered/{} was converted to "
"{}/staging/{}000.wav Now removing the copy of "
"{} in filtered sub directory").format(
self.src_dir, basename,
self.src_dir, name, basename))
subprocess.Popen([
"rm", "{}/filtered/{}".format(self.src_dir, basename)],
universal_newlines=True).communicate()
else:
raise Exception("Something went wrong with ffmpeg conversion!") | Checks the size of audio file, splits it if it's needed to manage api
limit and then moves to `staged` directory while appending `*` to
the end of the filename for self.split_audio_by_duration to replace
it by a number.
Parameters
----------
basename : str
A basename of `/home/random-guy/some-audio-file.wav` is
`some-audio-file.wav` | https://github.com/aalireza/SimpleAudioIndexer/blob/73f9d75897d785bdaea9d28dde5fa48104428164/SimpleAudioIndexer/__init__.py#L640-L712 |
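For the `cmu` branch, the conversion above amounts to transcoding to 16 kHz, mono, 16-bit PCM, which is what PocketSphinx's default models typically expect. A standalone sketch with hypothetical file names, using `shutil.which` in place of `find_executable`:

```python
import shutil
import subprocess

ffmpeg = shutil.which("ffmpeg") or shutil.which("avconv")
if ffmpeg is None:
    raise RuntimeError("Either ffmpeg or avconv is needed.")

# -acodec pcm_s16le: 16-bit PCM, -ac 1: mono, -ar 16000: 16 kHz,
# -v 8: log fatal errors only (matches the non-verbose branch above).
subprocess.check_call([ffmpeg, "-y", "-i", "input.wav",
                       "-acodec", "pcm_s16le", "-ac", "1", "-ar", "16000",
                       "output000.wav", "-v", "8"])
```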
aalireza/SimpleAudioIndexer | SimpleAudioIndexer/__init__.py | SimpleAudioIndexer._prepare_audio | def _prepare_audio(self, basename, replace_already_indexed=False):
"""
Prepares and stages the audio file to be indexed.
Parameters
----------
basename : str, None
A basename of `/home/random-guy/some-audio-file.wav` is
`some-audio-file.wav`
If basename is `None`, it'll prepare all the audio files.
"""
if basename is not None:
if basename in self.get_timestamps():
if self.get_verbosity():
print("File specified was already indexed. Reindexing...")
del self.__timestamps[basename]
self._filtering_step(basename)
self._staging_step(basename)
else:
for audio_basename in self._list_audio_files():
if audio_basename in self.__timestamps:
if replace_already_indexed:
if self.get_verbosity():
print("Already indexed {}. Reindexing...".format(
audio_basename))
del self.__timestamps[audio_basename]
else:
if self.get_verbosity():
print("Already indexed {}. Skipping...".format(
audio_basename))
continue
self._filtering_step(audio_basename)
self._staging_step(audio_basename) | python | def _prepare_audio(self, basename, replace_already_indexed=False):
"""
Prepares and stages the audio file to be indexed.
Parameters
----------
basename : str, None
A basename of `/home/random-guy/some-audio-file.wav` is
`some-audio-file.wav`
If basename is `None`, it'll prepare all the audio files.
"""
if basename is not None:
if basename in self.get_timestamps():
if self.get_verbosity():
print("File specified was already indexed. Reindexing...")
del self.__timestamps[basename]
self._filtering_step(basename)
self._staging_step(basename)
else:
for audio_basename in self._list_audio_files():
if audio_basename in self.__timestamps:
if replace_already_indexed:
if self.get_verbosity():
print("Already indexed {}. Reindexing...".format(
audio_basename))
del self.__timestamps[audio_basename]
else:
if self.get_verbosity():
print("Already indexed {}. Skipping...".format(
audio_basename))
continue
self._filtering_step(audio_basename)
self._staging_step(audio_basename) | Prepares and stages the audio file to be indexed.
Parameters
----------
basename : str, None
A basename of `/home/random-guy/some-audio-file.wav` is
`some-audio-file.wav`
If basename is `None`, it'll prepare all the audio files. | https://github.com/aalireza/SimpleAudioIndexer/blob/73f9d75897d785bdaea9d28dde5fa48104428164/SimpleAudioIndexer/__init__.py#L714-L746 |
aalireza/SimpleAudioIndexer | SimpleAudioIndexer/__init__.py | SimpleAudioIndexer._index_audio_cmu | def _index_audio_cmu(self, basename=None, replace_already_indexed=False):
"""
Indexes audio with pocketsphinx. Beware that the output would not be
sufficiently accurate. Use this only if you don't want to upload your
files to IBM.
Parameters
-----------
basename : str, optional
A specific basename to be indexed and is placed in src_dir
E.g. `audio.wav`.
If `None` is selected, all the valid audio files would be indexed.
Default is `None`.
Raises
------
OSError
If the output of pocketsphinx command results in an error.
"""
self._prepare_audio(basename=basename,
replace_already_indexed=replace_already_indexed)
for staging_audio_basename in self._list_audio_files(
sub_dir="staging"):
original_audio_name = ''.join(
staging_audio_basename.split('.')[:-1])[:-3]
pocketsphinx_command = ''.join([
"pocketsphinx_continuous", "-infile",
str("{}/staging/{}".format(
self.src_dir, staging_audio_basename)),
"-time", "yes", "-logfn", "/dev/null"])
try:
if self.get_verbosity():
print("Now indexing {}".format(staging_audio_basename))
output = subprocess.check_output([
"pocketsphinx_continuous", "-infile",
str("{}/staging/{}".format(
self.src_dir, staging_audio_basename)),
"-time", "yes", "-logfn", "/dev/null"
], universal_newlines=True).split('\n')
str_timestamps_with_sil_conf = list(map(
lambda x: x.split(" "), filter(None, output[1:])))
# Timestamps are put in a single-element list to match
# Watson's output.
self.__timestamps_unregulated[
original_audio_name + ".wav"] = [(
self._timestamp_extractor_cmu(
staging_audio_basename,
str_timestamps_with_sil_conf))]
if self.get_verbosity():
print("Done indexing {}".format(staging_audio_basename))
except OSError as e:
if self.get_verbosity():
print(e, "The command was: {}".format(
pocketsphinx_command))
self.__errors[(time(), staging_audio_basename)] = e
self._timestamp_regulator()
if self.get_verbosity():
print("Finished indexing procedure") | python | def _index_audio_cmu(self, basename=None, replace_already_indexed=False):
"""
Indexes audio with pocketsphinx. Beware that the output would not be
sufficiently accurate. Use this only if you don't want to upload your
files to IBM.
Parameters
-----------
basename : str, optional
A specific basename to be indexed and is placed in src_dir
E.g. `audio.wav`.
If `None` is selected, all the valid audio files would be indexed.
Default is `None`.
Raises
------
OSError
If the output of pocketsphinx command results in an error.
"""
self._prepare_audio(basename=basename,
replace_already_indexed=replace_already_indexed)
for staging_audio_basename in self._list_audio_files(
sub_dir="staging"):
original_audio_name = ''.join(
staging_audio_basename.split('.')[:-1])[:-3]
pocketsphinx_command = ''.join([
"pocketsphinx_continuous", "-infile",
str("{}/staging/{}".format(
self.src_dir, staging_audio_basename)),
"-time", "yes", "-logfn", "/dev/null"])
try:
if self.get_verbosity():
print("Now indexing {}".format(staging_audio_basename))
output = subprocess.check_output([
"pocketsphinx_continuous", "-infile",
str("{}/staging/{}".format(
self.src_dir, staging_audio_basename)),
"-time", "yes", "-logfn", "/dev/null"
], universal_newlines=True).split('\n')
str_timestamps_with_sil_conf = list(map(
lambda x: x.split(" "), filter(None, output[1:])))
# Timestamps are put in a single-element list to match
# Watson's output.
self.__timestamps_unregulated[
original_audio_name + ".wav"] = [(
self._timestamp_extractor_cmu(
staging_audio_basename,
str_timestamps_with_sil_conf))]
if self.get_verbosity():
print("Done indexing {}".format(staging_audio_basename))
except OSError as e:
if self.get_verbosity():
print(e, "The command was: {}".format(
pocketsphinx_command))
self.__errors[(time(), staging_audio_basename)] = e
self._timestamp_regulator()
if self.get_verbosity():
print("Finished indexing procedure") | Indexes audio with pocketsphinx. Beware that the output would not be
sufficiently accurate. Use this only if you don't want to upload your
files to IBM.
Parameters
-----------
basename : str, optional
A specific basename to be indexed and is placed in src_dir
E.g. `audio.wav`.
If `None` is selected, all the valid audio files would be indexed.
Default is `None`.
Raises
------
OSError
If the output of pocketsphinx command results in an error. | https://github.com/aalireza/SimpleAudioIndexer/blob/73f9d75897d785bdaea9d28dde5fa48104428164/SimpleAudioIndexer/__init__.py#L748-L808 |
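With `-time yes`, `pocketsphinx_continuous` emits one `word start end confidence` line per token after the hypothesis line, which is why the method above drops `output[0]` and splits the rest on spaces. A hedged sketch with made-up output:

```python
# Made-up output of `pocketsphinx_continuous ... -time yes`, already split
# on newlines; the first element (the hypothesis line) is skipped.
output = [
    "hello world",
    "<s> 0.000 0.340 1.000",
    "hello 0.340 0.780 0.950",
    "world 0.780 1.250 0.900",
    "</s> 1.250 1.300 1.000",
    "",
]
str_timestamps_with_sil_conf = list(map(lambda x: x.split(" "),
                                        filter(None, output[1:])))
print(str_timestamps_with_sil_conf[1])  # ['hello', '0.340', '0.780', '0.950']
```

Tokens such as `<s>` and `</s>` are later dropped by `_timestamp_extractor_cmu`, which filters out entries containing `<`, `>`, or `/`.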
aalireza/SimpleAudioIndexer | SimpleAudioIndexer/__init__.py | SimpleAudioIndexer._timestamp_extractor_cmu | def _timestamp_extractor_cmu(self, staging_audio_basename,
str_timestamps_with_sil_conf):
"""
Parameters
----------
str_timestamps_with_sil_conf : [[str, str, str, str]]
Of the form [[word, starting_sec, ending_sec, confidence]]
Returns
-------
timestamps : [[str, float, float]]
"""
filter_untimed = filter(lambda x: len(x) == 4,
str_timestamps_with_sil_conf)
if filter_untimed != str_timestamps_with_sil_conf:
self.__errors[
(time(), staging_audio_basename)
] = str_timestamps_with_sil_conf
str_timestamps = [
str_timestamp[:-1]
for str_timestamp in filter_untimed
if not any([letter in {"<", ">", "/"}
for letter in ''.join(str_timestamp)])]
timestamps = list([
_WordBlock(
word=re.findall("^[^\(]+", x[0])[0],
start=round(float(x[1]), 2),
end=round(float(x[2]), 2)
) for x in str_timestamps])
return timestamps | python | def _timestamp_extractor_cmu(self, staging_audio_basename,
str_timestamps_with_sil_conf):
"""
Parameters
----------
str_timestamps_with_sil_conf : [[str, str, str, str]]
Of the form [[word, starting_sec, ending_sec, confidence]]
Returns
-------
timestamps : [[str, float, float]]
"""
filter_untimed = filter(lambda x: len(x) == 4,
str_timestamps_with_sil_conf)
if filter_untimed != str_timestamps_with_sil_conf:
self.__errors[
(time(), staging_audio_basename)
] = str_timestamps_with_sil_conf
str_timestamps = [
str_timestamp[:-1]
for str_timestamp in filter_untimed
if not any([letter in {"<", ">", "/"}
for letter in ''.join(str_timestamp)])]
timestamps = list([
_WordBlock(
word=re.findall("^[^\(]+", x[0])[0],
start=round(float(x[1]), 2),
end=round(float(x[2]), 2)
) for x in str_timestamps])
return timestamps | Parameters
----------
str_timestamps_with_sil_conf : [[str, str, str, str]]
Of the form [[word, starting_sec, ending_sec, confidence]]
Returns
-------
timestamps : [[str, float, float]] | https://github.com/aalireza/SimpleAudioIndexer/blob/73f9d75897d785bdaea9d28dde5fa48104428164/SimpleAudioIndexer/__init__.py#L810-L839 |
aalireza/SimpleAudioIndexer | SimpleAudioIndexer/__init__.py | SimpleAudioIndexer._index_audio_ibm | def _index_audio_ibm(self, basename=None, replace_already_indexed=False,
continuous=True, model="en-US_BroadbandModel",
word_confidence=True, word_alternatives_threshold=0.9,
profanity_filter_for_US_results=False):
"""
Implements a search-suitable interface for Watson speech API.
Some explanation of the parameters here has been taken from [1]_
Parameters
----------
basename : str, optional
A specific basename to be indexed and is placed in src_dir
e.g `audio.wav`.
If `None` is selected, all the valid audio files would be indexed.
Default is `None`.
replace_already_indexed : bool
`True`, To reindex some audio file that's already in the
timestamps.
Default is `False`.
continuous : bool
Indicates whether multiple final results that represent consecutive
phrases separated by long pauses are returned.
If true, such phrases are returned; if false (the default),
recognition ends after the first end-of-speech (EOS) incident is
detected.
Default is `True`.
model : {
'ar-AR_BroadbandModel',
'en-UK_BroadbandModel'
'en-UK_NarrowbandModel',
'en-US_BroadbandModel', (the default)
'en-US_NarrowbandModel',
'es-ES_BroadbandModel',
'es-ES_NarrowbandModel',
'fr-FR_BroadbandModel',
'ja-JP_BroadbandModel',
'ja-JP_NarrowbandModel',
'pt-BR_BroadbandModel',
'pt-BR_NarrowbandModel',
'zh-CN_BroadbandModel',
'zh-CN_NarrowbandModel'
}
The identifier of the model to be used for the recognition
Default is 'en-US_BroadbandModel'
word_confidence : bool
Indicates whether a confidence measure in the range of 0 to 1 is
returned for each word.
The default is True. (It's False in the original)
word_alternatives_threshold : numeric
A confidence value that is the lower bound for identifying a
hypothesis as a possible word alternative (also known as
"Confusion Networks"). An alternative word is considered if its
confidence is greater than or equal to the threshold. Specify a
probability between 0 and 1 inclusive.
Default is `0.9`.
profanity_filter_for_US_results : bool
Indicates whether profanity filtering is performed on the
transcript. If true, the service filters profanity from all output
by replacing inappropriate words with a series of asterisks.
If false, the service returns results with no censoring. Applies
to US English transcription only.
Default is `False`.
References
----------
.. [1] : https://ibm.com/watson/developercloud/speech-to-text/api/v1/
"""
params = {'continuous': continuous,
'model': model,
'word_alternatives_threshold': word_alternatives_threshold,
'word_confidence': word_confidence,
'timestamps': True,
'inactivity_timeout': str(-1),
'profanity_filter': profanity_filter_for_US_results}
self._prepare_audio(basename=basename,
replace_already_indexed=replace_already_indexed)
for staging_audio_basename in self._list_audio_files(
sub_dir="staging"):
original_audio_name = ''.join(
staging_audio_basename.split('.')[:-1])[:-3]
with open("{}/staging/{}".format(
self.src_dir, staging_audio_basename), "rb") as f:
if self.get_verbosity():
print("Uploading {}...".format(staging_audio_basename))
response = requests.post(
url=("https://stream.watsonplatform.net/"
"speech-to-text/api/v1/recognize"),
auth=(self.get_username_ibm(), self.get_password_ibm()),
headers={'content-type': 'audio/wav'},
data=f.read(),
params=params)
if self.get_verbosity():
print("Indexing {}...".format(staging_audio_basename))
self.__timestamps_unregulated[
original_audio_name + ".wav"].append(
self._timestamp_extractor_ibm(
staging_audio_basename, json.loads(response.text)))
if self.get_verbosity():
print("Done indexing {}".format(staging_audio_basename))
self._timestamp_regulator()
if self.get_verbosity():
print("Indexing procedure finished") | python | def _index_audio_ibm(self, basename=None, replace_already_indexed=False,
continuous=True, model="en-US_BroadbandModel",
word_confidence=True, word_alternatives_threshold=0.9,
profanity_filter_for_US_results=False):
"""
Implements a search-suitable interface for Watson speech API.
Some explanation of the parameters here has been taken from [1]_
Parameters
----------
basename : str, optional
A specific basename to be indexed and is placed in src_dir
e.g `audio.wav`.
If `None` is selected, all the valid audio files would be indexed.
Default is `None`.
replace_already_indexed : bool
`True`, To reindex some audio file that's already in the
timestamps.
Default is `False`.
continuous : bool
Indicates whether multiple final results that represent consecutive
phrases separated by long pauses are returned.
If true, such phrases are returned; if false (the default),
recognition ends after the first end-of-speech (EOS) incident is
detected.
Default is `True`.
model : {
'ar-AR_BroadbandModel',
'en-UK_BroadbandModel'
'en-UK_NarrowbandModel',
'en-US_BroadbandModel', (the default)
'en-US_NarrowbandModel',
'es-ES_BroadbandModel',
'es-ES_NarrowbandModel',
'fr-FR_BroadbandModel',
'ja-JP_BroadbandModel',
'ja-JP_NarrowbandModel',
'pt-BR_BroadbandModel',
'pt-BR_NarrowbandModel',
'zh-CN_BroadbandModel',
'zh-CN_NarrowbandModel'
}
The identifier of the model to be used for the recognition
Default is 'en-US_BroadbandModel'
word_confidence : bool
Indicates whether a confidence measure in the range of 0 to 1 is
returned for each word.
The default is True. (It's False in the original)
word_alternatives_threshold : numeric
A confidence value that is the lower bound for identifying a
hypothesis as a possible word alternative (also known as
"Confusion Networks"). An alternative word is considered if its
confidence is greater than or equal to the threshold. Specify a
probability between 0 and 1 inclusive.
Default is `0.9`.
profanity_filter_for_US_results : bool
Indicates whether profanity filtering is performed on the
transcript. If true, the service filters profanity from all output
by replacing inappropriate words with a series of asterisks.
If false, the service returns results with no censoring. Applies
to US English transcription only.
Default is `False`.
References
----------
.. [1] : https://ibm.com/watson/developercloud/speech-to-text/api/v1/
"""
params = {'continuous': continuous,
'model': model,
'word_alternatives_threshold': word_alternatives_threshold,
'word_confidence': word_confidence,
'timestamps': True,
'inactivity_timeout': str(-1),
'profanity_filter': profanity_filter_for_US_results}
self._prepare_audio(basename=basename,
replace_already_indexed=replace_already_indexed)
for staging_audio_basename in self._list_audio_files(
sub_dir="staging"):
original_audio_name = ''.join(
staging_audio_basename.split('.')[:-1])[:-3]
with open("{}/staging/{}".format(
self.src_dir, staging_audio_basename), "rb") as f:
if self.get_verbosity():
print("Uploading {}...".format(staging_audio_basename))
response = requests.post(
url=("https://stream.watsonplatform.net/"
"speech-to-text/api/v1/recognize"),
auth=(self.get_username_ibm(), self.get_password_ibm()),
headers={'content-type': 'audio/wav'},
data=f.read(),
params=params)
if self.get_verbosity():
print("Indexing {}...".format(staging_audio_basename))
self.__timestamps_unregulated[
original_audio_name + ".wav"].append(
self._timestamp_extractor_ibm(
staging_audio_basename, json.loads(response.text)))
if self.get_verbosity():
print("Done indexing {}".format(staging_audio_basename))
self._timestamp_regulator()
if self.get_verbosity():
print("Indexing procedure finished") | Implements a search-suitable interface for Watson speech API.
Some explanation of the parameters here has been taken from [1]_
Parameters
----------
basename : str, optional
A specific basename to be indexed and is placed in src_dir
e.g `audio.wav`.
If `None` is selected, all the valid audio files would be indexed.
Default is `None`.
replace_already_indexed : bool
`True`, To reindex some audio file that's already in the
timestamps.
Default is `False`.
continuous : bool
Indicates whether multiple final results that represent consecutive
phrases separated by long pauses are returned.
If true, such phrases are returned; if false (the default),
recognition ends after the first end-of-speech (EOS) incident is
detected.
Default is `True`.
model : {
'ar-AR_BroadbandModel',
'en-UK_BroadbandModel'
'en-UK_NarrowbandModel',
'en-US_BroadbandModel', (the default)
'en-US_NarrowbandModel',
'es-ES_BroadbandModel',
'es-ES_NarrowbandModel',
'fr-FR_BroadbandModel',
'ja-JP_BroadbandModel',
'ja-JP_NarrowbandModel',
'pt-BR_BroadbandModel',
'pt-BR_NarrowbandModel',
'zh-CN_BroadbandModel',
'zh-CN_NarrowbandModel'
}
The identifier of the model to be used for the recognition
Default is 'en-US_BroadbandModel'
word_confidence : bool
Indicates whether a confidence measure in the range of 0 to 1 is
returned for each word.
The default is True. (It's False in the original)
word_alternatives_threshold : numeric
A confidence value that is the lower bound for identifying a
hypothesis as a possible word alternative (also known as
"Confusion Networks"). An alternative word is considered if its
confidence is greater than or equal to the threshold. Specify a
probability between 0 and 1 inclusive.
Default is `0.9`.
profanity_filter_for_US_results : bool
Indicates whether profanity filtering is performed on the
transcript. If true, the service filters profanity from all output
by replacing inappropriate words with a series of asterisks.
If false, the service returns results with no censoring. Applies
to US English transcription only.
Default is `False`.
References
----------
.. [1] : https://ibm.com/watson/developercloud/speech-to-text/api/v1/ | https://github.com/aalireza/SimpleAudioIndexer/blob/73f9d75897d785bdaea9d28dde5fa48104428164/SimpleAudioIndexer/__init__.py#L841-L956 |
aalireza/SimpleAudioIndexer | SimpleAudioIndexer/__init__.py | SimpleAudioIndexer._timestamp_extractor_ibm | def _timestamp_extractor_ibm(self, staging_audio_basename, audio_json):
"""
Parameters
----------
audio_json : {str: [{str: [{str: str or numeric}]}]}
Refer to the Watson Speech API reference [1]_
Returns
-------
[[str, float, float]]
A list whose members are lists. Each member list has three
elements. First one is a word. Second is the starting second and
the third is the ending second of that word in the original
audio file.
"""
try:
timestamps_of_sentences = [
audio_json['results'][i]['alternatives'][0]['timestamps']
for i in range(len(audio_json['results']))]
return [
_WordBlock(
word=word_block[0],
start=round(float(word_block[1]), 2),
end=round(float(word_block[2]), 2)
) for sentence_block in timestamps_of_sentences
for word_block in sentence_block]
except KeyError:
self.__errors[(time(), staging_audio_basename)] = audio_json
if self.get_verbosity():
print(audio_json)
print("The resulting request from Watson was unintelligible.")
return False | python | def _timestamp_extractor_ibm(self, staging_audio_basename, audio_json):
"""
Parameters
----------
audio_json : {str: [{str: [{str: str or numeric}]}]}
Refer to the Watson Speech API reference [1]_
Returns
-------
[[str, float, float]]
A list whose members are lists. Each member list has three
elements. First one is a word. Second is the starting second and
the third is the ending second of that word in the original
audio file.
"""
try:
timestamps_of_sentences = [
audio_json['results'][i]['alternatives'][0]['timestamps']
for i in range(len(audio_json['results']))]
return [
_WordBlock(
word=word_block[0],
start=round(float(word_block[1]), 2),
end=round(float(word_block[2]), 2)
) for sentence_block in timestamps_of_sentences
for word_block in sentence_block]
except KeyError:
self.__errors[(time(), staging_audio_basename)] = audio_json
if self.get_verbosity():
print(audio_json)
print("The resulting request from Watson was unintelligible.")
return False | Parameters
----------
audio_json : {str: [{str: [{str: str or numeric}]}]}
Refer to the Watson Speech API reference [1]_
Returns
-------
[[str, float, float]]
A list whose members are lists. Each member list has three
elements. First one is a word. Second is the starting second and
the third is the ending second of that word in the original
audio file. | https://github.com/aalireza/SimpleAudioIndexer/blob/73f9d75897d785bdaea9d28dde5fa48104428164/SimpleAudioIndexer/__init__.py#L958-L989 |
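The JSON path used above, `results[i]['alternatives'][0]['timestamps']`, is easiest to see with a tiny made-up Watson-style response in which each timestamp entry is `[word, start_second, end_second]`:

```python
# Made-up, minimal response in the shape the extractor expects.
audio_json = {
    "results": [
        {"alternatives": [
            {"transcript": "hello world",
             "timestamps": [["hello", 0.12, 0.55], ["world", 0.55, 1.02]]}
        ]}
    ]
}

word_blocks = [tuple(ts)
               for result in audio_json["results"]
               for ts in result["alternatives"][0]["timestamps"]]
print(word_blocks)  # [('hello', 0.12, 0.55), ('world', 0.55, 1.02)]
```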
aalireza/SimpleAudioIndexer | SimpleAudioIndexer/__init__.py | SimpleAudioIndexer.index_audio | def index_audio(self, *args, **kwargs):
"""
Calls the correct indexer function based on the mode.
If mode is `ibm`, _index_audio_ibm is called, which is an interface
for Watson. Note that some of the explanation of _index_audio_ibm's
arguments is from [1]_
If mode is `cmu`, _index_audio_cmu is called, which is an interface
for PocketSphinx. Beware that the output would not be sufficiently
accurate. Use this only if you don't want to upload your files to IBM.
Parameters
----------
mode : {"ibm", "cmu"}
basename : str, optional
A specific basename to be indexed and is placed in src_dir
e.g `audio.wav`.
If `None` is selected, all the valid audio files would be indexed.
Default is `None`.
replace_already_indexed : bool
`True`, To reindex some audio file that's already in the
timestamps.
Default is `False`.
continuous : bool
Valid Only if mode is `ibm`
Indicates whether multiple final results that represent consecutive
phrases separated by long pauses are returned.
If true, such phrases are returned; if false (the default),
recognition ends after the first end-of-speech (EOS) incident is
detected.
Default is `True`.
model : {
'ar-AR_BroadbandModel',
'en-UK_BroadbandModel'
'en-UK_NarrowbandModel',
'en-US_BroadbandModel', (the default)
'en-US_NarrowbandModel',
'es-ES_BroadbandModel',
'es-ES_NarrowbandModel',
'fr-FR_BroadbandModel',
'ja-JP_BroadbandModel',
'ja-JP_NarrowbandModel',
'pt-BR_BroadbandModel',
'pt-BR_NarrowbandModel',
'zh-CN_BroadbandModel',
'zh-CN_NarrowbandModel'
}
Valid Only if mode is `ibm`
The identifier of the model to be used for the recognition
Default is 'en-US_BroadbandModel'
word_confidence : bool
Valid Only if mode is `ibm`
Indicates whether a confidence measure in the range of 0 to 1 is
returned for each word.
The default is True. (It's False in the original)
word_alternatives_threshold : numeric
Valid Only if mode is `ibm`
A confidence value that is the lower bound for identifying a
hypothesis as a possible word alternative (also known as
"Confusion Networks"). An alternative word is considered if its
confidence is greater than or equal to the threshold. Specify a
probability between 0 and 1 inclusive.
Default is `0.9`.
profanity_filter_for_US_results : bool
Valid Only if mode is `ibm`
Indicates whether profanity filtering is performed on the
transcript. If true, the service filters profanity from all output
by replacing inappropriate words with a series of asterisks.
If false, the service returns results with no censoring. Applies
to US English transcription only.
Default is `False`.
Raises
------
OSError
Valid only if mode is `cmu`.
If the output of pocketsphinx command results in an error.
References
----------
.. [1] : https://ibm.com/watson/developercloud/speech-to-text/api/v1/
Else if mode is `cmu`, then _index_audio_cmu would be called:
"""
with _Subdirectory_Managing_Decorator(
self.src_dir, self._needed_directories):
if self.get_mode() == "ibm":
self._index_audio_ibm(*args, **kwargs)
elif self.get_mode() == "cmu":
self._index_audio_cmu(*args, **kwargs) | python | def index_audio(self, *args, **kwargs):
"""
Calls the correct indexer function based on the mode.
If mode is `ibm`, _index_audio_ibm is called, which is an interface
for Watson. Note that some of the explanation of _index_audio_ibm's
arguments is from [1]_
If mode is `cmu`, _index_audio_cmu is called, which is an interface
for PocketSphinx. Beware that the output would not be sufficiently
accurate. Use this only if you don't want to upload your files to IBM.
Parameters
----------
mode : {"ibm", "cmu"}
basename : str, optional
A specific basename to be indexed and is placed in src_dir
e.g `audio.wav`.
If `None` is selected, all the valid audio files would be indexed.
Default is `None`.
replace_already_indexed : bool
`True`, To reindex some audio file that's already in the
timestamps.
Default is `False`.
continuous : bool
Valid Only if mode is `ibm`
Indicates whether multiple final results that represent consecutive
phrases separated by long pauses are returned.
If true, such phrases are returned; if false (the default),
recognition ends after the first end-of-speech (EOS) incident is
detected.
Default is `True`.
model : {
'ar-AR_BroadbandModel',
'en-UK_BroadbandModel'
'en-UK_NarrowbandModel',
'en-US_BroadbandModel', (the default)
'en-US_NarrowbandModel',
'es-ES_BroadbandModel',
'es-ES_NarrowbandModel',
'fr-FR_BroadbandModel',
'ja-JP_BroadbandModel',
'ja-JP_NarrowbandModel',
'pt-BR_BroadbandModel',
'pt-BR_NarrowbandModel',
'zh-CN_BroadbandModel',
'zh-CN_NarrowbandModel'
}
Valid Only if mode is `ibm`
The identifier of the model to be used for the recognition
Default is 'en-US_BroadbandModel'
word_confidence : bool
Valid Only if mode is `ibm`
Indicates whether a confidence measure in the range of 0 to 1 is
returned for each word.
The default is True. (It's False in the original)
word_alternatives_threshold : numeric
Valid Only if mode is `ibm`
A confidence value that is the lower bound for identifying a
hypothesis as a possible word alternative (also known as
"Confusion Networks"). An alternative word is considered if its
confidence is greater than or equal to the threshold. Specify a
probability between 0 and 1 inclusive.
Default is `0.9`.
profanity_filter_for_US_results : bool
Valid Only if mode is `ibm`
Indicates whether profanity filtering is performed on the
transcript. If true, the service filters profanity from all output
by replacing inappropriate words with a series of asterisks.
If false, the service returns results with no censoring. Applies
to US English transcription only.
Default is `False`.
Raises
------
OSError
Valid only if mode is `cmu`.
If the output of pocketsphinx command results in an error.
References
----------
.. [1] : https://ibm.com/watson/developercloud/speech-to-text/api/v1/
Else if mode is `cmu`, then _index_audio_cmu would be called:
"""
with _Subdirectory_Managing_Decorator(
self.src_dir, self._needed_directories):
if self.get_mode() == "ibm":
self._index_audio_ibm(*args, **kwargs)
elif self.get_mode() == "cmu":
self._index_audio_cmu(*args, **kwargs) | Calls the correct indexer function based on the mode.
If mode is `ibm`, _index_audio_ibm is called, which is an interface
for Watson. Note that some of the explanation of _index_audio_ibm's
arguments is from [1]_
If mode is `cmu`, _index_audio_cmu is called, which is an interface
for PocketSphinx. Beware that the output would not be sufficiently
accurate. Use this only if you don't want to upload your files to IBM.
Parameters
----------
mode : {"ibm", "cmu"}
basename : str, optional
A specific basename to be indexed and is placed in src_dir
e.g `audio.wav`.
If `None` is selected, all the valid audio files would be indexed.
Default is `None`.
replace_already_indexed : bool
`True`, To reindex some audio file that's already in the
timestamps.
Default is `False`.
continuous : bool
Valid Only if mode is `ibm`
Indicates whether multiple final results that represent consecutive
phrases separated by long pauses are returned.
If true, such phrases are returned; if false (the default),
recognition ends after the first end-of-speech (EOS) incident is
detected.
Default is `True`.
model : {
'ar-AR_BroadbandModel',
'en-UK_BroadbandModel'
'en-UK_NarrowbandModel',
'en-US_BroadbandModel', (the default)
'en-US_NarrowbandModel',
'es-ES_BroadbandModel',
'es-ES_NarrowbandModel',
'fr-FR_BroadbandModel',
'ja-JP_BroadbandModel',
'ja-JP_NarrowbandModel',
'pt-BR_BroadbandModel',
'pt-BR_NarrowbandModel',
'zh-CN_BroadbandModel',
'zh-CN_NarrowbandModel'
}
Valid Only if mode is `ibm`
The identifier of the model to be used for the recognition
Default is 'en-US_BroadbandModel'
word_confidence : bool
Valid Only if mode is `ibm`
Indicates whether a confidence measure in the range of 0 to 1 is
returned for each word.
The default is True. (It's False in the original)
word_alternatives_threshold : numeric
Valid Only if mode is `ibm`
A confidence value that is the lower bound for identifying a
hypothesis as a possible word alternative (also known as
"Confusion Networks"). An alternative word is considered if its
confidence is greater than or equal to the threshold. Specify a
probability between 0 and 1 inclusive.
Default is `0.9`.
profanity_filter_for_US_results : bool
Valid Only if mode is `ibm`
Indicates whether profanity filtering is performed on the
transcript. If true, the service filters profanity from all output
by replacing inappropriate words with a series of asterisks.
If false, the service returns results with no censoring. Applies
to US English transcription only.
Default is `False`.
Raises
------
OSError
Valid only if mode is `cmu`.
If the output of pocketsphinx command results in an error.
References
----------
.. [1] : https://ibm.com/watson/developercloud/speech-to-text/api/v1/
Else if mode is `cmu`, then _index_audio_cmu would be called: | https://github.com/aalireza/SimpleAudioIndexer/blob/73f9d75897d785bdaea9d28dde5fa48104428164/SimpleAudioIndexer/__init__.py#L991-L1110 |
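A hedged end-to-end usage sketch. The import path and constructor arguments (`src_dir`, `mode`) are assumptions inferred from the attributes used in this table, so verify them against the package documentation; the search result keys come from `search_gen`'s documented yield format.

```python
# Assumed import path and constructor signature (not confirmed by this table).
from SimpleAudioIndexer import SimpleAudioIndexer

indexer = SimpleAudioIndexer(src_dir="/abs/path/to/audio/files", mode="cmu")
indexer.index_audio()                         # dispatches to _index_audio_cmu
indexer.save_indexed_audio("/tmp/audio.idx")  # pickles the timestamps dict

for hit in indexer.search_gen(query="hello world"):
    print(hit["File Name"], hit["Result"])    # e.g. file.wav (3.2, 3.9)
```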
aalireza/SimpleAudioIndexer | SimpleAudioIndexer/__init__.py | SimpleAudioIndexer._timestamp_regulator | def _timestamp_regulator(self):
"""
Makes a dictionary whose keys are audio file basenames and whose
values are a list of word blocks from unregulated timestamps and
updates the main timestamp attribute. Once all are done, it purges the
unregulated ones.
In case the audio file was large enough to be split, it adds seconds
to correct the timing, and in case the timestamp was manually loaded, it
leaves it alone.
Note that the difference between self.__timestamps and
self.__timestamps_unregulated is that in the regulated version,
right after the word, a list of word blocks must appear. However in the
unregulated version, after a word, a list of individual splits
containing word blocks would appear!
"""
unified_timestamps = _PrettyDefaultDict(list)
staged_files = self._list_audio_files(sub_dir="staging")
for timestamp_basename in self.__timestamps_unregulated:
if len(self.__timestamps_unregulated[timestamp_basename]) > 1:
# File has been split
timestamp_name = ''.join(timestamp_basename.split('.')[:-1])
staged_splitted_files_of_timestamp = list(
filter(lambda staged_file: (
timestamp_name == staged_file[:-3] and
all([(x in set(map(str, range(10))))
for x in staged_file[-3:]])), staged_files))
if len(staged_splitted_files_of_timestamp) == 0:
self.__errors[(time(), timestamp_basename)] = {
"reason": "Missing staged file",
"current_staged_files": staged_files}
continue
staged_splitted_files_of_timestamp.sort()
unified_timestamp = list()
for staging_digits, splitted_file in enumerate(
self.__timestamps_unregulated[timestamp_basename]):
prev_splits_sec = 0
if int(staging_digits) != 0:
prev_splits_sec = self._get_audio_duration_seconds(
"{}/staging/{}{:03d}".format(
self.src_dir, timestamp_name,
staging_digits - 1))
for word_block in splitted_file:
unified_timestamp.append(
_WordBlock(
word=word_block.word,
start=round(word_block.start +
prev_splits_sec, 2),
end=round(word_block.end +
prev_splits_sec, 2)))
unified_timestamps[
str(timestamp_basename)] += unified_timestamp
else:
unified_timestamps[
timestamp_basename] += self.__timestamps_unregulated[
timestamp_basename][0]
self.__timestamps.update(unified_timestamps)
self.__timestamps_unregulated = _PrettyDefaultDict(list) | python | def _timestamp_regulator(self):
"""
Makes a dictionary whose keys are audio file basenames and whose
values are a list of word blocks from unregulated timestamps and
updates the main timestamp attribute. Once all are done, it purges the
unregulated ones.
In case the audio file was large enough to be split, it adds seconds
to correct the timing, and in case the timestamp was manually loaded, it
leaves it alone.
Note that the difference between self.__timestamps and
self.__timestamps_unregulated is that in the regulated version,
right after the word, a list of word blocks must appear. However in the
unregulated version, after a word, a list of individual splits
containing word blocks would appear!
"""
unified_timestamps = _PrettyDefaultDict(list)
staged_files = self._list_audio_files(sub_dir="staging")
for timestamp_basename in self.__timestamps_unregulated:
if len(self.__timestamps_unregulated[timestamp_basename]) > 1:
# File has been split
timestamp_name = ''.join(timestamp_basename.split('.')[:-1])
staged_splitted_files_of_timestamp = list(
filter(lambda staged_file: (
timestamp_name == staged_file[:-3] and
all([(x in set(map(str, range(10))))
for x in staged_file[-3:]])), staged_files))
if len(staged_splitted_files_of_timestamp) == 0:
self.__errors[(time(), timestamp_basename)] = {
"reason": "Missing staged file",
"current_staged_files": staged_files}
continue
staged_splitted_files_of_timestamp.sort()
unified_timestamp = list()
for staging_digits, splitted_file in enumerate(
self.__timestamps_unregulated[timestamp_basename]):
prev_splits_sec = 0
if int(staging_digits) != 0:
prev_splits_sec = self._get_audio_duration_seconds(
"{}/staging/{}{:03d}".format(
self.src_dir, timestamp_name,
staging_digits - 1))
for word_block in splitted_file:
unified_timestamp.append(
_WordBlock(
word=word_block.word,
start=round(word_block.start +
prev_splits_sec, 2),
end=round(word_block.end +
prev_splits_sec, 2)))
unified_timestamps[
str(timestamp_basename)] += unified_timestamp
else:
unified_timestamps[
timestamp_basename] += self.__timestamps_unregulated[
timestamp_basename][0]
self.__timestamps.update(unified_timestamps)
self.__timestamps_unregulated = _PrettyDefaultDict(list) | Makes a dictionary whose keys are audio file basenames and whose
values are a list of word blocks from unregulated timestamps and
updates the main timestamp attribute. Once all are done, it purges the
unregulated ones.
In case the audio file was large enough to be split, it adds seconds
to correct the timing, and in case the timestamp was manually loaded, it
leaves it alone.
Note that the difference between self.__timestamps and
self.__timestamps_unregulated is that in the regulated version,
right after the word, a list of word blocks must appear. However in the
unregulated version, after a word, a list of individual splits
containing word blocks would appear! | https://github.com/aalireza/SimpleAudioIndexer/blob/73f9d75897d785bdaea9d28dde5fa48104428164/SimpleAudioIndexer/__init__.py#L1112-L1170 |
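The regulation step shifts each split's word blocks by an offset in seconds before concatenating them under the original basename. A compact sketch of that idea with made-up durations and word blocks (a cumulative offset is used here for illustration; it is not a line-for-line reproduction of the method above):

```python
from collections import namedtuple

WordBlock = namedtuple("WordBlock", ["word", "start", "end"])

# Made-up word blocks per split; times are relative to each split's start.
splits = [[WordBlock("hello", 0.1, 0.5)],   # split 000
          [WordBlock("again", 0.2, 0.6)]]   # split 001
split_durations = [10.0, 10.0]              # made-up split lengths in seconds

unified, offset = [], 0.0
for split, duration in zip(splits, split_durations):
    unified += [WordBlock(w.word, round(w.start + offset, 2),
                          round(w.end + offset, 2)) for w in split]
    offset += duration                      # shift the next split forward
print(unified)
# [WordBlock(word='hello', start=0.1, end=0.5),
#  WordBlock(word='again', start=10.2, end=10.6)]
```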
aalireza/SimpleAudioIndexer | SimpleAudioIndexer/__init__.py | SimpleAudioIndexer.save_indexed_audio | def save_indexed_audio(self, indexed_audio_file_abs_path):
"""
Writes the corrected timestamps to a file. Timestamps are a python
dictionary.
Parameters
----------
indexed_audio_file_abs_path : str
"""
with open(indexed_audio_file_abs_path, "wb") as f:
pickle.dump(self.get_timestamps(), f, pickle.HIGHEST_PROTOCOL) | python | def save_indexed_audio(self, indexed_audio_file_abs_path):
"""
Writes the corrected timestamps to a file. Timestamps are a python
dictionary.
Parameters
----------
indexed_audio_file_abs_path : str
"""
with open(indexed_audio_file_abs_path, "wb") as f:
pickle.dump(self.get_timestamps(), f, pickle.HIGHEST_PROTOCOL) | Writes the corrected timestamps to a file. Timestamps are a python
dictionary.
Parameters
----------
indexed_audio_file_abs_path : str | https://github.com/aalireza/SimpleAudioIndexer/blob/73f9d75897d785bdaea9d28dde5fa48104428164/SimpleAudioIndexer/__init__.py#L1172-L1182 |
aalireza/SimpleAudioIndexer | SimpleAudioIndexer/__init__.py | SimpleAudioIndexer.load_indexed_audio | def load_indexed_audio(self, indexed_audio_file_abs_path):
"""
Parameters
----------
indexed_audio_file_abs_path : str
"""
with open(indexed_audio_file_abs_path, "rb") as f:
self.__timestamps = pickle.load(f) | python | def load_indexed_audio(self, indexed_audio_file_abs_path):
"""
Parameters
----------
indexed_audio_file_abs_path : str
"""
with open(indexed_audio_file_abs_path, "rb") as f:
self.__timestamps = pickle.load(f) | Parameters
----------
indexed_audio_file_abs_path : str | https://github.com/aalireza/SimpleAudioIndexer/blob/73f9d75897d785bdaea9d28dde5fa48104428164/SimpleAudioIndexer/__init__.py#L1184-L1191 |
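Together, `save_indexed_audio` and `load_indexed_audio` are a plain pickle round-trip, so an index can be reused without re-running recognition. A usage sketch reusing the assumed constructor from the earlier example; the paths are hypothetical:

```python
from SimpleAudioIndexer import SimpleAudioIndexer  # assumed import path

indexer = SimpleAudioIndexer(src_dir="/abs/path/to/audio/files", mode="cmu")
indexer.index_audio()
indexer.save_indexed_audio("/tmp/audio.idx")

# Later, or in another process: restore the timestamps dictionary directly.
fresh = SimpleAudioIndexer(src_dir="/abs/path/to/audio/files", mode="cmu")
fresh.load_indexed_audio("/tmp/audio.idx")
print(list(fresh.get_timestamps().keys()))
```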
aalireza/SimpleAudioIndexer | SimpleAudioIndexer/__init__.py | SimpleAudioIndexer._is_subsequence_of | def _is_subsequence_of(self, sub, sup):
"""
Parameters
----------
sub : str
sup : str
Returns
-------
bool
"""
return bool(re.search(".*".join(sub), sup)) | python | def _is_subsequence_of(self, sub, sup):
"""
Parameters
----------
sub : str
sup : str
Returns
-------
bool
"""
return bool(re.search(".*".join(sub), sup)) | Parameters
----------
sub : str
sup : str
Returns
-------
bool | https://github.com/aalireza/SimpleAudioIndexer/blob/73f9d75897d785bdaea9d28dde5fa48104428164/SimpleAudioIndexer/__init__.py#L1206-L1217 |
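Joining the characters of `sub` with `.*` turns the subsequence test into a single regex search; `"abc"` becomes the pattern `a.*b.*c`. A standalone illustration (note that regex metacharacters in `sub` are not escaped here, just as in the method above):

```python
import re

def is_subsequence_of(sub, sup):
    # "abc" -> "a.*b.*c": every character of `sub` must appear in `sup`
    # in order, with anything (or nothing) in between.
    return bool(re.search(".*".join(sub), sup))

print(is_subsequence_of("abc", "a1b2c3"))  # True
print(is_subsequence_of("abc", "acb"))     # False
```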
aalireza/SimpleAudioIndexer | SimpleAudioIndexer/__init__.py | SimpleAudioIndexer._partial_search_validator | def _partial_search_validator(self, sub, sup, anagram=False,
subsequence=False, supersequence=False):
"""
It's responsible for validating the partial results of `search` method.
If it returns True, the search would return its result. Else, search
method would discard what it found and look for others.
First, checks to see if all elements of `sub` are in `sup` with at least
the same frequency and then checks to see if every element of `sub`
appears in `sup` in the same order (index-wise).
If advanced control structures are specified, the containment condition
won't be checked.
The code for index checking is from [1]_.
Parameters
----------
sub : list
sup : list
anagram : bool, optional
Default is `False`
subsequence : bool, optional
Default is `False`
supersequence : bool, optional
Default is `False`
Returns
-------
bool
References
----------
.. [1] : `
https://stackoverflow.com/questions/35964155/checking-if-list-is-a-sublist`
"""
def get_all_in(one, another):
for element in one:
if element in another:
yield element
def containment_check(sub, sup):
return (set(Counter(sub).keys()).issubset(
set(Counter(sup).keys())))
def containment_freq_check(sub, sup):
return (all([Counter(sub)[element] <= Counter(sup)[element]
for element in Counter(sub)]))
def extra_freq_check(sub, sup, list_of_tups):
# Would be used for matching anagrams, subsequences etc.
return (len(list_of_tups) > 0 and
all([Counter(sub)[tup[0]] <= Counter(sup)[tup[1]]
for tup in list_of_tups]))
# Regarding containment checking while having extra conditions,
# there's no good way to map each anagram or subsequence etc. that was
# found to the query word, without making it more complicated than
# it already is, because a query word can be anagram/subsequence etc.
# to multiple words of the timestamps yet finding the one with the
# right index would be the problem.
# Therefore we just approximate the solution by just counting
# the elements.
if len(sub) > len(sup):
return False
for pred, func in set([(anagram, self._is_anagram_of),
(subsequence, self._is_subsequence_of),
(supersequence, self._is_supersequence_of)]):
if pred:
pred_seive = [(sub_key, sup_key)
for sub_key in set(Counter(sub).keys())
for sup_key in set(Counter(sup).keys())
if func(sub_key, sup_key)]
if not extra_freq_check(sub, sup, pred_seive):
return False
if (
not any([anagram, subsequence, supersequence]) and
(not containment_check(sub, sup) or
not containment_freq_check(sub, sup))
):
return False
for x1, x2 in zip(get_all_in(sup, sub), get_all_in(sub, sup)):
if x1 != x2:
return False
return True | python | def _partial_search_validator(self, sub, sup, anagram=False,
subsequence=False, supersequence=False):
"""
It's responsible for validating the partial results of `search` method.
If it returns True, the search would return its result. Else, search
method would discard what it found and look for others.
First, checks to see if all elements of `sub` are in `sup` with at least
the same frequency and then checks to see if every element of `sub`
appears in `sup` in the same order (index-wise).
If advanced control structures are specified, the containment condition
won't be checked.
The code for index checking is from [1]_.
Parameters
----------
sub : list
sup : list
anagram : bool, optional
Default is `False`
subsequence : bool, optional
Default is `False`
supersequence : bool, optional
Default is `False`
Returns
-------
bool
References
----------
.. [1] : `
https://stackoverflow.com/questions/35964155/checking-if-list-is-a-sublist`
"""
def get_all_in(one, another):
for element in one:
if element in another:
yield element
def containment_check(sub, sup):
return (set(Counter(sub).keys()).issubset(
set(Counter(sup).keys())))
def containment_freq_check(sub, sup):
return (all([Counter(sub)[element] <= Counter(sup)[element]
for element in Counter(sub)]))
def extra_freq_check(sub, sup, list_of_tups):
# Would be used for matching anagrams, subsequences etc.
return (len(list_of_tups) > 0 and
all([Counter(sub)[tup[0]] <= Counter(sup)[tup[1]]
for tup in list_of_tups]))
# Regarding containment checking while having extra conditions,
# there's no good way to map each anagram or subsequence etc. that was
# found to the query word, without making it more complicated than
# it already is, because a query word can be anagram/subsequence etc.
# to multiple words of the timestamps yet finding the one with the
# right index would be the problem.
# Therefore we just approximate the solution by just counting
# the elements.
if len(sub) > len(sup):
return False
for pred, func in set([(anagram, self._is_anagram_of),
(subsequence, self._is_subsequence_of),
(supersequence, self._is_supersequence_of)]):
if pred:
pred_seive = [(sub_key, sup_key)
for sub_key in set(Counter(sub).keys())
for sup_key in set(Counter(sup).keys())
if func(sub_key, sup_key)]
if not extra_freq_check(sub, sup, pred_seive):
return False
if (
not any([anagram, subsequence, supersequence]) and
(not containment_check(sub, sup) or
not containment_freq_check(sub, sup))
):
return False
for x1, x2 in zip(get_all_in(sup, sub), get_all_in(sub, sup)):
if x1 != x2:
return False
return True | It's responsible for validating the partial results of `search` method.
If it returns True, the search would return its result. Else, search
method would discard what it found and look for others.
First, checks to see if all elements of `sub` are in `sup` with at least
the same frequency and then checks to see if every element of `sub`
appears in `sup` in the same order (index-wise).
If advanced control structures are specified, the containment condition
won't be checked.
The code for index checking is from [1]_.
Parameters
----------
sub : list
sup : list
anagram : bool, optional
Default is `False`
subsequence : bool, optional
Default is `False`
supersequence : bool, optional
Default is `False`
Returns
-------
bool
References
----------
.. [1] : `
https://stackoverflow.com/questions/35964155/checking-if-list-is-a-sublist` | https://github.com/aalireza/SimpleAudioIndexer/blob/73f9d75897d785bdaea9d28dde5fa48104428164/SimpleAudioIndexer/__init__.py#L1232-L1318 |
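The containment and frequency checks inside the validator reduce to `collections.Counter` comparisons. A small standalone illustration of those two predicates:

```python
from collections import Counter

def containment_check(sub, sup):
    # Every distinct word of `sub` must occur somewhere in `sup`.
    return set(Counter(sub)).issubset(set(Counter(sup)))

def containment_freq_check(sub, sup):
    # ...and at least as many times in `sup` as it occurs in `sub`.
    return all(Counter(sub)[w] <= Counter(sup)[w] for w in Counter(sub))

sub = ["some", "text"]
sup = ["some", "random", "text", "here"]
print(containment_check(sub, sup), containment_freq_check(sub, sup))  # True True
```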
aalireza/SimpleAudioIndexer | SimpleAudioIndexer/__init__.py | SimpleAudioIndexer.search_gen | def search_gen(self, query, audio_basename=None, case_sensitive=False,
subsequence=False, supersequence=False, timing_error=0.0,
anagram=False, missing_word_tolerance=0):
"""
A generator that searches for the `query` within the audiofiles of the
src_dir.
Parameters
----------
query : str
A string that'll be searched. It'll be split on spaces and then
each word gets sequentially searched.
audio_basename : str, optional
Search only within the given audio_basename.
Default is `None`
case_sensitive : bool, optional
Default is `False`
subsequence : bool, optional
`True` if it's not needed for the exact word to be detected and larger
strings that contain the given one are fine.
If the query is a sentence with multiple words, it'll be
considered for each word, not the whole sentence.
Default is `False`.
supersequence : bool, optional
`True` if it's not needed for the exact word to be detected and
smaller strings that are contained within the given one are fine.
If the query is a sentence with multiple words, it'll be
considered for each word, not the whole sentence.
Default is `False`.
anagram : bool, optional
`True` if it's acceptable for a complete permutation of the word to
be found. e.g. "abcde" would be acceptable for "edbac".
If the query is a sentence with multiple words, it'll be
considered for each word, not the whole sentence.
Default is `False`.
timing_error : None or float, optional
Sometimes other words (almost always very small) would be detected
between the words of the `query`. This parameter defines the
timing difference/tolerance of the search.
Default is 0.0 i.e. No timing error is tolerated.
missing_word_tolerance : int, optional
The number of words that can be missed within the result.
For example, if the query is "Some random text" and the tolerance
value is `1`, then "Some text" would be a valid response.
Note that the first and last words cannot be missed. Also,
there'll be an error if the value is more than the number of
available words. For the example above, any value more than 1
would have given an error (since there's only one word i.e.
"random" that can be missed)
Default is 0.
Yields
------
{"File Name": str, "Query": `query`, "Result": (float, float)}
The result of the search is returned as a tuple which is the value
of the "Result" key. The first element of the tuple is the
starting second of `query` and the last element is the ending
second of `query`
Raises
------
AssertionError
If `missing_word_tolerance` value is more than the total number of
words in the query minus 2 (since the first and the last word
cannot be removed)
"""
def case_sensitivity_handler(case_sensitive=case_sensitive):
def get_query_words(query, case_sensitive=case_sensitive):
query_words = list(
filter(None, ''.join(
filter(lambda char: char in (ascii_letters + " "),
list(query))).split(" ")))
if case_sensitive:
return query_words
return [q.lower() for q in query_words]
def get_timestamps(case_sensitive=case_sensitive):
timestamps = self.get_timestamps().copy()
if not case_sensitive:
return {
audio_basename: [
_WordBlock(word=word_block.word.lower(),
start=word_block.start,
end=word_block.end)
for word_block in timestamps[audio_basename]]
for audio_basename in timestamps}
return timestamps
return locals()
query_words = case_sensitivity_handler()["get_query_words"](query)
timestamps = case_sensitivity_handler()["get_timestamps"]()
assert missing_word_tolerance <= max(len(query_words) - 2, 0), (
"The number of words that can be missing must be less than "
"the total number of words within the query minus the first and "
"the last word."
)
for audio_filename in (
(lambda: (timestamps.keys() if audio_basename is None else
[audio_basename]))()):
result = list()
missed_words_so_far = 0
query_cursor = 0
try:
for word_block in timestamps[audio_filename]:
if (
# When the query is identical
(word_block.word == query_words[query_cursor]) or
# When the query is a subsequence of what's
# available
(subsequence and
self._is_subsequence_of(query_words[query_cursor],
word_block.word)) or
# When the query is a supersequence of what's
# available
(supersequence and self._is_supersequence_of(
query_words[query_cursor], word_block.word)) or
# When query is a permutation of what's available.
(anagram and self._is_anagram_of(
query_words[query_cursor], word_block.word))
):
result.append(word_block)
if timing_error is not None:
try:
if round(result[-1].start -
result[-2].end, 4) > timing_error:
result = list()
query_cursor = 0
except IndexError:
pass
if self._partial_search_validator(
query_words, [x.word for x in result],
anagram=anagram,
subsequence=subsequence,
supersequence=supersequence):
yield {
"File Name": audio_filename,
"Query": query,
"Result": tuple([result[0].start,
result[-1].end])}
result = list()
query_cursor = 0
else:
query_cursor += 1
elif missed_words_so_far > missing_word_tolerance:
result = list()
query_cursor = 0
elif (missing_word_tolerance > 0) and (len(result) > 0):
result.append(word_block)
missed_words_so_far += 1
except KeyError:
# This is needed for the case where no timestamp is present.
pass
except IndexError:
# This is needed when multiple timestamps are present, and
# advanced control structures like `missing_word_tolerance` are
# non-zero. In that case, it can search to the end of the first
# timestamp looking to complete its partial result and since
# there are no more `word_block`s left, it returns an error.
# `continue` should be used to reset the partial result and
# move to the next timestamp.
continue | python | def search_gen(self, query, audio_basename=None, case_sensitive=False,
subsequence=False, supersequence=False, timing_error=0.0,
anagram=False, missing_word_tolerance=0):
"""
A generator that searches for the `query` within the audiofiles of the
src_dir.
Parameters
----------
query : str
A string that'll be searched. It'll be split on spaces and then
each word gets sequentially searched.
audio_basename : str, optional
Search only within the given audio_basename.
Default is `None`
case_sensitive : bool, optional
Default is `False`
subsequence : bool, optional
`True` if it's not needed for the exact word to be detected and larger
strings that contain the given one are fine.
If the query is a sentence with multiple words, it'll be
considered for each word, not the whole sentence.
Default is `False`.
supersequence : bool, optional
`True` if it's not needed for the exact word to be detected and
smaller strings that are contained within the given one are fine.
If the query is a sentence with multiple words, it'll be
considered for each word, not the whole sentence.
Default is `False`.
anagram : bool, optional
`True` if it's acceptable for a complete permutation of the word to
be found. e.g. "abcde" would be acceptable for "edbac".
If the query is a sentence with multiple words, it'll be
considered for each word, not the whole sentence.
Default is `False`.
timing_error : None or float, optional
Sometimes other words (almost always very small) would be detected
between the words of the `query`. This parameter defines the
timing difference/tolerance of the search.
Default is 0.0 i.e. No timing error is tolerated.
missing_word_tolerance : int, optional
The number of words that can be missed within the result.
For example, if the query is "Some random text" and the tolerance
value is `1`, then "Some text" would be a valid response.
Note that the first and last words cannot be missed. Also,
there'll be an error if the value is more than the number of
available words. For the example above, any value more than 1
would have given an error (since there's only one word i.e.
"random" that can be missed)
Default is 0.
Yields
------
{"File Name": str, "Query": `query`, "Result": (float, float)}
The result of the search is returned as a tuple which is the value
of the "Result" key. The first element of the tuple is the
starting second of `query` and the last element is the ending
second of `query`
Raises
------
AssertionError
If `missing_word_tolerance` value is more than the total number of
words in the query minus 2 (since the first and the last word
cannot be removed)
"""
def case_sensitivity_handler(case_sensitive=case_sensitive):
def get_query_words(query, case_sensitive=case_sensitive):
query_words = list(
filter(None, ''.join(
filter(lambda char: char in (ascii_letters + " "),
list(query))).split(" ")))
if case_sensitive:
return query_words
return [q.lower() for q in query_words]
def get_timestamps(case_sensitive=case_sensitive):
timestamps = self.get_timestamps().copy()
if not case_sensitive:
return {
audio_basename: [
_WordBlock(word=word_block.word.lower(),
start=word_block.start,
end=word_block.end)
for word_block in timestamps[audio_basename]]
for audio_basename in timestamps}
return timestamps
return locals()
query_words = case_sensitivity_handler()["get_query_words"](query)
timestamps = case_sensitivity_handler()["get_timestamps"]()
assert missing_word_tolerance <= max(len(query_words) - 2, 0), (
"The number of words that can be missing must be less than "
"the total number of words within the query minus the first and "
"the last word."
)
for audio_filename in (
(lambda: (timestamps.keys() if audio_basename is None else
[audio_basename]))()):
result = list()
missed_words_so_far = 0
query_cursor = 0
try:
for word_block in timestamps[audio_filename]:
if (
# When the query is identical
(word_block.word == query_words[query_cursor]) or
# When the query is a subsequence of what's
# available
(subsequence and
self._is_subsequence_of(query_words[query_cursor],
word_block.word)) or
# When the query is a supersequence of what's
# available
(supersequence and self._is_supersequence_of(
query_words[query_cursor], word_block.word)) or
# When query is a permutation of what's available.
(anagram and self._is_anagram_of(
query_words[query_cursor], word_block.word))
):
result.append(word_block)
if timing_error is not None:
try:
if round(result[-1].start -
result[-2].end, 4) > timing_error:
result = list()
query_cursor = 0
except IndexError:
pass
if self._partial_search_validator(
query_words, [x.word for x in result],
anagram=anagram,
subsequence=subsequence,
supersequence=supersequence):
yield {
"File Name": audio_filename,
"Query": query,
"Result": tuple([result[0].start,
result[-1].end])}
result = list()
query_cursor = 0
else:
query_cursor += 1
elif missed_words_so_far > missing_word_tolerance:
result = list()
query_cursor = 0
elif (missing_word_tolerance > 0) and (len(result) > 0):
result.append(word_block)
missed_words_so_far += 1
except KeyError:
# This is needed for the case where no timestamp is present.
pass
except IndexError:
# This is needed when multiple timestamps are present, and
# advanced control structures like `missing_word_tolerance` are
# non-zero. In that case, it can search to the end of the first
# timestamp looking to complete its partial result and since
# there are no more `word_block`s left, it returns an error.
# `continue` should be used to reset the partial result and
# move to the next timestamp.
continue | A generator that searches for the `query` within the audiofiles of the
src_dir.
Parameters
----------
query : str
A string that'll be searched. It'll be split on spaces and then
each word gets sequentially searched.
audio_basename : str, optional
Search only within the given audio_basename.
Default is `None`
case_sensitive : bool, optional
Default is `False`
subsequence : bool, optional
`True` if it's not needed for the exact word to be detected and larger
strings that contain the given one are fine.
If the query is a sentence with multiple words, it'll be
considered for each word, not the whole sentence.
Default is `False`.
supersequence : bool, optional
`True` if it's not needed for the exact word to be detected and
smaller strings that are contained within the given one are fine.
If the query is a sentence with multiple words, it'll be
considered for each word, not the whole sentence.
Default is `False`.
anagram : bool, optional
`True` if it's acceptable for a complete permutation of the word to
be found. e.g. "abcde" would be acceptable for "edbac".
If the query is a sentence with multiple words, it'll be
considered for each word, not the whole sentence.
Default is `False`.
timing_error : None or float, optional
Sometimes other words (almost always very small) would be detected
between the words of the `query`. This parameter defines the
timing difference/tolerance of the search.
Default is 0.0 i.e. No timing error is tolerated.
missing_word_tolerance : int, optional
The number of words that can be missed within the result.
For example, if the query is "Some random text" and the tolerance
value is `1`, then "Some text" would be a valid response.
Note that the first and last words cannot be missed. Also,
there'll be an error if the value is more than the number of
available words. For the example above, any value more than 1
would have given an error (since there's only one word i.e.
"random" that can be missed)
Default is 0.
Yields
------
{"File Name": str, "Query": `query`, "Result": (float, float)}
The result of the search is returned as a tuple which is the value
of the "Result" key. The first element of the tuple is the
starting second of `query` and the last element is the ending
second of `query`
Raises
------
AssertionError
If `missing_word_tolerance` value is more than the total number of
words in the query minus 2 (since the first and the last word
cannot be removed) | https://github.com/aalireza/SimpleAudioIndexer/blob/73f9d75897d785bdaea9d28dde5fa48104428164/SimpleAudioIndexer/__init__.py#L1320-L1501 |
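The query pre-processing that `case_sensitivity_handler` performs above can be shown in isolation; this standalone sketch (the helper name is made up) mirrors `get_query_words`: keep only ASCII letters and spaces, split on spaces, drop empty tokens, and lowercase unless the search is case sensitive:

from string import ascii_letters

def query_to_words(query, case_sensitive=False):
    # Keep only letters and spaces, then split on spaces and drop empty tokens.
    cleaned = ''.join(char for char in query if char in ascii_letters + " ")
    words = [word for word in cleaned.split(" ") if word]
    return words if case_sensitive else [word.lower() for word in words]

print(query_to_words("Hello, World! 42"))  # ['hello', 'world']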
aalireza/SimpleAudioIndexer | SimpleAudioIndexer/__init__.py | SimpleAudioIndexer.search_all | def search_all(self, queries, audio_basename=None, case_sensitive=False,
subsequence=False, supersequence=False, timing_error=0.0,
anagram=False, missing_word_tolerance=0):
"""
Returns a dictionary of all results of all of the queries for all of
the audio files.
All the specified parameters work per query.
Parameters
----------
queries : [str] or str
A list of the strings that'll be searched. If type of queries is
`str`, it'll be inserted into a list within the body of the
method.
audio_basename : str, optional
Search only within the given audio_basename.
Default is `None`.
case_sensitive : bool
Default is `False`
subsequence : bool, optional
`True` if it's not needed for the exact word to be detected and larger
strings that contain the given one are fine.
If the query is a sentence with multiple words, it'll be
considered for each word, not the whole sentence.
Default is `False`.
supersequence : bool, optional
`True` if it's not needed for the exact word to be detected and
smaller strings that are contained within the given one are fine.
If the query is a sentence with multiple words, it'll be
considered for each word, not the whole sentence.
Default is `False`.
anagram : bool, optional
`True` if it's acceptable for a complete permutation of the word to
be found. e.g. "abcde" would be acceptable for "edbac".
If the query is a sentence with multiple words, it'll be
considered for each word, not the whole sentence.
Default is `False`.
timing_error : None or float, optional
Sometimes other words (almost always very small) would be detected
between the words of the `query`. This parameter defines the
timing difference/tolerance of the search.
Default is 0.0 i.e. No timing error is tolerated.
missing_word_tolerance : int, optional
The number of words that can be missed within the result.
For example, if the query is "Some random text" and the tolerance
value is `1`, then "Some text" would be a valid response.
Note that the first and last words cannot be missed. Also,
there'll be an error if the value is more than the number of
available words. For the example above, any value more than 1
would have given an error (since there's only one word i.e.
"random" that can be missed)
Default is 0.
Returns
-------
search_results : {str: {str: [(float, float)]}}
A dictionary whose keys are queries and whose values are
dictionaries whose keys are all the audiofiles in which the query
is present and whose values are lists of 2-tuples, where the
first element is the starting second of the query and the
second element is the ending second. e.g.
{"apple": {"fruits.wav" : [(1.1, 1.12)]}}
Raises
------
TypeError
if `queries` is neither a list nor a str
"""
search_gen_rest_of_kwargs = {
"audio_basename": audio_basename,
"case_sensitive": case_sensitive,
"subsequence": subsequence,
"supersequence": supersequence,
"timing_error": timing_error,
"anagram": anagram,
"missing_word_tolerance": missing_word_tolerance}
if not isinstance(queries, (list, str)):
raise TypeError("Invalid query type.")
if type(queries) is not list:
queries = [queries]
search_results = _PrettyDefaultDict(lambda: _PrettyDefaultDict(list))
for query in queries:
search_gen = self.search_gen(query=query,
**search_gen_rest_of_kwargs)
for search_result in search_gen:
search_results[query][
search_result["File Name"]].append(search_result["Result"])
return search_results | python | def search_all(self, queries, audio_basename=None, case_sensitive=False,
subsequence=False, supersequence=False, timing_error=0.0,
anagram=False, missing_word_tolerance=0):
"""
Returns a dictionary of all results of all of the queries for all of
the audio files.
All the specified parameters work per query.
Parameters
----------
queries : [str] or str
A list of the strings that'll be searched. If type of queries is
`str`, it'll be inserted into a list within the body of the
method.
audio_basename : str, optional
Search only within the given audio_basename.
Default is `None`.
case_sensitive : bool
Default is `False`
subsequence : bool, optional
`True` if it's not needed for the exact word to be detected and larger
strings that contain the given one are fine.
If the query is a sentence with multiple words, it'll be
considered for each word, not the whole sentence.
Default is `False`.
supersequence : bool, optional
`True` if it's not needed for the exact word to be detected and
smaller strings that are contained within the given one are fine.
If the query is a sentence with multiple words, it'll be
considered for each word, not the whole sentence.
Default is `False`.
anagram : bool, optional
`True` if it's acceptable for a complete permutation of the word to
be found. e.g. "abcde" would be acceptable for "edbac".
If the query is a sentence with multiple words, it'll be
considered for each word, not the whole sentence.
Default is `False`.
timing_error : None or float, optional
Sometimes other words (almost always very small) would be detected
between the words of the `query`. This parameter defines the
timing difference/tolerance of the search.
Default is 0.0 i.e. No timing error is tolerated.
missing_word_tolerance : int, optional
The number of words that can be missed within the result.
For example, if the query is "Some random text" and the tolerance
value is `1`, then "Some text" would be a valid response.
Note that the first and last words cannot be missed. Also,
there'll be an error if the value is more than the number of
available words. For the example above, any value more than 1
would have given an error (since there's only one word i.e.
"random" that can be missed)
Default is 0.
Returns
-------
search_results : {str: {str: [(float, float)]}}
A dictionary whose keys are queries and whose values are
dictionaries whose keys are all the audiofiles in which the query
is present and whose values are lists of 2-tuples, where the
first element is the starting second of the query and the
second element is the ending second. e.g.
{"apple": {"fruits.wav" : [(1.1, 1.12)]}}
Raises
------
TypeError
if `queries` is neither a list nor a str
"""
search_gen_rest_of_kwargs = {
"audio_basename": audio_basename,
"case_sensitive": case_sensitive,
"subsequence": subsequence,
"supersequence": supersequence,
"timing_error": timing_error,
"anagram": anagram,
"missing_word_tolerance": missing_word_tolerance}
if not isinstance(queries, (list, str)):
raise TypeError("Invalid query type.")
if type(queries) is not list:
queries = [queries]
search_results = _PrettyDefaultDict(lambda: _PrettyDefaultDict(list))
for query in queries:
search_gen = self.search_gen(query=query,
**search_gen_rest_of_kwargs)
for search_result in search_gen:
search_results[query][
search_result["File Name"]].append(search_result["Result"])
return search_results | Returns a dictionary of all results of all of the queries for all of
the audio files.
All the specified parameters work per query.
Parameters
----------
queries : [str] or str
A list of the strings that'll be searched. If type of queries is
`str`, it'll be inserted into a list within the body of the
method.
audio_basename : str, optional
Search only within the given audio_basename.
Default is `None`.
case_sensitive : bool
Default is `False`
subsequence : bool, optional
`True` if it's not needed for the exact word to be detected and larger
strings that contain the given one are fine.
If the query is a sentence with multiple words, it'll be
considered for each word, not the whole sentence.
Default is `False`.
supersequence : bool, optional
`True` if it's not needed for the exact word to be detected and
smaller strings that are contained within the given one are fine.
If the query is a sentence with multiple words, it'll be
considered for each word, not the whole sentence.
Default is `False`.
anagram : bool, optional
`True` if it's acceptable for a complete permutation of the word to
be found. e.g. "abcde" would be acceptable for "edbac".
If the query is a sentence with multiple words, it'll be
considered for each word, not the whole sentence.
Default is `False`.
timing_error : None or float, optional
Sometimes other words (almost always very small) would be detected
between the words of the `query`. This parameter defines the
timing difference/tolerance of the search.
Default is 0.0 i.e. No timing error is tolerated.
missing_word_tolerance : int, optional
The number of words that can be missed within the result.
For example, if the query is "Some random text" and the tolerance
value is `1`, then "Some text" would be a valid response.
Note that the first and last words cannot be missed. Also,
there'll be an error if the value is more than the number of
available words. For the example above, any value more than 1
would have given an error (since there's only one word i.e.
"random" that can be missed)
Default is 0.
Returns
-------
search_results : {str: {str: [(float, float)]}}
A dictionary whose keys are queries and whose values are
dictionaries whose keys are all the audiofiles in which the query
is present and whose values are lists of 2-tuples, where the
first element is the starting second of the query and the
second element is the ending second. e.g.
{"apple": {"fruits.wav" : [(1.1, 1.12)]}}
Raises
------
TypeError
if `queries` is neither a list nor a str | https://github.com/aalireza/SimpleAudioIndexer/blob/73f9d75897d785bdaea9d28dde5fa48104428164/SimpleAudioIndexer/__init__.py#L1503-L1601 |
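The aggregation that `search_all` performs over the generator output can be sketched on its own; the hits below are fabricated purely to show the resulting {query: {file: [(start, end), ...]}} shape:

from collections import defaultdict

fake_hits = [
    {"File Name": "fruits.wav", "Query": "apple", "Result": (1.1, 1.12)},
    {"File Name": "fruits.wav", "Query": "apple", "Result": (4.0, 4.05)},
]
search_results = defaultdict(lambda: defaultdict(list))
for hit in fake_hits:
    search_results[hit["Query"]][hit["File Name"]].append(hit["Result"])
print(dict(search_results["apple"]))  # {'fruits.wav': [(1.1, 1.12), (4.0, 4.05)]}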
aalireza/SimpleAudioIndexer | SimpleAudioIndexer/__init__.py | SimpleAudioIndexer.search_regexp | def search_regexp(self, pattern, audio_basename=None):
"""
First joins the words of the word_blocks of timestamps with space, per
audio_basename. Then matches `pattern` and calculates the index of the
word_block where the first and last word of the matched result appears
in. Then presents the output like `search_all` method.
Note that the leading and trailing spaces from the matched results
would be removed while determining which word_block they belong to.
Parameters
----------
pattern : str
A regex pattern.
audio_basename : str, optional
Search only within the given audio_basename.
Default is `None`.
Returns
-------
search_results : {str: {str: [(float, float)]}}
A dictionary whose keys are queries and whose values are
dictionaries whose keys are all the audiofiles in which the query
is present and whose values are lists of 2-tuples, where the
first element is the starting second of the query and the
second element is the ending second. e.g.
{"apple": {"fruits.wav" : [(1.1, 1.12)]}}
"""
def indexes_in_transcript_to_start_end_second(index_tup,
audio_basename):
"""
Calculates the word block index by having the beginning and ending
index of the matched result from the transcription
Parameters
----------
index_tup : (int, int)
index_tup is of the form tuple(index_start, index_end)
audio_basename : str
Returns
-------
(float, float)
The time of the output of the matched result. Derived from two
separate word blocks belonging to the beginning and the end of
the index_start and index_end.
"""
space_indexes = [i for i, x in enumerate(
transcription[audio_basename]) if x == " "]
space_indexes.sort(reverse=True)
index_start, index_end = index_tup
# re.finditer's end index is exclusive (one past the last matched character)
index_end -= 1
while transcription[audio_basename][index_start] == " ":
index_start += 1
while transcription[audio_basename][index_end] == " ":
index_end -= 1
block_number_start = 0
block_number_end = len(space_indexes)
for block_cursor, space_index in enumerate(space_indexes):
if index_start > space_index:
block_number_start = (len(space_indexes) - block_cursor)
break
for block_cursor, space_index in enumerate(space_indexes):
if index_end > space_index:
block_number_end = (len(space_indexes) - block_cursor)
break
return (timestamps[audio_basename][block_number_start].start,
timestamps[audio_basename][block_number_end].end)
timestamps = self.get_timestamps()
if audio_basename is not None:
timestamps = {audio_basename: timestamps[audio_basename]}
transcription = {
audio_basename: ' '.join(
[word_block.word for word_block in timestamps[audio_basename]]
) for audio_basename in timestamps}
match_map = map(
lambda audio_basename: tuple((
audio_basename,
re.finditer(pattern, transcription[audio_basename]))),
transcription.keys())
search_results = _PrettyDefaultDict(lambda: _PrettyDefaultDict(list))
for audio_basename, match_iter in match_map:
for match in match_iter:
search_results[match.group()][audio_basename].append(
tuple(indexes_in_transcript_to_start_end_second(
match.span(), audio_basename)))
return search_results | python | def search_regexp(self, pattern, audio_basename=None):
"""
First joins the words of the word_blocks of timestamps with space, per
audio_basename. Then matches `pattern` and calculates the index of the
word_block where the first and last word of the matched result appears
in. Then presents the output like `search_all` method.
Note that the leading and trailing spaces from the matched results
would be removed while determining which word_block they belong to.
Parameters
----------
pattern : str
A regex pattern.
audio_basename : str, optional
Search only within the given audio_basename.
Default is `None`.
Returns
-------
search_results : {str: {str: [(float, float)]}}
A dictionary whose keys are queries and whose values are
dictionaries whose keys are all the audiofiles in which the query
is present and whose values are lists of 2-tuples, where the
first element is the starting second of the query and the
second element is the ending second. e.g.
{"apple": {"fruits.wav" : [(1.1, 1.12)]}}
"""
def indexes_in_transcript_to_start_end_second(index_tup,
audio_basename):
"""
Calculates the word block index by having the beginning and ending
index of the matched result from the transcription
Parameters
----------
index_tup : (int, int)
index_tup is of the form tuple(index_start, index_end)
audio_basename : str
Returns
-------
(float, float)
The time of the output of the matched result. Derived from two
separate word blocks belonging to the beginning and the end of
the index_start and index_end.
"""
space_indexes = [i for i, x in enumerate(
transcription[audio_basename]) if x == " "]
space_indexes.sort(reverse=True)
index_start, index_end = index_tup
# re.finditer's end index is exclusive (one past the last matched character)
index_end -= 1
while transcription[audio_basename][index_start] == " ":
index_start += 1
while transcription[audio_basename][index_end] == " ":
index_end -= 1
block_number_start = 0
block_number_end = len(space_indexes)
for block_cursor, space_index in enumerate(space_indexes):
if index_start > space_index:
block_number_start = (len(space_indexes) - block_cursor)
break
for block_cursor, space_index in enumerate(space_indexes):
if index_end > space_index:
block_number_end = (len(space_indexes) - block_cursor)
break
return (timestamps[audio_basename][block_number_start].start,
timestamps[audio_basename][block_number_end].end)
timestamps = self.get_timestamps()
if audio_basename is not None:
timestamps = {audio_basename: timestamps[audio_basename]}
transcription = {
audio_basename: ' '.join(
[word_block.word for word_block in timestamps[audio_basename]]
) for audio_basename in timestamps}
match_map = map(
lambda audio_basename: tuple((
audio_basename,
re.finditer(pattern, transcription[audio_basename]))),
transcription.keys())
search_results = _PrettyDefaultDict(lambda: _PrettyDefaultDict(list))
for audio_basename, match_iter in match_map:
for match in match_iter:
search_results[match.group()][audio_basename].append(
tuple(indexes_in_transcript_to_start_end_second(
match.span(), audio_basename)))
return search_results | First joins the words of the word_blocks of timestamps with space, per
audio_basename. Then matches `pattern` and calculates the index of the
word_block where the first and last word of the matched result appears
in. Then presents the output like `search_all` method.
Note that the leading and trailing spaces from the matched results
would be removed while determining which word_block they belong to.
Parameters
----------
pattern : str
A regex pattern.
audio_basename : str, optional
Search only within the given audio_basename.
Default is `None`.
Returns
-------
search_results : {str: {str: [(float, float)]}}
A dictionary whose keys are queries and whose values are
dictionaries whose keys are all the audiofiles in which the query
is present and whose values are lists of 2-tuples, where the
first element is the starting second of the query and the
second element is the ending second. e.g.
{"apple": {"fruits.wav" : [(1.1, 1.12)]}} | https://github.com/aalireza/SimpleAudioIndexer/blob/73f9d75897d785bdaea9d28dde5fa48104428164/SimpleAudioIndexer/__init__.py#L1603-L1693 |
lrq3000/pyFileFixity | pyFileFixity/lib/reedsolomon/reedsolo.py | rwh_primes1 | def rwh_primes1(n):
# http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188
''' Returns a list of primes < n '''
sieve = [True] * (n/2)
for i in xrange(3,int(n**0.5)+1,2):
if sieve[i/2]:
sieve[i*i/2::i] = [False] * ((n-i*i-1)/(2*i)+1)
return [2] + [2*i+1 for i in xrange(1,n/2) if sieve[i]] | python | def rwh_primes1(n):
# http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188
''' Returns a list of primes < n '''
sieve = [True] * (n/2)
for i in xrange(3,int(n**0.5)+1,2):
if sieve[i/2]:
sieve[i*i/2::i] = [False] * ((n-i*i-1)/(2*i)+1)
return [2] + [2*i+1 for i in xrange(1,n/2) if sieve[i]] | Returns a list of primes < n | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/reedsolomon/reedsolo.py#L117-L124 |
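The sieve above relies on Python 2 semantics (`xrange` and `/` acting as integer division); a Python 3-friendly variant, offered only as a sketch, would use floor division throughout:

def rwh_primes1_py3(n):
    '''Returns a list of primes < n (Python 3 port of the sieve above).'''
    sieve = [True] * (n // 2)
    for i in range(3, int(n ** 0.5) + 1, 2):
        if sieve[i // 2]:
            sieve[i * i // 2::i] = [False] * ((n - i * i - 1) // (2 * i) + 1)
    return [2] + [2 * i + 1 for i in range(1, n // 2) if sieve[i]]

print(rwh_primes1_py3(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]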
lrq3000/pyFileFixity | pyFileFixity/lib/reedsolomon/reedsolo.py | find_prime_polys | def find_prime_polys(generator=2, c_exp=8, fast_primes=False, single=False):
'''Compute the list of prime polynomials for the given generator and galois field characteristic exponent.'''
# fast_primes will output less results but will be significantly faster.
# single will output the first prime polynomial found, so if all you want is to just find one prime polynomial to generate the LUT for Reed-Solomon to work, then just use that.
# A prime polynomial (necessarily irreducible) is necessary to reduce the multiplications in the Galois Field, so as to avoid overflows.
# Why do we need a "prime polynomial"? Can't we just reduce modulo 255 (for GF(2^8) for example)? Because we need the values to be unique.
# For example: if the generator (alpha) = 2 and c_exp = 8 (GF(2^8) == GF(256)), then the generated Galois Field (0, 1, α, α^2, ..., α^(p-1)) becomes, in integers, 0, 1, 2, 4, 8, 16, etc. However, upon reaching 128, the next value will be doubled (ie, next power of 2), which will give 256. Then we must reduce, because we have overflowed above the maximum value of 255. But, if we reduce modulo 255, this will generate 256 == 1, then 2, 4, 8, 16, etc., giving us a repeating pattern of numbers. This is very bad, as it's then no longer a bijection (ie, a non-zero value doesn't have a unique index). That's why we can't just reduce modulo 255; we need another number above 255, which is called the prime polynomial.
# Why so much hassle? Because we are using precomputed look-up tables for multiplication: instead of multiplying a*b, we precompute alpha^a, alpha^b and alpha^(a+b), so that we can just use our lookup table at alpha^(a+b) and get our result. But just like in our original field we had 0,1,2,...,p-1 distinct unique values, in our "LUT" field using alpha we must have unique distinct values (we don't care that they are different from the original field as long as they are unique and distinct). That's why we need to avoid duplicated values, and to avoid duplicated values we need to use a prime irreducible polynomial.
# Here is implemented a bruteforce approach to find all these prime polynomials, by generating every possible prime polynomials (ie, every integers between field_charac+1 and field_charac*2), and then we build the whole Galois Field, and we reject the candidate prime polynomial if it duplicates even one value or if it generates a value above field_charac (ie, cause an overflow).
# Note that this algorithm is slow if the field is too big (above 12), because it's an exhaustive search algorithm. There are probabilistic approaches, and almost surely prime approaches, but there is no deterministic polynomial time algorithm to find irreducible monic polynomials. More info can be found at: http://people.mpi-inf.mpg.de/~csaha/lectures/lec9.pdf
# Another faster algorithm may be found at Adleman, Leonard M., and Hendrik W. Lenstra. "Finding irreducible polynomials over finite fields." Proceedings of the eighteenth annual ACM symposium on Theory of computing. ACM, 1986.
# Prepare the finite field characteristic (2^p - 1), this also represent the maximum possible value in this field
root_charac = 2 # we're in GF(2)
field_charac = int(root_charac**c_exp - 1)
field_charac_next = int(root_charac**(c_exp+1) - 1)
prim_candidates = []
if fast_primes:
prim_candidates = rwh_primes1(field_charac_next) # generate maybe prime polynomials and check later if they really are irreducible
prim_candidates = [x for x in prim_candidates if x > field_charac] # filter out too small primes
else:
prim_candidates = xrange(field_charac+2, field_charac_next, root_charac) # try each possible prime polynomial, but skip even numbers (because divisible by 2 so necessarily not irreducible)
# Start of the main loop
correct_primes = []
for prim in prim_candidates: # try potential candidates primitive irreducible polys
seen = bytearray(field_charac+1) # memory variable to indicate if a value was already generated in the field (value at index x is set to 1) or not (set to 0 by default)
conflict = False # flag to know if there was at least one conflict
# Second loop, build the whole Galois Field
x = 1
for i in xrange(field_charac):
# Compute the next value in the field (ie, the next power of alpha/generator)
x = gf_mult_noLUT(x, generator, prim, field_charac+1)
# Rejection criterion: if the value overflowed (above field_charac) or is a duplicate of a previously generated power of alpha, then we reject this polynomial (not prime)
if x > field_charac or seen[x] == 1:
conflict = True
break
# Else we flag this value as seen (to maybe detect future duplicates), and we continue onto the next power of alpha
else:
seen[x] = 1
# End of the second loop: if there's no conflict (no overflow nor duplicated value), this is a prime polynomial!
if not conflict:
correct_primes.append(prim)
if single: return prim
# Return the list of all prime polynomials
return correct_primes | python | def find_prime_polys(generator=2, c_exp=8, fast_primes=False, single=False):
'''Compute the list of prime polynomials for the given generator and galois field characteristic exponent.'''
# fast_primes will output less results but will be significantly faster.
# single will output the first prime polynomial found, so if all you want is to just find one prime polynomial to generate the LUT for Reed-Solomon to work, then just use that.
# A prime polynomial (necessarily irreducible) is necessary to reduce the multiplications in the Galois Field, so as to avoid overflows.
# Why do we need a "prime polynomial"? Can't we just reduce modulo 255 (for GF(2^8) for example)? Because we need the values to be unique.
# For example: if the generator (alpha) = 2 and c_exp = 8 (GF(2^8) == GF(256)), then the generated Galois Field (0, 1, α, α^2, ..., α^(p-1)) becomes, in integers, 0, 1, 2, 4, 8, 16, etc. However, upon reaching 128, the next value will be doubled (ie, next power of 2), which will give 256. Then we must reduce, because we have overflowed above the maximum value of 255. But, if we reduce modulo 255, this will generate 256 == 1, then 2, 4, 8, 16, etc., giving us a repeating pattern of numbers. This is very bad, as it's then no longer a bijection (ie, a non-zero value doesn't have a unique index). That's why we can't just reduce modulo 255; we need another number above 255, which is called the prime polynomial.
# Why so much hassle? Because we are using precomputed look-up tables for multiplication: instead of multiplying a*b, we precompute alpha^a, alpha^b and alpha^(a+b), so that we can just use our lookup table at alpha^(a+b) and get our result. But just like in our original field we had 0,1,2,...,p-1 distinct unique values, in our "LUT" field using alpha we must have unique distinct values (we don't care that they are different from the original field as long as they are unique and distinct). That's why we need to avoid duplicated values, and to avoid duplicated values we need to use a prime irreducible polynomial.
# Here is implemented a bruteforce approach to find all these prime polynomials, by generating every possible prime polynomials (ie, every integers between field_charac+1 and field_charac*2), and then we build the whole Galois Field, and we reject the candidate prime polynomial if it duplicates even one value or if it generates a value above field_charac (ie, cause an overflow).
# Note that this algorithm is slow if the field is too big (above 12), because it's an exhaustive search algorithm. There are probabilistic approaches, and almost surely prime approaches, but there is no deterministic polynomial time algorithm to find irreducible monic polynomials. More info can be found at: http://people.mpi-inf.mpg.de/~csaha/lectures/lec9.pdf
# Another faster algorithm may be found at Adleman, Leonard M., and Hendrik W. Lenstra. "Finding irreducible polynomials over finite fields." Proceedings of the eighteenth annual ACM symposium on Theory of computing. ACM, 1986.
# Prepare the finite field characteristic (2^p - 1), this also represent the maximum possible value in this field
root_charac = 2 # we're in GF(2)
field_charac = int(root_charac**c_exp - 1)
field_charac_next = int(root_charac**(c_exp+1) - 1)
prim_candidates = []
if fast_primes:
prim_candidates = rwh_primes1(field_charac_next) # generate maybe prime polynomials and check later if they really are irreducible
prim_candidates = [x for x in prim_candidates if x > field_charac] # filter out too small primes
else:
prim_candidates = xrange(field_charac+2, field_charac_next, root_charac) # try each possible prime polynomial, but skip even numbers (because divisible by 2 so necessarily not irreducible)
# Start of the main loop
correct_primes = []
for prim in prim_candidates: # try potential candidates primitive irreducible polys
seen = bytearray(field_charac+1) # memory variable to indicate if a value was already generated in the field (value at index x is set to 1) or not (set to 0 by default)
conflict = False # flag to know if there was at least one conflict
# Second loop, build the whole Galois Field
x = 1
for i in xrange(field_charac):
# Compute the next value in the field (ie, the next power of alpha/generator)
x = gf_mult_noLUT(x, generator, prim, field_charac+1)
# Rejection criterion: if the value overflowed (above field_charac) or is a duplicate of a previously generated power of alpha, then we reject this polynomial (not prime)
if x > field_charac or seen[x] == 1:
conflict = True
break
# Else we flag this value as seen (to maybe detect future duplicates), and we continue onto the next power of alpha
else:
seen[x] = 1
# End of the second loop: if there's no conflict (no overflow nor duplicated value), this is a prime polynomial!
if not conflict:
correct_primes.append(prim)
if single: return prim
# Return the list of all prime polynomials
return correct_primes | Compute the list of prime polynomials for the given generator and galois field characteristic exponent. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/reedsolomon/reedsolo.py#L126-L178 |
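A hedged usage sketch (assuming the module above is importable as `reedsolo`): list the prime polynomials usable for GF(2^8) with generator 2 and confirm that 0x11d, the default `prim` used by `init_tables`, is among them:

from reedsolo import find_prime_polys

primes_gf256 = find_prime_polys(generator=2, c_exp=8)
print(len(primes_gf256), hex(primes_gf256[0]))  # how many were found, and the smallest one
print(0x11d in primes_gf256)                    # True: the module's default prime polynomial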
lrq3000/pyFileFixity | pyFileFixity/lib/reedsolomon/reedsolo.py | init_tables | def init_tables(prim=0x11d, generator=2, c_exp=8):
'''Precompute the logarithm and anti-log tables for faster computation later, using the provided primitive polynomial.
These tables are used for multiplication/division since addition/subtraction are simple XOR operations inside GF of characteristic 2.
The basic idea is quite simple: since b**(log_b(x) + log_b(y)) == x * y given any number b (the base or generator of the logarithm), then we can use any number b to precompute logarithm and anti-log (exponentiation) tables to use for multiplying two numbers x and y.
That's why when we use a different base/generator number, the log and anti-log tables are drastically different, but the resulting computations are the same given any such tables.
For more info, see https://en.wikipedia.org/wiki/Finite_field_arithmetic#Implementation_tricks
'''
# generator is the generator number (the "increment" that will be used to walk through the field by multiplication, this must be a prime number). This is basically the base of the logarithm/anti-log tables. Also often noted "alpha" in academic books.
# prim is the primitive/prime (binary) polynomial and must be irreducible (ie, it can't be represented as the product of two smaller polynomials). It's a polynomial in the binary sense: each bit is a coefficient, but in fact it's an integer between field_charac+1 and field_charac*2, and not a list of gf values. The prime polynomial will be used to reduce the overflows back into the range of the Galois Field without duplicating values (all values should be unique). See the function find_prime_polys() and: http://research.swtch.com/field and http://www.pclviewer.com/rs2/galois.html
# note that the choice of generator or prime polynomial doesn't matter very much: any two finite fields of size p^n have identical structure, even if they give the individual elements different names (ie, the coefficients of the codeword will be different, but the final result will be the same: you can always correct as many errors/erasures with any choice for those parameters). That's why it makes sense to refer to all the finite fields, and all decoders based on Reed-Solomon, of size p^n as one concept: GF(p^n). It can however impact sensibly the speed (because some parameters will generate sparser tables).
# c_exp is the exponent for the field's characteristic GF(2^c_exp)
global gf_exp, gf_log, field_charac
field_charac = int(2**c_exp - 1)
gf_exp = bytearray(field_charac * 2) # anti-log (exponential) table. The first two elements will always be [GF256int(1), generator]
gf_log = bytearray(field_charac+1) # log table, log[0] is impossible and thus unused
# For each possible value in the galois field 2^8, we will pre-compute the logarithm and anti-logarithm (exponential) of this value
# To do that, we generate the Galois Field F(2^p) by building a list starting with the element 0 followed by the (p-1) successive powers of the generator α : 1, α, α^1, α^2, ..., α^(p-1).
x = 1
for i in xrange(field_charac): # we could skip index 255 which is equal to index 0 because of modulo: g^255==g^0 but either way, this does not change the later outputs (ie, the ecc symbols will be the same either way)
gf_exp[i] = x # compute anti-log for this value and store it in a table
gf_log[x] = i # compute log at the same time
x = gf_mult_noLUT(x, generator, prim, field_charac+1)
# If you use only generator==2 or a power of 2, you can use the following which is faster than gf_mult_noLUT():
#x <<= 1 # multiply by 2 (change 1 by another number y to multiply by a power of 2^y)
#if x & 0x100: # similar to x >= 256, but a lot faster (because 0x100 == 256)
#x ^= prim # subtract the primitive polynomial from the current value (instead of 255, so that we get a unique set made of coprime numbers), this is the core of the tables generation
# Optimization: double the size of the anti-log table so that we don't need to mod 255 to stay inside the bounds (because we will mainly use this table for the multiplication of two GF numbers, no more).
for i in xrange(field_charac, field_charac * 2):
gf_exp[i] = gf_exp[i - field_charac]
return [gf_log, gf_exp] | python | def init_tables(prim=0x11d, generator=2, c_exp=8):
'''Precompute the logarithm and anti-log tables for faster computation later, using the provided primitive polynomial.
These tables are used for multiplication/division since addition/substraction are simple XOR operations inside GF of characteristic 2.
The basic idea is quite simple: since b**(log_b(x), log_b(y)) == x * y given any number b (the base or generator of the logarithm), then we can use any number b to precompute logarithm and anti-log (exponentiation) tables to use for multiplying two numbers x and y.
That's why when we use a different base/generator number, the log and anti-log tables are drastically different, but the resulting computations are the same given any such tables.
For more infos, see https://en.wikipedia.org/wiki/Finite_field_arithmetic#Implementation_tricks
'''
# generator is the generator number (the "increment" that will be used to walk through the field by multiplication, this must be a prime number). This is basically the base of the logarithm/anti-log tables. Also often noted "alpha" in academic books.
# prim is the primitive/prime (binary) polynomial and must be irreducible (ie, it can't represented as the product of two smaller polynomials). It's a polynomial in the binary sense: each bit is a coefficient, but in fact it's an integer between field_charac+1 and field_charac*2, and not a list of gf values. The prime polynomial will be used to reduce the overflows back into the range of the Galois Field without duplicating values (all values should be unique). See the function find_prime_polys() and: http://research.swtch.com/field and http://www.pclviewer.com/rs2/galois.html
# note that the choice of generator or prime polynomial doesn't matter very much: any two finite fields of size p^n have identical structure, even if they give the individual elements different names (ie, the coefficients of the codeword will be different, but the final result will be the same: you can always correct as many errors/erasures with any choice for those parameters). That's why it makes sense to refer to all the finite fields, and all decoders based on Reed-Solomon, of size p^n as one concept: GF(p^n). It can however impact sensibly the speed (because some parameters will generate sparser tables).
# c_exp is the exponent for the field's characteristic GF(2^c_exp)
global gf_exp, gf_log, field_charac
field_charac = int(2**c_exp - 1)
gf_exp = bytearray(field_charac * 2) # anti-log (exponential) table. The first two elements will always be [GF256int(1), generator]
gf_log = bytearray(field_charac+1) # log table, log[0] is impossible and thus unused
# For each possible value in the galois field 2^8, we will pre-compute the logarithm and anti-logarithm (exponential) of this value
# To do that, we generate the Galois Field F(2^p) by building a list starting with the element 0 followed by the (p-1) successive powers of the generator α : 1, α, α^1, α^2, ..., α^(p-1).
x = 1
for i in xrange(field_charac): # we could skip index 255 which is equal to index 0 because of modulo: g^255==g^0 but either way, this does not change the later outputs (ie, the ecc symbols will be the same either way)
gf_exp[i] = x # compute anti-log for this value and store it in a table
gf_log[x] = i # compute log at the same time
x = gf_mult_noLUT(x, generator, prim, field_charac+1)
# If you use only generator==2 or a power of 2, you can use the following which is faster than gf_mult_noLUT():
#x <<= 1 # multiply by 2 (change 1 by another number y to multiply by a power of 2^y)
#if x & 0x100: # similar to x >= 256, but a lot faster (because 0x100 == 256)
#x ^= prim # subtract the primitive polynomial from the current value (instead of 255, so that we get a unique set made of coprime numbers), this is the core of the tables generation
# Optimization: double the size of the anti-log table so that we don't need to mod 255 to stay inside the bounds (because we will mainly use this table for the multiplication of two GF numbers, no more).
for i in xrange(field_charac, field_charac * 2):
gf_exp[i] = gf_exp[i - field_charac]
return [gf_log, gf_exp] | Precompute the logarithm and anti-log tables for faster computation later, using the provided primitive polynomial.
These tables are used for multiplication/division since addition/subtraction are simple XOR operations inside GF of characteristic 2.
The basic idea is quite simple: since b**(log_b(x) + log_b(y)) == x * y given any number b (the base or generator of the logarithm), then we can use any number b to precompute logarithm and anti-log (exponentiation) tables to use for multiplying two numbers x and y.
That's why when we use a different base/generator number, the log and anti-log tables are drastically different, but the resulting computations are the same given any such tables.
For more info, see https://en.wikipedia.org/wiki/Finite_field_arithmetic#Implementation_tricks | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/reedsolomon/reedsolo.py#L180-L214 |
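A short usage sketch (again assuming the module is importable as `reedsolo`; `gf_mul` is the table-based multiply referenced by `gf_poly_mul_simple` further below): build the LUTs for the default parameters and check one reduction, 2 * 128, which overflows to 0x100 and comes back as 0x1d after XOR with the prime polynomial 0x11d:

from reedsolo import init_tables, gf_mul

gf_log, gf_exp = init_tables(prim=0x11d, generator=2, c_exp=8)
print(list(gf_exp[:4]))  # [1, 2, 4, 8] -- successive powers of the generator
print(gf_mul(2, 128))    # 29 == 0x1d, since 0x100 ^ 0x11d == 0x1d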
lrq3000/pyFileFixity | pyFileFixity/lib/reedsolomon/reedsolo.py | gf_mult_noLUT_slow | def gf_mult_noLUT_slow(x, y, prim=0):
'''Multiplication in Galois Fields without using a precomputed look-up table (and thus it's slower) by using the standard carry-less multiplication + modular reduction using an irreducible prime polynomial.'''
### Define bitwise carry-less operations as inner functions ###
def cl_mult(x,y):
'''Bitwise carry-less multiplication on integers'''
z = 0
i = 0
while (y>>i) > 0:
if y & (1<<i):
z ^= x<<i
i += 1
return z
def bit_length(n):
'''Compute the position of the most significant bit (1) of an integer. Equivalent to int.bit_length()'''
bits = 0
while n >> bits: bits += 1
return bits
def cl_div(dividend, divisor=None):
'''Bitwise carry-less long division on integers and returns the remainder'''
# Compute the position of the most significant bit for each integer
dl1 = bit_length(dividend)
dl2 = bit_length(divisor)
# If the dividend is smaller than the divisor, just exit
if dl1 < dl2:
return dividend
# Else, align the most significant 1 of the divisor to the most significant 1 of the dividend (by shifting the divisor)
for i in xrange(dl1-dl2,-1,-1):
# Check that the dividend is divisible (useless for the first iteration but important for the next ones)
if dividend & (1 << i+dl2-1):
# If divisible, then shift the divisor to align the most significant bits and XOR (carry-less subtraction)
dividend ^= divisor << i
return dividend
### Main GF multiplication routine ###
# Multiply the gf numbers
result = cl_mult(x,y)
# Then do a modular reduction (ie, remainder from the division) with an irreducible primitive polynomial so that it stays inside GF bounds
if prim > 0:
result = cl_div(result, prim)
return result | python | def gf_mult_noLUT_slow(x, y, prim=0):
'''Multiplication in Galois Fields without using a precomputed look-up table (and thus it's slower) by using the standard carry-less multiplication + modular reduction using an irreducible prime polynomial.'''
### Define bitwise carry-less operations as inner functions ###
def cl_mult(x,y):
'''Bitwise carry-less multiplication on integers'''
z = 0
i = 0
while (y>>i) > 0:
if y & (1<<i):
z ^= x<<i
i += 1
return z
def bit_length(n):
'''Compute the position of the most significant bit (1) of an integer. Equivalent to int.bit_length()'''
bits = 0
while n >> bits: bits += 1
return bits
def cl_div(dividend, divisor=None):
'''Bitwise carry-less long division on integers and returns the remainder'''
# Compute the position of the most significant bit for each integer
dl1 = bit_length(dividend)
dl2 = bit_length(divisor)
# If the dividend is smaller than the divisor, just exit
if dl1 < dl2:
return dividend
# Else, align the most significant 1 of the divisor to the most significant 1 of the dividend (by shifting the divisor)
for i in xrange(dl1-dl2,-1,-1):
# Check that the dividend is divisible (useless for the first iteration but important for the next ones)
if dividend & (1 << i+dl2-1):
# If divisible, then shift the divisor to align the most significant bits and XOR (carry-less subtraction)
dividend ^= divisor << i
return dividend
### Main GF multiplication routine ###
# Multiply the gf numbers
result = cl_mult(x,y)
# Then do a modular reduction (ie, remainder from the division) with an irreducible primitive polynomial so that it stays inside GF bounds
if prim > 0:
result = cl_div(result, prim)
return result | Multiplication in Galois Fields without using a precomputed look-up table (and thus it's slower) by using the standard carry-less multiplication + modular reduction using an irreducible prime polynomial. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/reedsolomon/reedsolo.py#L243-L287 |
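A worked check of the slow path (operand values chosen only for illustration): multiplying 0b10001001 by 0b00101010 carry-lessly yields 0b1010001111010, and reducing that modulo the prime polynomial 0x11d brings it back below 256:

from reedsolo import gf_mult_noLUT_slow

print(bin(gf_mult_noLUT_slow(0b10001001, 0b00101010)))         # 0b1010001111010 (no reduction)
print(hex(gf_mult_noLUT_slow(0b10001001, 0b00101010, 0x11d)))  # 0xc3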
lrq3000/pyFileFixity | pyFileFixity/lib/reedsolomon/reedsolo.py | gf_mult_noLUT | def gf_mult_noLUT(x, y, prim=0, field_charac_full=256, carryless=True):
'''Galois Field integer multiplication using Russian Peasant Multiplication algorithm (faster than the standard multiplication + modular reduction).
If prim is 0 and carryless=False, then the function produces the result for a standard integers multiplication (no carry-less arithmetics nor modular reduction).'''
r = 0
while y: # while y is above 0
if y & 1: r = r ^ x if carryless else r + x # y is odd, then add the corresponding x to r (the sum of all x's corresponding to odd y's will give the final product). Note that since we're in GF(2), the addition is in fact an XOR (very important because in GF(2) the multiplication and additions are carry-less, thus it changes the result!).
y = y >> 1 # equivalent to y // 2
x = x << 1 # equivalent to x*2
if prim > 0 and x & field_charac_full: x = x ^ prim # GF modulo: if x >= 256 then apply modular reduction using the primitive polynomial (we just subtract, but since the primitive number can be above 256 then we directly XOR).
return r | python | def gf_mult_noLUT(x, y, prim=0, field_charac_full=256, carryless=True):
'''Galois Field integer multiplication using Russian Peasant Multiplication algorithm (faster than the standard multiplication + modular reduction).
If prim is 0 and carryless=False, then the function produces the result for a standard integers multiplication (no carry-less arithmetics nor modular reduction).'''
r = 0
while y: # while y is above 0
if y & 1: r = r ^ x if carryless else r + x # y is odd, then add the corresponding x to r (the sum of all x's corresponding to odd y's will give the final product). Note that since we're in GF(2), the addition is in fact an XOR (very important because in GF(2) the multiplication and additions are carry-less, thus it changes the result!).
y = y >> 1 # equivalent to y // 2
x = x << 1 # equivalent to x*2
if prim > 0 and x & field_charac_full: x = x ^ prim # GF modulo: if x >= 256 then apply modular reduction using the primitive polynomial (we just substract, but since the primitive number can be above 256 then we directly XOR).
return r | Galois Field integer multiplication using Russian Peasant Multiplication algorithm (faster than the standard multiplication + modular reduction).
If prim is 0 and carryless=False, then the function produces the result for a standard integers multiplication (no carry-less arithmetics nor modular reduction). | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/reedsolomon/reedsolo.py#L289-L299 |
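A cross-check, as a sketch, of the Russian Peasant version against the slow routine above, plus its plain-integer mode (prim=0, carryless=False); the operands are the same illustrative values:

from reedsolo import gf_mult_noLUT

print(gf_mult_noLUT(0x89, 0x2a, prim=0x11d))               # 195 == 0xc3, matches gf_mult_noLUT_slow
print(gf_mult_noLUT(0x89, 0x2a, prim=0, carryless=False))  # 5754 == 137 * 42, ordinary integer product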
lrq3000/pyFileFixity | pyFileFixity/lib/reedsolomon/reedsolo.py | gf_poly_mul | def gf_poly_mul(p, q):
'''Multiply two polynomials, inside Galois Field (but the procedure is generic). Optimized function by precomputation of log.'''
# Pre-allocate the result array
r = bytearray(len(p) + len(q) - 1)
# Precompute the logarithm of p
lp = [gf_log[p[i]] for i in xrange(len(p))]
# Compute the polynomial multiplication (just like the outer product of two vectors, we multiply each coefficient of p with all coefficients of q)
for j in xrange(len(q)):
qj = q[j] # optimization: load the coefficient once
if qj != 0: # log(0) is undefined, we need to check that
lq = gf_log[qj] # Optimization: precache the logarithm of the current coefficient of q
for i in xrange(len(p)):
if p[i] != 0: # log(0) is undefined, need to check that...
r[i + j] ^= gf_exp[lp[i] + lq] # equivalent to: r[i + j] = gf_add(r[i+j], gf_mul(p[i], q[j]))
return r | python | def gf_poly_mul(p, q):
'''Multiply two polynomials, inside Galois Field (but the procedure is generic). Optimized function by precomputation of log.'''
# Pre-allocate the result array
r = bytearray(len(p) + len(q) - 1)
# Precompute the logarithm of p
lp = [gf_log[p[i]] for i in xrange(len(p))]
# Compute the polynomial multiplication (just like the outer product of two vectors, we multiply each coefficient of p with all coefficients of q)
for j in xrange(len(q)):
qj = q[j] # optimization: load the coefficient once
if qj != 0: # log(0) is undefined, we need to check that
lq = gf_log[qj] # Optimization: precache the logarithm of the current coefficient of q
for i in xrange(len(p)):
if p[i] != 0: # log(0) is undefined, need to check that...
r[i + j] ^= gf_exp[lp[i] + lq] # equivalent to: r[i + j] = gf_add(r[i+j], gf_mul(p[i], q[j]))
return r | Multiply two polynomials, inside Galois Field (but the procedure is generic). Optimized function by precomputation of log. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/reedsolomon/reedsolo.py#L316-L330 |
lrq3000/pyFileFixity | pyFileFixity/lib/reedsolomon/reedsolo.py | gf_poly_mul_simple | def gf_poly_mul_simple(p, q): # simple equivalent way of multiplying two polynomials without precomputation, and thus slower
'''Multiply two polynomials, inside Galois Field'''
# Pre-allocate the result array
r = bytearray(len(p) + len(q) - 1)
# Compute the polynomial multiplication (just like the outer product of two vectors, we multiply each coefficient of p with all coefficients of q)
for j in xrange(len(q)):
for i in xrange(len(p)):
r[i + j] ^= gf_mul(p[i], q[j]) # equivalent to: r[i + j] = gf_add(r[i+j], gf_mul(p[i], q[j])) -- you can see it's your usual polynomial multiplication
return r | python | def gf_poly_mul_simple(p, q): # simple equivalent way of multiplying two polynomials without precomputation, but thus it's slower
'''Multiply two polynomials, inside Galois Field'''
# Pre-allocate the result array
r = bytearray(len(p) + len(q) - 1)
# Compute the polynomial multiplication (just like the outer product of two vectors, we multiply each coefficient of p with all coefficients of q)
for j in xrange(len(q)):
for i in xrange(len(p)):
r[i + j] ^= gf_mul(p[i], q[j]) # equivalent to: r[i + j] = gf_add(r[i+j], gf_mul(p[i], q[j])) -- you can see it's your usual polynomial multiplication
return r | Multiply two polynomials, inside Galois Field | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/reedsolomon/reedsolo.py#L332-L340 |
lrq3000/pyFileFixity | pyFileFixity/lib/reedsolomon/reedsolo.py | gf_poly_div | def gf_poly_div(dividend, divisor):
'''Fast polynomial division by using Extended Synthetic Division and optimized for GF(2^p) computations (doesn't work with standard polynomials outside of this galois field).'''
# CAUTION: this function expects polynomials to follow the opposite convention at decoding: the terms must go from the biggest to lowest degree (while most other functions here expect a list from lowest to biggest degree). eg: 1 + 2x + 5x^2 = [5, 2, 1], NOT [1, 2, 5]
msg_out = bytearray(dividend) # Copy the dividend list and pad with 0 where the ecc bytes will be computed
#normalizer = divisor[0] # precomputing for performance
for i in xrange(len(dividend) - (len(divisor)-1)):
#msg_out[i] /= normalizer # for general polynomial division (when polynomials are non-monic), the usual way of using synthetic division is to divide the divisor g(x) by its leading coefficient (call it a). In this implementation, this means: we need to compute: coef = msg_out[i] / gen[0]. For more info, see http://en.wikipedia.org/wiki/Synthetic_division
coef = msg_out[i] # precaching
if coef != 0: # log(0) is undefined, so we need to avoid that case explicitly (and it's also a good optimization). In fact if you remove it, it should still work because gf_mul() will take care of the condition. But it's still a good practice to put the condition here.
for j in xrange(1, len(divisor)): # in synthetic division, we always skip the first coefficient of the divisor, because it's only used to normalize the dividend coefficient
if divisor[j] != 0: # log(0) is undefined
msg_out[i + j] ^= gf_mul(divisor[j], coef) # equivalent to the more mathematically correct (but xoring directly is faster): msg_out[i + j] += -divisor[j] * coef
# The resulting msg_out contains both the quotient and the remainder, the remainder being the size of the divisor (the remainder has necessarily the same degree as the divisor -- not length but degree == length-1 -- since it's what we couldn't divide from the dividend), so we compute the index where this separation is, and return the quotient and remainder.
separator = -(len(divisor)-1)
return msg_out[:separator], msg_out[separator:] | python | def gf_poly_div(dividend, divisor):
'''Fast polynomial division by using Extended Synthetic Division and optimized for GF(2^p) computations (doesn't work with standard polynomials outside of this galois field).'''
# CAUTION: this function expects polynomials to follow the opposite convention at decoding: the terms must go from the biggest to lowest degree (while most other functions here expect a list from lowest to biggest degree). eg: 1 + 2x + 5x^2 = [5, 2, 1], NOT [1, 2, 5]
msg_out = bytearray(dividend) # Copy the dividend list and pad with 0 where the ecc bytes will be computed
#normalizer = divisor[0] # precomputing for performance
for i in xrange(len(dividend) - (len(divisor)-1)):
#msg_out[i] /= normalizer # for general polynomial division (when polynomials are non-monic), the usual way of using synthetic division is to divide the divisor g(x) by its leading coefficient (call it a). In this implementation, this means: we need to compute: coef = msg_out[i] / gen[0]. For more info, see http://en.wikipedia.org/wiki/Synthetic_division
coef = msg_out[i] # precaching
if coef != 0: # log(0) is undefined, so we need to avoid that case explicitly (and it's also a good optimization). In fact if you remove it, it should still work because gf_mul() will take care of the condition. But it's still a good practice to put the condition here.
for j in xrange(1, len(divisor)): # in synthetic division, we always skip the first coefficient of the divisor, because it's only used to normalize the dividend coefficient
if divisor[j] != 0: # log(0) is undefined
msg_out[i + j] ^= gf_mul(divisor[j], coef) # equivalent to the more mathematically correct (but xoring directly is faster): msg_out[i + j] += -divisor[j] * coef
# The resulting msg_out contains both the quotient and the remainder, the remainder being the size of the divisor (the remainder has necessarily the same degree as the divisor -- not length but degree == length-1 -- since it's what we couldn't divide from the dividend), so we compute the index where this separation is, and return the quotient and remainder.
separator = -(len(divisor)-1)
return msg_out[:separator], msg_out[separator:] | Fast polynomial division by using Extended Synthetic Division and optimized for GF(2^p) computations (doesn't work with standard polynomials outside of this galois field). | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/reedsolomon/reedsolo.py#L346-L362 |
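Editor's note: a hedged usage sketch (same init_tables() assumption as in the earlier sketch) illustrating the big-to-low coefficient convention this division routine expects.

from reedsolo import init_tables, gf_poly_div  # illustrative import path

init_tables()
# Coefficients go from highest to lowest degree: [1, 0, 1] is x^2 + 1 and [1, 1] is x + 1.
quotient, remainder = gf_poly_div(bytearray([1, 0, 1]), bytearray([1, 1]))
assert list(quotient) == [1, 1] and list(remainder) == [0]  # x^2 + 1 = (x + 1)^2 in characteristic 2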
lrq3000/pyFileFixity | pyFileFixity/lib/reedsolomon/reedsolo.py | gf_poly_square | def gf_poly_square(poly):
'''Linear time implementation of polynomial squaring. For details, see paper: "A fast software implementation for arithmetic operations in GF (2n)". De Win, E., Bosselaers, A., Vandenberghe, S., De Gersem, P., & Vandewalle, J. (1996, January). In Advances in Cryptology - Asiacrypt'96 (pp. 65-76). Springer Berlin Heidelberg.'''
length = len(poly)
out = bytearray(2*length - 1)
for i in xrange(length-1):
p = poly[i]
k = 2*i
if p != 0:
#out[k] = gf_exp[(2*gf_log[p]) % field_charac] # not necessary to modulo (2^r)-1 since gf_exp is duplicated up to 510.
out[k] = gf_exp[2*gf_log[p]]
#else: # not necessary since the output is already initialized to an array of 0
#out[k] = 0
out[2*length-2] = gf_exp[2*gf_log[poly[length-1]]]
if out[0] == 0: out[0] = 2*poly[1] - 1
return out | python | def gf_poly_square(poly):
'''Linear time implementation of polynomial squaring. For details, see paper: "A fast software implementation for arithmetic operations in GF (2n)". De Win, E., Bosselaers, A., Vandenberghe, S., De Gersem, P., & Vandewalle, J. (1996, January). In Advances in Cryptology - Asiacrypt'96 (pp. 65-76). Springer Berlin Heidelberg.'''
length = len(poly)
out = bytearray(2*length - 1)
for i in xrange(length-1):
p = poly[i]
k = 2*i
if p != 0:
#out[k] = gf_exp[(2*gf_log[p]) % field_charac] # not necessary to modulo (2^r)-1 since gf_exp is duplicated up to 510.
out[k] = gf_exp[2*gf_log[p]]
#else: # not necessary since the output is already initialized to an array of 0
#out[k] = 0
out[2*length-2] = gf_exp[2*gf_log[poly[length-1]]]
if out[0] == 0: out[0] = 2*poly[1] - 1
return out | Linear time implementation of polynomial squaring. For details, see paper: "A fast software implementation for arithmetic operations in GF (2n)". De Win, E., Bosselaers, A., Vandenberghe, S., De Gersem, P., & Vandewalle, J. (1996, January). In Advances in Cryptology - Asiacrypt'96 (pp. 65-76). Springer Berlin Heidelberg. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/reedsolomon/reedsolo.py#L364-L378 |
lrq3000/pyFileFixity | pyFileFixity/lib/reedsolomon/reedsolo.py | gf_poly_eval | def gf_poly_eval(poly, x):
'''Evaluates a polynomial in GF(2^p) given the value for x. This is based on Horner's scheme for maximum efficiency.'''
y = poly[0]
for i in xrange(1, len(poly)):
y = gf_mul(y, x) ^ poly[i]
return y | python | def gf_poly_eval(poly, x):
'''Evaluates a polynomial in GF(2^p) given the value for x. This is based on Horner's scheme for maximum efficiency.'''
y = poly[0]
for i in xrange(1, len(poly)):
y = gf_mul(y, x) ^ poly[i]
return y | Evaluates a polynomial in GF(2^p) given the value for x. This is based on Horner's scheme for maximum efficiency. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/reedsolomon/reedsolo.py#L380-L385 |
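Editor's note: a hedged usage sketch of Horner evaluation (same init_tables() assumption). Remember that addition in GF(2^8) is XOR, which is why x^2 + 1 at x = 2 gives 4 XOR 1 = 5.

from reedsolo import init_tables, gf_poly_eval  # illustrative import path

init_tables()
assert gf_poly_eval([1, 1], 2) == 3     # x + 1 at x = 2 -> 2 XOR 1
assert gf_poly_eval([1, 0, 1], 2) == 5  # x^2 + 1 at x = 2 -> 4 XOR 1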
lrq3000/pyFileFixity | pyFileFixity/lib/reedsolomon/reedsolo.py | rs_generator_poly | def rs_generator_poly(nsym, fcr=0, generator=2):
'''Generate an irreducible generator polynomial (necessary to encode a message into Reed-Solomon)'''
g = bytearray([1])
for i in xrange(nsym):
g = gf_poly_mul(g, [1, gf_pow(generator, i+fcr)])
return g | python | def rs_generator_poly(nsym, fcr=0, generator=2):
'''Generate an irreducible generator polynomial (necessary to encode a message into Reed-Solomon)'''
g = bytearray([1])
for i in xrange(nsym):
g = gf_poly_mul(g, [1, gf_pow(generator, i+fcr)])
return g | Generate an irreducible generator polynomial (necessary to encode a message into Reed-Solomon) | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/reedsolomon/reedsolo.py#L390-L395 |
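Editor's note: a hedged usage sketch (same table-initialization assumption). With nsym=2, fcr=0 and the default generator alpha=2, g(x) = (x - 1)(x - alpha) = x^2 + 3x + 2, stored highest degree first.

from reedsolo import init_tables, rs_generator_poly  # illustrative import path

init_tables()
assert list(rs_generator_poly(2)) == [1, 3, 2]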
lrq3000/pyFileFixity | pyFileFixity/lib/reedsolomon/reedsolo.py | rs_generator_poly_all | def rs_generator_poly_all(max_nsym, fcr=0, generator=2):
'''Generate all irreducible generator polynomials up to max_nsym (usually you can use n, the length of the message+ecc). Very useful to reduce processing time if you want to encode using variable schemes and nsym rates.'''
g_all = {}
g_all[0] = g_all[1] = [1]
for nsym in xrange(max_nsym):
g_all[nsym] = rs_generator_poly(nsym, fcr, generator)
return g_all | python | def rs_generator_poly_all(max_nsym, fcr=0, generator=2):
'''Generate all irreducible generator polynomials up to max_nsym (usually you can use n, the length of the message+ecc). Very useful to reduce processing time if you want to encode using variable schemes and nsym rates.'''
g_all = {}
g_all[0] = g_all[1] = [1]
for nsym in xrange(max_nsym):
g_all[nsym] = rs_generator_poly(nsym, fcr, generator)
return g_all | Generate all irreducible generator polynomials up to max_nsym (usually you can use n, the length of the message+ecc). Very useful to reduce processing time if you want to encode using variable schemes and nsym rates. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/reedsolomon/reedsolo.py#L397-L403 |
lrq3000/pyFileFixity | pyFileFixity/lib/reedsolomon/reedsolo.py | rs_simple_encode_msg | def rs_simple_encode_msg(msg_in, nsym, fcr=0, generator=2, gen=None):
'''Simple Reed-Solomon encoding (mainly an example for you to understand how it works, because it's slower than the inlined function below)'''
global field_charac
if (len(msg_in) + nsym) > field_charac: raise ValueError("Message is too long (%i when max is %i)" % (len(msg_in)+nsym, field_charac))
if gen is None: gen = rs_generator_poly(nsym, fcr, generator)
# Pad the message, then divide it by the irreducible generator polynomial
_, remainder = gf_poly_div(msg_in + bytearray(len(gen)-1), gen)
# The remainder is our RS code! Just append it to our original message to get our full codeword (this represents a polynomial of max 256 terms)
msg_out = msg_in + remainder
# Return the codeword
return msg_out | python | def rs_simple_encode_msg(msg_in, nsym, fcr=0, generator=2, gen=None):
'''Simple Reed-Solomon encoding (mainly an example for you to understand how it works, because it's slower than the inlined function below)'''
global field_charac
if (len(msg_in) + nsym) > field_charac: raise ValueError("Message is too long (%i when max is %i)" % (len(msg_in)+nsym, field_charac))
if gen is None: gen = rs_generator_poly(nsym, fcr, generator)
# Pad the message, then divide it by the irreducible generator polynomial
_, remainder = gf_poly_div(msg_in + bytearray(len(gen)-1), gen)
# The remainder is our RS code! Just append it to our original message to get our full codeword (this represents a polynomial of max 256 terms)
msg_out = msg_in + remainder
# Return the codeword
return msg_out | Simple Reed-Solomon encoding (mainly an example for you to understand how it works, because it's slower than the inlined function below) | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/reedsolomon/reedsolo.py#L405-L416 |
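Editor's note: a hedged usage sketch (not from the repository, same init_tables() assumption) showing the two defining properties of this systematic encoder: the message is kept verbatim at the front, and the full codeword is divisible by the generator polynomial.

from reedsolo import init_tables, rs_generator_poly, rs_simple_encode_msg, gf_poly_div  # illustrative

init_tables()
msg, nsym = bytearray(b"hello world"), 10
codeword = rs_simple_encode_msg(msg, nsym)
assert len(codeword) == len(msg) + nsym and codeword[:len(msg)] == msg  # systematic code
_, rem = gf_poly_div(codeword, rs_generator_poly(nsym))
assert all(b == 0 for b in rem)  # codeword(x) mod g(x) == 0 by construction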
lrq3000/pyFileFixity | pyFileFixity/lib/reedsolomon/reedsolo.py | rs_encode_msg | def rs_encode_msg(msg_in, nsym, fcr=0, generator=2, gen=None):
'''Reed-Solomon main encoding function, using polynomial division (Extended Synthetic Division, the fastest algorithm available to my knowledge), better explained at http://research.swtch.com/field'''
global field_charac
if (len(msg_in) + nsym) > field_charac: raise ValueError("Message is too long (%i when max is %i)" % (len(msg_in)+nsym, field_charac))
if gen is None: gen = rs_generator_poly(nsym, fcr, generator)
msg_in = bytearray(msg_in)
msg_out = bytearray(msg_in) + bytearray(len(gen)-1) # init msg_out with the values inside msg_in and pad with len(gen)-1 bytes (which is the number of ecc symbols).
# Precompute the logarithm of every items in the generator
lgen = bytearray([gf_log[gen[j]] for j in xrange(len(gen))])
# Extended synthetic division main loop
# Fastest implementation with PyPy (but the Cython version in creedsolo.pyx is about 2x faster)
for i in xrange(len(msg_in)):
coef = msg_out[i] # Note that it's msg_out here, not msg_in. Thus, we reuse the updated value at each iteration (this is how Synthetic Division works: instead of storing in a temporary register the intermediate values, we directly commit them to the output).
# coef = gf_mul(msg_out[i], gf_inverse(gen[0])) # for general polynomial division (when polynomials are non-monic), the usual way of using synthetic division is to divide the divisor g(x) with its leading coefficient (call it a). In this implementation, this means:we need to compute: coef = msg_out[i] / gen[0]
if coef != 0: # log(0) is undefined, so we need to manually check for this case. There's no need to check the divisor here because we know it can't be 0 since we generated it.
lcoef = gf_log[coef] # precaching
for j in xrange(1, len(gen)): # in synthetic division, we always skip the first coefficient of the divisor, because it's only used to normalize the dividend coefficient (which is here useless since the divisor, the generator polynomial, is always monic)
#if gen[j] != 0: # log(0) is undefined so we need to check that, but it slows things down in fact and it's useless in our case (Reed-Solomon encoding) since we know that all coefficients in the generator are not 0
msg_out[i + j] ^= gf_exp[lcoef + lgen[j]] # optimization, equivalent to gf_mul(gen[j], msg_out[i]) and we just subtract it from msg_out[i+j] (but since we are in GF256, it's equivalent to an addition and to an XOR). In other words, this is simply a "multiply-accumulate operation"
# Recopy the original message bytes (overwrites the part where the quotient was computed)
msg_out[:len(msg_in)] = msg_in # equivalent to c = mprime - b, where mprime is msg_in padded with [0]*nsym
return msg_out | python | def rs_encode_msg(msg_in, nsym, fcr=0, generator=2, gen=None):
'''Reed-Solomon main encoding function, using polynomial division (Extended Synthetic Division, the fastest algorithm available to my knowledge), better explained at http://research.swtch.com/field'''
global field_charac
if (len(msg_in) + nsym) > field_charac: raise ValueError("Message is too long (%i when max is %i)" % (len(msg_in)+nsym, field_charac))
if gen is None: gen = rs_generator_poly(nsym, fcr, generator)
msg_in = bytearray(msg_in)
msg_out = bytearray(msg_in) + bytearray(len(gen)-1) # init msg_out with the values inside msg_in and pad with len(gen)-1 bytes (which is the number of ecc symbols).
# Precompute the logarithm of every items in the generator
lgen = bytearray([gf_log[gen[j]] for j in xrange(len(gen))])
# Extended synthetic division main loop
# Fastest implementation with PyPy (but the Cython version in creedsolo.pyx is about 2x faster)
for i in xrange(len(msg_in)):
coef = msg_out[i] # Note that it's msg_out here, not msg_in. Thus, we reuse the updated value at each iteration (this is how Synthetic Division works: instead of storing in a temporary register the intermediate values, we directly commit them to the output).
# coef = gf_mul(msg_out[i], gf_inverse(gen[0])) # for general polynomial division (when polynomials are non-monic), the usual way of using synthetic division is to divide the divisor g(x) with its leading coefficient (call it a). In this implementation, this means:we need to compute: coef = msg_out[i] / gen[0]
if coef != 0: # log(0) is undefined, so we need to manually check for this case. There's no need to check the divisor here because we know it can't be 0 since we generated it.
lcoef = gf_log[coef] # precaching
for j in xrange(1, len(gen)): # in synthetic division, we always skip the first coefficient of the divisor, because it's only used to normalize the dividend coefficient (which is here useless since the divisor, the generator polynomial, is always monic)
#if gen[j] != 0: # log(0) is undefined so we need to check that, but it slows things down in fact and it's useless in our case (Reed-Solomon encoding) since we know that all coefficients in the generator are not 0
msg_out[i + j] ^= gf_exp[lcoef + lgen[j]] # optimization, equivalent to gf_mul(gen[j], msg_out[i]) and we just subtract it from msg_out[i+j] (but since we are in GF256, it's equivalent to an addition and to an XOR). In other words, this is simply a "multiply-accumulate operation"
# Recopy the original message bytes (overwrites the part where the quotient was computed)
msg_out[:len(msg_in)] = msg_in # equivalent to c = mprime - b, where mprime is msg_in padded with [0]*nsym
return msg_out | Reed-Solomon main encoding function, using polynomial division (Extended Synthetic Division, the fastest algorithm available to my knowledge), better explained at http://research.swtch.com/field | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/reedsolomon/reedsolo.py#L418-L444 |
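Editor's note: a hedged sanity check (same assumptions as the sketches above): the inlined encoder must produce exactly the same codeword as the didactic one, only faster.

from reedsolo import init_tables, rs_encode_msg, rs_simple_encode_msg  # illustrative import path

init_tables()
msg = bytearray(b"hello world")
assert rs_encode_msg(msg, 10) == rs_simple_encode_msg(msg, 10)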
lrq3000/pyFileFixity | pyFileFixity/lib/reedsolomon/reedsolo.py | rs_calc_syndromes | def rs_calc_syndromes(msg, nsym, fcr=0, generator=2):
'''Given the received codeword msg and the number of error correcting symbols (nsym), computes the syndromes polynomial.
Mathematically, it's essentially equivalent to a Fourier transform (Chien search being the inverse).
'''
# Note the "[0] +" : we add a 0 coefficient for the lowest degree (the constant). This effectively shifts the syndrome, and will shift every computations depending on the syndromes (such as the errors locator polynomial, errors evaluator polynomial, etc. but not the errors positions).
# This is not necessary as anyway syndromes are defined such as there are only non-zero coefficients (the only 0 is the shift of the constant here) and subsequent computations will/must account for the shift by skipping the first iteration (eg, the often seen range(1, n-k+1)), but you can also avoid prepending the 0 coeff and adapt every subsequent computations to start from 0 instead of 1.
return [0] + [gf_poly_eval(msg, gf_pow(generator, i+fcr)) for i in xrange(nsym)] | python | def rs_calc_syndromes(msg, nsym, fcr=0, generator=2):
'''Given the received codeword msg and the number of error correcting symbols (nsym), computes the syndromes polynomial.
Mathematically, it's essentially equivalent to a Fourier transform (Chien search being the inverse).
'''
# Note the "[0] +" : we add a 0 coefficient for the lowest degree (the constant). This effectively shifts the syndrome, and will shift every computations depending on the syndromes (such as the errors locator polynomial, errors evaluator polynomial, etc. but not the errors positions).
# This is not necessary as anyway syndromes are defined such as there are only non-zero coefficients (the only 0 is the shift of the constant here) and subsequent computations will/must account for the shift by skipping the first iteration (eg, the often seen range(1, n-k+1)), but you can also avoid prepending the 0 coeff and adapt every subsequent computations to start from 0 instead of 1.
return [0] + [gf_poly_eval(msg, gf_pow(generator, i+fcr)) for i in xrange(nsym)] | Given the received codeword msg and the number of error correcting symbols (nsym), computes the syndromes polynomial.
Mathematically, it's essentially equivalent to a Fourier transform (Chien search being the inverse). | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/reedsolomon/reedsolo.py#L449-L455 |
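Editor's note: a hedged usage sketch (same init_tables() assumption). An intact codeword has an all-zero syndrome; corrupting any single symbol makes at least one syndrome coefficient non-zero.

from reedsolo import init_tables, rs_encode_msg, rs_calc_syndromes  # illustrative import path

init_tables()
code = rs_encode_msg(bytearray(b"hello world"), 10)
assert max(rs_calc_syndromes(code, 10)) == 0  # intact codeword: all syndromes are zero
code[3] ^= 0x55                               # corrupt one symbol
assert max(rs_calc_syndromes(code, 10)) > 0   # the corruption shows up in the syndromes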
lrq3000/pyFileFixity | pyFileFixity/lib/reedsolomon/reedsolo.py | rs_correct_errata | def rs_correct_errata(msg_in, synd, err_pos, fcr=0, generator=2): # err_pos is a list of the positions of the errors/erasures/errata
'''Forney algorithm, computes the values (error magnitude) to correct the input message.'''
global field_charac
msg = bytearray(msg_in)
# calculate errata locator polynomial to correct both errors and erasures (by combining the errors positions given by the error locator polynomial found by BM with the erasures positions given by caller)
coef_pos = [len(msg) - 1 - p for p in err_pos] # need to convert the positions to coefficients degrees for the errata locator algo to work (eg: instead of [0, 1, 2] it will become [len(msg)-1, len(msg)-2, len(msg) -3])
err_loc = rs_find_errata_locator(coef_pos, generator)
# calculate errata evaluator polynomial (often called Omega or Gamma in academic papers)
err_eval = rs_find_error_evaluator(synd[::-1], err_loc, len(err_loc)-1)[::-1]
# Second part of Chien search to get the error location polynomial X from the error positions in err_pos (the roots of the error locator polynomial, ie, where it evaluates to 0)
X = [] # will store the position of the errors
for i in xrange(len(coef_pos)):
l = field_charac - coef_pos[i]
X.append( gf_pow(generator, -l) )
# Forney algorithm: compute the magnitudes
E = bytearray(len(msg)) # will store the values that need to be corrected (subtracted) from the message containing errors. This is sometimes called the error magnitude polynomial.
Xlength = len(X)
for i, Xi in enumerate(X):
Xi_inv = gf_inverse(Xi)
# Compute the formal derivative of the error locator polynomial (see Blahut, Algebraic codes for data transmission, pp 196-197).
# the formal derivative of the errata locator is used as the denominator of the Forney Algorithm, which simply says that the ith error value is given by error_evaluator(gf_inverse(Xi)) / error_locator_derivative(gf_inverse(Xi)). See Blahut, Algebraic codes for data transmission, pp 196-197.
err_loc_prime_tmp = []
for j in xrange(Xlength):
if j != i:
err_loc_prime_tmp.append( gf_sub(1, gf_mul(Xi_inv, X[j])) )
# compute the product, which is the denominator of the Forney algorithm (errata locator derivative)
err_loc_prime = 1
for coef in err_loc_prime_tmp:
err_loc_prime = gf_mul(err_loc_prime, coef)
# equivalent to: err_loc_prime = functools.reduce(gf_mul, err_loc_prime_tmp, 1)
# Compute y (evaluation of the errata evaluator polynomial)
# This is a more faithful translation of the theoretical equation contrary to the old forney method. Here it is exactly copy/pasted from the included presentation decoding_rs.pdf: Yl = omega(Xl.inverse()) / prod(1 - Xj*Xl.inverse()) for j in len(X) (in the paper it's for j in s, but it's useless when len(X) < s because we compute neutral terms 1 for nothing, and wrong when correcting more than s erasures or erasures+errors since it prevents computing all required terms).
# Thus here this method works with erasures too because firstly we fixed the equation to be like the theoretical one (don't know why it was modified in _old_forney(), if it's an optimization, it doesn't enhance anything), and secondly because we removed the product bound on s, which prevented computing errors and erasures above the s=(n-k)//2 bound.
y = gf_poly_eval(err_eval[::-1], Xi_inv) # numerator of the Forney algorithm (errata evaluator evaluated)
y = gf_mul(gf_pow(Xi, 1-fcr), y) # adjust to fcr parameter
# Compute the magnitude
magnitude = gf_div(y, err_loc_prime) # magnitude value of the error, calculated by the Forney algorithm (an equation in fact): dividing the errata evaluator with the errata locator derivative gives us the errata magnitude (ie, value to repair) the ith symbol
E[err_pos[i]] = magnitude # store the magnitude for this error into the magnitude polynomial
# Apply the correction of values to get our message corrected! (note that the ecc bytes also gets corrected!)
# (this isn't the Forney algorithm, we just apply the result of decoding here)
msg = gf_poly_add(msg, E) # equivalent to Ci = Ri - Ei where Ci is the correct message, Ri the received (senseword) message, and Ei the errata magnitudes (minus is replaced by XOR since it's equivalent in GF(2^p)). So in fact here we subtract the errata magnitudes from the received message, which logically corrects the values to what they should be.
return msg | python | def rs_correct_errata(msg_in, synd, err_pos, fcr=0, generator=2): # err_pos is a list of the positions of the errors/erasures/errata
'''Forney algorithm, computes the values (error magnitude) to correct the input message.'''
global field_charac
msg = bytearray(msg_in)
# calculate errata locator polynomial to correct both errors and erasures (by combining the errors positions given by the error locator polynomial found by BM with the erasures positions given by caller)
coef_pos = [len(msg) - 1 - p for p in err_pos] # need to convert the positions to coefficients degrees for the errata locator algo to work (eg: instead of [0, 1, 2] it will become [len(msg)-1, len(msg)-2, len(msg) -3])
err_loc = rs_find_errata_locator(coef_pos, generator)
# calculate errata evaluator polynomial (often called Omega or Gamma in academic papers)
err_eval = rs_find_error_evaluator(synd[::-1], err_loc, len(err_loc)-1)[::-1]
# Second part of Chien search to get the error location polynomial X from the error positions in err_pos (the roots of the error locator polynomial, ie, where it evaluates to 0)
X = [] # will store the position of the errors
for i in xrange(len(coef_pos)):
l = field_charac - coef_pos[i]
X.append( gf_pow(generator, -l) )
# Forney algorithm: compute the magnitudes
E = bytearray(len(msg)) # will store the values that need to be corrected (subtracted) from the message containing errors. This is sometimes called the error magnitude polynomial.
Xlength = len(X)
for i, Xi in enumerate(X):
Xi_inv = gf_inverse(Xi)
# Compute the formal derivative of the error locator polynomial (see Blahut, Algebraic codes for data transmission, pp 196-197).
# the formal derivative of the errata locator is used as the denominator of the Forney Algorithm, which simply says that the ith error value is given by error_evaluator(gf_inverse(Xi)) / error_locator_derivative(gf_inverse(Xi)). See Blahut, Algebraic codes for data transmission, pp 196-197.
err_loc_prime_tmp = []
for j in xrange(Xlength):
if j != i:
err_loc_prime_tmp.append( gf_sub(1, gf_mul(Xi_inv, X[j])) )
# compute the product, which is the denominator of the Forney algorithm (errata locator derivative)
err_loc_prime = 1
for coef in err_loc_prime_tmp:
err_loc_prime = gf_mul(err_loc_prime, coef)
# equivalent to: err_loc_prime = functools.reduce(gf_mul, err_loc_prime_tmp, 1)
# Compute y (evaluation of the errata evaluator polynomial)
# This is a more faithful translation of the theoretical equation contrary to the old forney method. Here it is exactly copy/pasted from the included presentation decoding_rs.pdf: Yl = omega(Xl.inverse()) / prod(1 - Xj*Xl.inverse()) for j in len(X) (in the paper it's for j in s, but it's useless when len(X) < s because we compute neutral terms 1 for nothing, and wrong when correcting more than s erasures or erasures+errors since it prevents computing all required terms).
# Thus here this method works with erasures too because firstly we fixed the equation to be like the theoretical one (don't know why it was modified in _old_forney(), if it's an optimization, it doesn't enhance anything), and secondly because we removed the product bound on s, which prevented computing errors and erasures above the s=(n-k)//2 bound.
y = gf_poly_eval(err_eval[::-1], Xi_inv) # numerator of the Forney algorithm (errata evaluator evaluated)
y = gf_mul(gf_pow(Xi, 1-fcr), y) # adjust to fcr parameter
# Compute the magnitude
magnitude = gf_div(y, err_loc_prime) # magnitude value of the error, calculated by the Forney algorithm (an equation in fact): dividing the errata evaluator with the errata locator derivative gives us the errata magnitude (ie, value to repair) the ith symbol
E[err_pos[i]] = magnitude # store the magnitude for this error into the magnitude polynomial
# Apply the correction of values to get our message corrected! (note that the ecc bytes also gets corrected!)
# (this isn't the Forney algorithm, we just apply the result of decoding here)
msg = gf_poly_add(msg, E) # equivalent to Ci = Ri - Ei where Ci is the correct message, Ri the received (senseword) message, and Ei the errata magnitudes (minus is replaced by XOR since it's equivalent in GF(2^p)). So in fact here we subtract the errata magnitudes from the received message, which logically corrects the values to what they should be.
return msg | Forney algorithm, computes the values (error magnitude) to correct the input message. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/reedsolomon/reedsolo.py#L457-L505 |
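Editor's note: a hedged erasure-repair sketch (not from the repository; same init_tables() assumption). Because the decoder is told where the erasure is, the syndromes plus Forney's algorithm are enough to restore the lost symbol.

from reedsolo import init_tables, rs_encode_msg, rs_calc_syndromes, rs_correct_errata  # illustrative

init_tables()
msg = bytearray(b"hello world")
code = rs_encode_msg(msg, 10)
code[0] = 0                                 # pretend the first symbol was lost (an erasure)
synd = rs_calc_syndromes(code, 10)
fixed = rs_correct_errata(code, synd, [0])  # the decoder is told *where* the erasure is
assert fixed[:len(msg)] == msg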
lrq3000/pyFileFixity | pyFileFixity/lib/reedsolomon/reedsolo.py | rs_find_error_locator | def rs_find_error_locator(synd, nsym, erase_loc=None, erase_count=0):
'''Find error/errata locator and evaluator polynomials with Berlekamp-Massey algorithm'''
# The idea is that BM will iteratively estimate the error locator polynomial.
# To do this, it will compute a Discrepancy term called Delta, which will tell us if the error locator polynomial needs an update or not
# (hence why it's called discrepancy: it tells us when we are getting off board from the correct value).
# Init the polynomials
if erase_loc: # if the erasure locator polynomial is supplied, we init with its value, so that we include erasures in the final locator polynomial
err_loc = bytearray(erase_loc)
old_loc = bytearray(erase_loc)
else:
err_loc = bytearray([1]) # This is the main variable we want to fill, also called Sigma in other notations or more formally the errors/errata locator polynomial.
old_loc = bytearray([1]) # BM is an iterative algorithm, and we need the errata locator polynomial of the previous iteration in order to update other necessary variables.
#L = 0 # update flag variable, not needed here because we use an alternative equivalent way of checking if update is needed (but using the flag could potentially be faster depending on if using length(list) is taking linear time in your language, here in Python it's constant so it's as fast.
# Fix the syndrome shifting: when computing the syndrome, some implementations may prepend a 0 coefficient for the lowest degree term (the constant). This is a case of syndrome shifting, thus the syndrome will be bigger than the number of ecc symbols (I don't know what purpose serves this shifting). If that's the case, then we need to account for the syndrome shifting when we use the syndrome such as inside BM, by skipping those prepended coefficients.
# Another way to detect the shifting is to detect the 0 coefficients: by definition, a syndrome does not contain any 0 coefficient (except if there are no errors/erasures, in this case they are all 0). This however doesn't work with the modified Forney syndrome, which set to 0 the coefficients corresponding to erasures, leaving only the coefficients corresponding to errors.
synd_shift = 0
if len(synd) > nsym: synd_shift = len(synd) - nsym
for i in xrange(nsym-erase_count): # generally: nsym-erase_count == len(synd), except when you input a partial erase_loc and using the full syndrome instead of the Forney syndrome, in which case nsym-erase_count is more correct (len(synd) will fail badly with IndexError).
if erase_loc: # if an erasures locator polynomial was provided to init the errors locator polynomial, then we must skip the FIRST erase_count iterations (not the last iterations, this is very important!)
K = erase_count+i+synd_shift
else: # if erasures locator is not provided, then either there's no erasures to account or we use the Forney syndromes, so we don't need to use erase_count nor erase_loc (the erasures have been trimmed out of the Forney syndromes).
K = i+synd_shift
# Compute the discrepancy Delta
# Here is the close-to-the-books operation to compute the discrepancy Delta: it's a simple polynomial multiplication of error locator with the syndromes, and then we get the Kth element.
#delta = gf_poly_mul(err_loc[::-1], synd)[K] # theoretically it should be gf_poly_add(synd[::-1], [1])[::-1] instead of just synd, but it seems it's not absolutely necessary to correctly decode.
# But this can be optimized: since we only need the Kth element, we don't need to compute the polynomial multiplication for any other element but the Kth. Thus to optimize, we compute the polymul only at the item we need, skipping the rest (avoiding a nested loop, thus we are linear time instead of quadratic).
# This optimization is actually described in several figures of the book "Algebraic codes for data transmission", Blahut, Richard E., 2003, Cambridge university press.
delta = synd[K]
for j in xrange(1, len(err_loc)):
delta ^= gf_mul(err_loc[-(j+1)], synd[K - j]) # delta is also called discrepancy. Here we do a partial polynomial multiplication (ie, we compute the polynomial multiplication only for the term of degree K). Should be equivalent to brownanrs.polynomial.mul_at().
#print "delta", K, delta, list(gf_poly_mul(err_loc[::-1], synd)) # debugline
# Shift polynomials to compute the next degree
old_loc = old_loc + bytearray([0])
# Iteratively estimate the errata locator and evaluator polynomials
if delta != 0: # Update only if there's a discrepancy
if len(old_loc) > len(err_loc): # Rule B (rule A is implicitly defined because rule A just says that we skip any modification for this iteration)
#if 2*L <= K+erase_count: # equivalent to len(old_loc) > len(err_loc), as long as L is correctly computed
# Computing errata locator polynomial Sigma
new_loc = gf_poly_scale(old_loc, delta)
old_loc = gf_poly_scale(err_loc, gf_inverse(delta)) # effectively we are doing err_loc * 1/delta = err_loc // delta
err_loc = new_loc
# Update the update flag
#L = K - L # the update flag L is tricky: in Blahut's schema, it's mandatory to use `L = K - L - erase_count` (and indeed in a previous draft of this function, if you forgot to do `- erase_count` it would lead to correcting only 2*(errors+erasures) <= (n-k) instead of 2*errors+erasures <= (n-k)), but in this latest draft, this will lead to a wrong decoding in some cases where it should correctly decode! Thus you should try with and without `- erase_count` to update L on your own implementation and see which one works OK without producing wrong decoding failures.
# Update with the discrepancy
err_loc = gf_poly_add(err_loc, gf_poly_scale(old_loc, delta))
# Check if the result is correct, that there's not too many errors to correct
err_loc = list(itertools.dropwhile(lambda x: x == 0, err_loc)) # drop leading 0s, else errs will not be of the correct size
errs = len(err_loc) - 1
if (errs-erase_count) * 2 + erase_count > nsym:
raise ReedSolomonError("Too many errors to correct")
return err_loc | python | def rs_find_error_locator(synd, nsym, erase_loc=None, erase_count=0):
'''Find error/errata locator and evaluator polynomials with Berlekamp-Massey algorithm'''
# The idea is that BM will iteratively estimate the error locator polynomial.
# To do this, it will compute a Discrepancy term called Delta, which will tell us if the error locator polynomial needs an update or not
# (hence why it's called discrepancy: it tells us when we are getting off board from the correct value).
# Init the polynomials
if erase_loc: # if the erasure locator polynomial is supplied, we init with its value, so that we include erasures in the final locator polynomial
err_loc = bytearray(erase_loc)
old_loc = bytearray(erase_loc)
else:
err_loc = bytearray([1]) # This is the main variable we want to fill, also called Sigma in other notations or more formally the errors/errata locator polynomial.
old_loc = bytearray([1]) # BM is an iterative algorithm, and we need the errata locator polynomial of the previous iteration in order to update other necessary variables.
#L = 0 # update flag variable, not needed here because we use an alternative equivalent way of checking if update is needed (but using the flag could potentially be faster depending on if using length(list) is taking linear time in your language, here in Python it's constant so it's as fast.
# Fix the syndrome shifting: when computing the syndrome, some implementations may prepend a 0 coefficient for the lowest degree term (the constant). This is a case of syndrome shifting, thus the syndrome will be bigger than the number of ecc symbols (I don't know what purpose serves this shifting). If that's the case, then we need to account for the syndrome shifting when we use the syndrome such as inside BM, by skipping those prepended coefficients.
# Another way to detect the shifting is to detect the 0 coefficients: by definition, a syndrome does not contain any 0 coefficient (except if there are no errors/erasures, in this case they are all 0). This however doesn't work with the modified Forney syndrome, which set to 0 the coefficients corresponding to erasures, leaving only the coefficients corresponding to errors.
synd_shift = 0
if len(synd) > nsym: synd_shift = len(synd) - nsym
for i in xrange(nsym-erase_count): # generally: nsym-erase_count == len(synd), except when you input a partial erase_loc and using the full syndrome instead of the Forney syndrome, in which case nsym-erase_count is more correct (len(synd) will fail badly with IndexError).
if erase_loc: # if an erasures locator polynomial was provided to init the errors locator polynomial, then we must skip the FIRST erase_count iterations (not the last iterations, this is very important!)
K = erase_count+i+synd_shift
else: # if erasures locator is not provided, then either there's no erasures to account or we use the Forney syndromes, so we don't need to use erase_count nor erase_loc (the erasures have been trimmed out of the Forney syndromes).
K = i+synd_shift
# Compute the discrepancy Delta
# Here is the close-to-the-books operation to compute the discrepancy Delta: it's a simple polynomial multiplication of error locator with the syndromes, and then we get the Kth element.
#delta = gf_poly_mul(err_loc[::-1], synd)[K] # theoretically it should be gf_poly_add(synd[::-1], [1])[::-1] instead of just synd, but it seems it's not absolutely necessary to correctly decode.
# But this can be optimized: since we only need the Kth element, we don't need to compute the polynomial multiplication for any other element but the Kth. Thus to optimize, we compute the polymul only at the item we need, skipping the rest (avoiding a nested loop, thus we are linear time instead of quadratic).
# This optimization is actually described in several figures of the book "Algebraic codes for data transmission", Blahut, Richard E., 2003, Cambridge university press.
delta = synd[K]
for j in xrange(1, len(err_loc)):
delta ^= gf_mul(err_loc[-(j+1)], synd[K - j]) # delta is also called discrepancy. Here we do a partial polynomial multiplication (ie, we compute the polynomial multiplication only for the term of degree K). Should be equivalent to brownanrs.polynomial.mul_at().
#print "delta", K, delta, list(gf_poly_mul(err_loc[::-1], synd)) # debugline
# Shift polynomials to compute the next degree
old_loc = old_loc + bytearray([0])
# Iteratively estimate the errata locator and evaluator polynomials
if delta != 0: # Update only if there's a discrepancy
if len(old_loc) > len(err_loc): # Rule B (rule A is implicitly defined because rule A just says that we skip any modification for this iteration)
#if 2*L <= K+erase_count: # equivalent to len(old_loc) > len(err_loc), as long as L is correctly computed
# Computing errata locator polynomial Sigma
new_loc = gf_poly_scale(old_loc, delta)
old_loc = gf_poly_scale(err_loc, gf_inverse(delta)) # effectively we are doing err_loc * 1/delta = err_loc // delta
err_loc = new_loc
# Update the update flag
#L = K - L # the update flag L is tricky: in Blahut's schema, it's mandatory to use `L = K - L - erase_count` (and indeed in a previous draft of this function, if you forgot to do `- erase_count` it would lead to correcting only 2*(errors+erasures) <= (n-k) instead of 2*errors+erasures <= (n-k)), but in this latest draft, this will lead to a wrong decoding in some cases where it should correctly decode! Thus you should try with and without `- erase_count` to update L on your own implementation and see which one works OK without producing wrong decoding failures.
# Update with the discrepancy
err_loc = gf_poly_add(err_loc, gf_poly_scale(old_loc, delta))
# Check if the result is correct, that there's not too many errors to correct
err_loc = list(itertools.dropwhile(lambda x: x == 0, err_loc)) # drop leading 0s, else errs will not be of the correct size
errs = len(err_loc) - 1
if (errs-erase_count) * 2 + erase_count > nsym:
raise ReedSolomonError("Too many errors to correct")
return err_loc | Find error/errata locator and evaluator polynomials with Berlekamp-Massey algorithm | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/reedsolomon/reedsolo.py#L507-L566 |
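Editor's note: a hedged sketch (same assumptions as above) of what Berlekamp-Massey returns for a single unknown error: a locator polynomial of degree 1, one root per errata.

from reedsolo import init_tables, rs_encode_msg, rs_calc_syndromes, rs_find_error_locator  # illustrative

init_tables()
code = rs_encode_msg(bytearray(b"hello world"), 10)
code[5] ^= 0xff                   # one corrupted symbol, position unknown to the decoder
synd = rs_calc_syndromes(code, 10)
err_loc = rs_find_error_locator(synd, 10)
assert len(err_loc) - 1 == 1      # a single error yields a degree-1 locator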
lrq3000/pyFileFixity | pyFileFixity/lib/reedsolomon/reedsolo.py | rs_find_errata_locator | def rs_find_errata_locator(e_pos, generator=2):
'''Compute the erasures/errors/errata locator polynomial from the erasures/errors/errata positions (the positions must be relative to the x coefficient, eg: "hello worldxxxxxxxxx" is tampered to "h_ll_ worldxxxxxxxxx" with xxxxxxxxx being the ecc of length n-k=9, here the string positions are [1, 4], but the coefficients are reversed since the ecc characters are placed as the first coefficients of the polynomial, thus the coefficients of the erased characters are n-1 - [1, 4] = [18, 15] = erasures_loc to be specified as an argument.'''
# See: http://ocw.usu.edu/Electrical_and_Computer_Engineering/Error_Control_Coding/lecture7.pdf and Blahut, Richard E. "Transform techniques for error control codes." IBM Journal of Research and development 23.3 (1979): 299-315. http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.92.600&rep=rep1&type=pdf and also a MatLab implementation here: http://www.mathworks.com/matlabcentral/fileexchange/23567-reed-solomon-errors-and-erasures-decoder/content//RS_E_E_DEC.m
e_loc = [1] # just to init because we will multiply, so it must be 1 so that the multiplication starts correctly without nulling any term
# erasures_loc is very simple to compute: erasures_loc = prod(1 - x*alpha**i) for i in erasures_pos and where alpha is the alpha chosen to evaluate polynomials (here in this library it's gf(3)). To generate c*x where c is a constant, we simply generate a Polynomial([c, 0]) where 0 is the constant and c is positioned to be the coefficient for x^1.
for i in e_pos:
e_loc = gf_poly_mul( e_loc, gf_poly_add([1], [gf_pow(generator, i), 0]) )
return e_loc | python | def rs_find_errata_locator(e_pos, generator=2):
'''Compute the erasures/errors/errata locator polynomial from the erasures/errors/errata positions (the positions must be relative to the x coefficient, eg: "hello worldxxxxxxxxx" is tampered to "h_ll_ worldxxxxxxxxx" with xxxxxxxxx being the ecc of length n-k=9, here the string positions are [1, 4], but the coefficients are reversed since the ecc characters are placed as the first coefficients of the polynomial, thus the coefficients of the erased characters are n-1 - [1, 4] = [18, 15] = erasures_loc to be specified as an argument.'''
# See: http://ocw.usu.edu/Electrical_and_Computer_Engineering/Error_Control_Coding/lecture7.pdf and Blahut, Richard E. "Transform techniques for error control codes." IBM Journal of Research and development 23.3 (1979): 299-315. http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.92.600&rep=rep1&type=pdf and also a MatLab implementation here: http://www.mathworks.com/matlabcentral/fileexchange/23567-reed-solomon-errors-and-erasures-decoder/content//RS_E_E_DEC.m
e_loc = [1] # just to init because we will multiply, so it must be 1 so that the multiplication starts correctly without nulling any term
# erasures_loc is very simple to compute: erasures_loc = prod(1 - x*alpha**i) for i in erasures_pos and where alpha is the alpha chosen to evaluate polynomials (here in this library it's gf(3)). To generate c*x where c is a constant, we simply generate a Polynomial([c, 0]) where 0 is the constant and c is positioned to be the coefficient for x^1.
for i in e_pos:
e_loc = gf_poly_mul( e_loc, gf_poly_add([1], [gf_pow(generator, i), 0]) )
return e_loc | Compute the erasures/errors/errata locator polynomial from the erasures/errors/errata positions (the positions must be relative to the x coefficient, eg: "hello worldxxxxxxxxx" is tampered to "h_ll_ worldxxxxxxxxx" with xxxxxxxxx being the ecc of length n-k=9, here the string positions are [1, 4], but the coefficients are reversed since the ecc characters are placed as the first coefficients of the polynomial, thus the coefficients of the erased characters are n-1 - [1, 4] = [18, 15] = erasures_loc to be specified as an argument. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/reedsolomon/reedsolo.py#L568-L575 |
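Editor's note: a hedged spot check (same init_tables() assumption). For a single erasure whose coefficient degree is 3 and the default generator alpha = 2, the locator is 1 + alpha^3 * x, stored highest degree first.

from reedsolo import init_tables, rs_find_errata_locator  # illustrative import path

init_tables()
assert list(rs_find_errata_locator([3])) == [8, 1]  # alpha^3 = 8, constant term 1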
lrq3000/pyFileFixity | pyFileFixity/lib/reedsolomon/reedsolo.py | rs_find_error_evaluator | def rs_find_error_evaluator(synd, err_loc, nsym):
'''Compute the error (or erasures if you supply sigma=erasures locator polynomial, or errata) evaluator polynomial Omega from the syndrome and the error/erasures/errata locator Sigma. Omega is already computed at the same time as Sigma inside the Berlekamp-Massey implemented above, but in case you modify Sigma, you can recompute Omega afterwards using this method, or just ensure that Omega computed by BM is correct given Sigma.'''
# Omega(x) = [ Synd(x) * Error_loc(x) ] mod x^(n-k+1)
_, remainder = gf_poly_div( gf_poly_mul(synd, err_loc), ([1] + [0]*(nsym+1)) ) # first multiply syndromes * errata_locator, then do a polynomial division to truncate the polynomial to the required length
# Faster way that is equivalent
#remainder = gf_poly_mul(synd, err_loc) # first multiply the syndromes with the errata locator polynomial
#remainder = remainder[len(remainder)-(nsym+1):] # then divide by a polynomial of the length we want, which is equivalent to slicing the list (which represents the polynomial)
return remainder | python | def rs_find_error_evaluator(synd, err_loc, nsym):
'''Compute the error (or erasures if you supply sigma=erasures locator polynomial, or errata) evaluator polynomial Omega from the syndrome and the error/erasures/errata locator Sigma. Omega is already computed at the same time as Sigma inside the Berlekamp-Massey implemented above, but in case you modify Sigma, you can recompute Omega afterwards using this method, or just ensure that Omega computed by BM is correct given Sigma.'''
# Omega(x) = [ Synd(x) * Error_loc(x) ] mod x^(n-k+1)
_, remainder = gf_poly_div( gf_poly_mul(synd, err_loc), ([1] + [0]*(nsym+1)) ) # first multiply syndromes * errata_locator, then do a polynomial division to truncate the polynomial to the required length
# Faster way that is equivalent
#remainder = gf_poly_mul(synd, err_loc) # first multiply the syndromes with the errata locator polynomial
#remainder = remainder[len(remainder)-(nsym+1):] # then divide by a polynomial of the length we want, which is equivalent to slicing the list (which represents the polynomial)
return remainder | Compute the error (or erasures if you supply sigma=erasures locator polynomial, or errata) evaluator polynomial Omega from the syndrome and the error/erasures/errata locator Sigma. Omega is already computed at the same time as Sigma inside the Berlekamp-Massey implemented above, but in case you modify Sigma, you can recompute Omega afterwards using this method, or just ensure that Omega computed by BM is correct given Sigma. | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/reedsolomon/reedsolo.py#L577-L586 |
lrq3000/pyFileFixity | pyFileFixity/lib/reedsolomon/reedsolo.py | rs_find_errors | def rs_find_errors(err_loc, nmess, generator=2):
'''Find the roots (ie, where evaluation = zero) of error polynomial by bruteforce trial, this is a sort of Chien's search (but less efficient, Chien's search is a way to evaluate the polynomial such that each evaluation only takes constant time).'''
# nmess = length of whole codeword (message + ecc symbols)
errs = len(err_loc) - 1
err_pos = []
for i in xrange(nmess): # normally we should try all 2^8 possible values, but here we optimize to just check the interesting symbols
if gf_poly_eval(err_loc, gf_pow(generator, i)) == 0: # It's a 0? Bingo, it's a root of the error locator polynomial, in other terms this is the location of an error
err_pos.append(nmess - 1 - i)
# Sanity check: the number of errors/errata positions found should be exactly the same as the length of the errata locator polynomial
if len(err_pos) != errs:
# TODO: to decode messages+ecc with length n > 255, we may try to use a bruteforce approach: the correct positions ARE in the final array j, but the problem is because we are above the Galois Field's range, there is a wraparound so that for example if j should be [0, 1, 2, 3], we will also get [255, 256, 257, 258] (because 258 % 255 == 3, same for the other values), so we can't discriminate. The issue is that fixing any errs_nb errors among those will always give a correct output message (in the sense that the syndrome will be all 0), so we may not even be able to check if that's correct or not, so I'm not sure the bruteforce approach may even be possible.
raise ReedSolomonError("Too many (or few) errors found by Chien Search for the errata locator polynomial!")
return err_pos | python | def rs_find_errors(err_loc, nmess, generator=2):
'''Find the roots (ie, where evaluation = zero) of error polynomial by bruteforce trial, this is a sort of Chien's search (but less efficient, Chien's search is a way to evaluate the polynomial such that each evaluation only takes constant time).'''
# nmess = length of whole codeword (message + ecc symbols)
errs = len(err_loc) - 1
err_pos = []
for i in xrange(nmess): # normally we should try all 2^8 possible values, but here we optimize to just check the interesting symbols
if gf_poly_eval(err_loc, gf_pow(generator, i)) == 0: # It's a 0? Bingo, it's a root of the error locator polynomial, in other terms this is the location of an error
err_pos.append(nmess - 1 - i)
# Sanity check: the number of errors/errata positions found should be exactly the same as the length of the errata locator polynomial
if len(err_pos) != errs:
# TODO: to decode messages+ecc with length n > 255, we may try to use a bruteforce approach: the correct positions ARE in the final array j, but the problem is because we are above the Galois Field's range, there is a wraparound so that for example if j should be [0, 1, 2, 3], we will also get [255, 256, 257, 258] (because 258 % 255 == 3, same for the other values), so we can't discriminate. The issue is that fixing any errs_nb errors among those will always give a correct output message (in the sense that the syndrome will be all 0), so we may not even be able to check if that's correct or not, so I'm not sure the bruteforce approach may even be possible.
raise ReedSolomonError("Too many (or few) errors found by Chien Search for the errata locator polynomial!")
return err_pos | Find the roots (ie, where evaluation = zero) of error polynomial by bruteforce trial, this is a sort of Chien's search (but less efficient, Chien's search is a way to evaluate the polynomial such that each evaluation only takes constant time). | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/reedsolomon/reedsolo.py#L588-L600 |
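Editor's note: a hedged end-to-end location sketch (same assumptions as above), chaining the syndromes, Berlekamp-Massey and this Chien-style search to recover the index of a single corrupted symbol.

from reedsolo import init_tables, rs_encode_msg, rs_calc_syndromes, rs_find_error_locator, rs_find_errors  # illustrative

init_tables()
code = rs_encode_msg(bytearray(b"hello world"), 10)
code[5] ^= 0xff
synd = rs_calc_syndromes(code, 10)
err_loc = rs_find_error_locator(synd, 10)
pos = rs_find_errors(err_loc[::-1], len(code))  # note the reversal, mirroring rs_correct_msg()
assert pos == [5]                               # the search recovers the corrupted index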
lrq3000/pyFileFixity | pyFileFixity/lib/reedsolomon/reedsolo.py | rs_correct_msg | def rs_correct_msg(msg_in, nsym, fcr=0, generator=2, erase_pos=None, only_erasures=False):
'''Reed-Solomon main decoding function'''
global field_charac
if len(msg_in) > field_charac:
# Note that it is in fact possible to encode/decode messages that are longer than field_charac, but because this will be above the field, this will generate more error positions during Chien Search than it should, because this will generate duplicate values, which should normally be prevented thanks to the prime polynomial reduction (eg, because it can't discriminate between error at position 1 or 256, both being exactly equal under galois field 2^8). So it's really not advised to do it, but it's possible (but then you're not guaranteed to be able to correct any error/erasure on symbols with a position above the length of field_charac -- if you really need a bigger message without chunking, then you should enlarge c_exp so that you get a bigger field).
raise ValueError("Message is too long (%i when max is %i)" % (len(msg_in), field_charac))
msg_out = bytearray(msg_in) # copy of message
# erasures: set them to null bytes for easier decoding (but this is not necessary, they will be corrected anyway, but debugging will be easier with null bytes because the error locator polynomial values will only depend on the errors locations, not their values)
if erase_pos is None:
erase_pos = []
else:
for e_pos in erase_pos:
msg_out[e_pos] = 0
# check if there are too many erasures to correct (beyond the Singleton bound)
if len(erase_pos) > nsym: raise ReedSolomonError("Too many erasures to correct")
# prepare the syndrome polynomial using only errors (ie: errors = characters that were either replaced by null byte or changed to another character, but we don't know their positions)
synd = rs_calc_syndromes(msg_out, nsym, fcr, generator)
# check if there's any error/erasure in the input codeword. If not (all syndromes coefficients are 0), then just return the codeword as-is.
if max(synd) == 0:
return msg_out[:-nsym], msg_out[-nsym:] # no errors
# Find errors locations
if only_erasures:
err_pos = []
else:
# compute the Forney syndromes, which hide the erasures from the original syndrome (so that BM will just have to deal with errors, not erasures)
fsynd = rs_forney_syndromes(synd, erase_pos, len(msg_out), generator)
# compute the error locator polynomial using Berlekamp-Massey
err_loc = rs_find_error_locator(fsynd, nsym, erase_count=len(erase_pos))
# locate the message errors using Chien search (or bruteforce search)
err_pos = rs_find_errors(err_loc[::-1], len(msg_out), generator)
if err_pos is None:
raise ReedSolomonError("Could not locate error")
# Find errors values and apply them to correct the message
# compute errata evaluator and errata magnitude polynomials, then correct errors and erasures
msg_out = rs_correct_errata(msg_out, synd, (erase_pos + err_pos), fcr, generator) # note that we here use the original syndrome, not the forney syndrome (because we will correct both errors and erasures, so we need the full syndrome)
# check if the final message is fully repaired
synd = rs_calc_syndromes(msg_out, nsym, fcr, generator)
if max(synd) > 0:
raise ReedSolomonError("Could not correct message")
# return the successfully decoded message
return msg_out[:-nsym], msg_out[-nsym:] | python | def rs_correct_msg(msg_in, nsym, fcr=0, generator=2, erase_pos=None, only_erasures=False):
'''Reed-Solomon main decoding function'''
global field_charac
if len(msg_in) > field_charac:
# Note that it is in fact possible to encode/decode messages that are longer than field_charac, but because this will be above the field, this will generate more error positions during Chien Search than it should, because this will generate duplicate values, which should normally be prevented thanks to the prime polynomial reduction (eg, because it can't discriminate between error at position 1 or 256, both being exactly equal under galois field 2^8). So it's really not advised to do it, but it's possible (but then you're not guaranteed to be able to correct any error/erasure on symbols with a position above the length of field_charac -- if you really need a bigger message without chunking, then you should enlarge c_exp so that you get a bigger field).
raise ValueError("Message is too long (%i when max is %i)" % (len(msg_in), field_charac))
msg_out = bytearray(msg_in) # copy of message
# erasures: set them to null bytes for easier decoding (but this is not necessary, they will be corrected anyway, but debugging will be easier with null bytes because the error locator polynomial values will only depend on the errors locations, not their values)
if erase_pos is None:
erase_pos = []
else:
for e_pos in erase_pos:
msg_out[e_pos] = 0
# check if there are too many erasures to correct (beyond the Singleton bound)
if len(erase_pos) > nsym: raise ReedSolomonError("Too many erasures to correct")
# prepare the syndrome polynomial using only errors (ie: errors = characters that were either replaced by null byte or changed to another character, but we don't know their positions)
synd = rs_calc_syndromes(msg_out, nsym, fcr, generator)
# check if there's any error/erasure in the input codeword. If not (all syndromes coefficients are 0), then just return the codeword as-is.
if max(synd) == 0:
return msg_out[:-nsym], msg_out[-nsym:] # no errors
# Find errors locations
if only_erasures:
err_pos = []
else:
# compute the Forney syndromes, which hide the erasures from the original syndrome (so that BM will just have to deal with errors, not erasures)
fsynd = rs_forney_syndromes(synd, erase_pos, len(msg_out), generator)
# compute the error locator polynomial using Berlekamp-Massey
err_loc = rs_find_error_locator(fsynd, nsym, erase_count=len(erase_pos))
# locate the message errors using Chien search (or bruteforce search)
err_pos = rs_find_errors(err_loc[::-1], len(msg_out), generator)
if err_pos is None:
raise ReedSolomonError("Could not locate error")
# Find errors values and apply them to correct the message
# compute errata evaluator and errata magnitude polynomials, then correct errors and erasures
msg_out = rs_correct_errata(msg_out, synd, (erase_pos + err_pos), fcr, generator) # note that we here use the original syndrome, not the forney syndrome (because we will correct both errors and erasures, so we need the full syndrome)
# check if the final message is fully repaired
synd = rs_calc_syndromes(msg_out, nsym, fcr, generator)
if max(synd) > 0:
raise ReedSolomonError("Could not correct message")
# return the successfully decoded message
return msg_out[:-nsym], msg_out[-nsym:] | Reed-Solomon main decoding function | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/reedsolomon/reedsolo.py#L622-L665 |
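A minimal usage sketch for rs_correct_msg (not taken from the repository). It assumes the same module also provides init_tables() and rs_encode_msg(); rs_encode_msg is referenced by RSCodec.encode below, while init_tables() and the import path are assumptions.
# assumed import path, derived from the file path of this row:
# from pyFileFixity.lib.reedsolomon.reedsolo import init_tables, rs_encode_msg, rs_correct_msg
init_tables()                                 # assumed helper that builds the GF(2^8) exp/log tables
msg = bytearray(b"hello world")
codeword = rs_encode_msg(msg, 10)             # append nsym=10 ecc symbols
codeword[0] ^= 0xFF                           # corrupt two symbols
codeword[5] ^= 0x42
repaired, ecc = rs_correct_msg(codeword, 10)  # up to nsym//2 = 5 errors are correctable
assert repaired == msg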
lrq3000/pyFileFixity | pyFileFixity/lib/reedsolomon/reedsolo.py | rs_correct_msg_nofsynd | def rs_correct_msg_nofsynd(msg_in, nsym, fcr=0, generator=2, erase_pos=None, only_erasures=False):
'''Reed-Solomon main decoding function, without using the modified Forney syndromes'''
global field_charac
if len(msg_in) > field_charac:
raise ValueError("Message is too long (%i when max is %i)" % (len(msg_in), field_charac))
msg_out = bytearray(msg_in) # copy of message
# erasures: set them to null bytes for easier decoding (but this is not necessary, they will be corrected anyway, but debugging will be easier with null bytes because the error locator polynomial values will only depend on the errors locations, not their values)
if erase_pos is None:
erase_pos = []
else:
for e_pos in erase_pos:
msg_out[e_pos] = 0
# check if there are too many erasures
if len(erase_pos) > nsym: raise ReedSolomonError("Too many erasures to correct")
# prepare the syndrome polynomial using only errors (ie: errors = characters that were either replaced by null byte or changed to another character, but we don't know their positions)
synd = rs_calc_syndromes(msg_out, nsym, fcr, generator)
# check if there's any error/erasure in the input codeword. If not (all syndromes coefficients are 0), then just return the codeword as-is.
if max(synd) == 0:
return msg_out[:-nsym], msg_out[-nsym:] # no errors
# prepare erasures locator and evaluator polynomials
erase_loc = None
#erase_eval = None
erase_count = 0
if erase_pos:
erase_count = len(erase_pos)
erase_pos_reversed = [len(msg_out)-1-eras for eras in erase_pos]
erase_loc = rs_find_errata_locator(erase_pos_reversed, generator=generator)
#erase_eval = rs_find_error_evaluator(synd[::-1], erase_loc, len(erase_loc)-1)
# prepare errors/errata locator polynomial
if only_erasures:
err_loc = erase_loc[::-1]
#err_eval = erase_eval[::-1]
else:
err_loc = rs_find_error_locator(synd, nsym, erase_loc=erase_loc, erase_count=erase_count)
err_loc = err_loc[::-1]
#err_eval = rs_find_error_evaluator(synd[::-1], err_loc[::-1], len(err_loc)-1)[::-1] # find error/errata evaluator polynomial (not really necessary since we already compute it at the same time as the error locator poly in BM)
# locate the message errors
err_pos = rs_find_errors(err_loc, len(msg_out), generator) # find the roots of the errata locator polynomial (ie: the positions of the errors/errata)
if err_pos is None:
raise ReedSolomonError("Could not locate error")
# compute errata evaluator and errata magnitude polynomials, then correct errors and erasures
msg_out = rs_correct_errata(msg_out, synd, err_pos, fcr=fcr, generator=generator)
# check if the final message is fully repaired
synd = rs_calc_syndromes(msg_out, nsym, fcr, generator)
if max(synd) > 0:
raise ReedSolomonError("Could not correct message")
# return the successfully decoded message
return msg_out[:-nsym], msg_out[-nsym:] | python | def rs_correct_msg_nofsynd(msg_in, nsym, fcr=0, generator=2, erase_pos=None, only_erasures=False):
'''Reed-Solomon main decoding function, without using the modified Forney syndromes'''
global field_charac
if len(msg_in) > field_charac:
raise ValueError("Message is too long (%i when max is %i)" % (len(msg_in), field_charac))
msg_out = bytearray(msg_in) # copy of message
# erasures: set them to null bytes for easier decoding (but this is not necessary, they will be corrected anyway, but debugging will be easier with null bytes because the error locator polynomial values will only depend on the errors locations, not their values)
if erase_pos is None:
erase_pos = []
else:
for e_pos in erase_pos:
msg_out[e_pos] = 0
# check if there are too many erasures
if len(erase_pos) > nsym: raise ReedSolomonError("Too many erasures to correct")
# prepare the syndrome polynomial using only errors (ie: errors = characters that were either replaced by null byte or changed to another character, but we don't know their positions)
synd = rs_calc_syndromes(msg_out, nsym, fcr, generator)
# check if there's any error/erasure in the input codeword. If not (all syndromes coefficients are 0), then just return the codeword as-is.
if max(synd) == 0:
return msg_out[:-nsym], msg_out[-nsym:] # no errors
# prepare erasures locator and evaluator polynomials
erase_loc = None
#erase_eval = None
erase_count = 0
if erase_pos:
erase_count = len(erase_pos)
erase_pos_reversed = [len(msg_out)-1-eras for eras in erase_pos]
erase_loc = rs_find_errata_locator(erase_pos_reversed, generator=generator)
#erase_eval = rs_find_error_evaluator(synd[::-1], erase_loc, len(erase_loc)-1)
# prepare errors/errata locator polynomial
if only_erasures:
err_loc = erase_loc[::-1]
#err_eval = erase_eval[::-1]
else:
err_loc = rs_find_error_locator(synd, nsym, erase_loc=erase_loc, erase_count=erase_count)
err_loc = err_loc[::-1]
#err_eval = rs_find_error_evaluator(synd[::-1], err_loc[::-1], len(err_loc)-1)[::-1] # find error/errata evaluator polynomial (not really necessary since we already compute it at the same time as the error locator poly in BM)
# locate the message errors
err_pos = rs_find_errors(err_loc, len(msg_out), generator) # find the roots of the errata locator polynomial (ie: the positions of the errors/errata)
if err_pos is None:
raise ReedSolomonError("Could not locate error")
# compute errata evaluator and errata magnitude polynomials, then correct errors and erasures
msg_out = rs_correct_errata(msg_out, synd, err_pos, fcr=fcr, generator=generator)
# check if the final message is fully repaired
synd = rs_calc_syndromes(msg_out, nsym, fcr, generator)
if max(synd) > 0:
raise ReedSolomonError("Could not correct message")
# return the successfully decoded message
return msg_out[:-nsym], msg_out[-nsym:] | Reed-Solomon main decoding function, without using the modified Forney syndromes | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/reedsolomon/reedsolo.py#L667-L719 |
lrq3000/pyFileFixity | pyFileFixity/lib/reedsolomon/reedsolo.py | rs_check | def rs_check(msg, nsym, fcr=0, generator=2):
'''Returns true if the message + ecc has no error, or false otherwise (may not always catch a wrong decoding or a wrong message, particularly if there are too many errors -- above the Singleton bound --, but it usually does)'''
return ( max(rs_calc_syndromes(msg, nsym, fcr, generator)) == 0 ) | python | def rs_check(msg, nsym, fcr=0, generator=2):
'''Returns true if the message + ecc has no error, or false otherwise (may not always catch a wrong decoding or a wrong message, particularly if there are too many errors -- above the Singleton bound --, but it usually does)'''
return ( max(rs_calc_syndromes(msg, nsym, fcr, generator)) == 0 ) | Returns true if the message + ecc has no error, or false otherwise (may not always catch a wrong decoding or a wrong message, particularly if there are too many errors -- above the Singleton bound --, but it usually does) | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/reedsolomon/reedsolo.py#L721-L723 |
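A short hedged sketch of rs_check as a cheap integrity test, under the same assumptions as the rs_correct_msg sketch above (initialized GF tables, rs_encode_msg available):
codeword = rs_encode_msg(bytearray(b"hello world"), 10)
assert rs_check(codeword, 10)       # every syndrome coefficient is zero
codeword[3] ^= 0x01                 # flip a single bit
assert not rs_check(codeword, 10)   # a non-zero syndrome reveals the corruption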
lrq3000/pyFileFixity | pyFileFixity/lib/reedsolomon/reedsolo.py | RSCodec.encode | def encode(self, data):
'''Encode a message (ie, add the ecc symbols) using Reed-Solomon, whatever the length of the message because we use chunking'''
if isinstance(data, str):
data = bytearray(data, "latin-1")
chunk_size = self.nsize - self.nsym
enc = bytearray()
for i in xrange(0, len(data), chunk_size):
chunk = data[i:i+chunk_size]
enc.extend(rs_encode_msg(chunk, self.nsym, fcr=self.fcr, generator=self.generator))
return enc | python | def encode(self, data):
'''Encode a message (ie, add the ecc symbols) using Reed-Solomon, whatever the length of the message because we use chunking'''
if isinstance(data, str):
data = bytearray(data, "latin-1")
chunk_size = self.nsize - self.nsym
enc = bytearray()
for i in xrange(0, len(data), chunk_size):
chunk = data[i:i+chunk_size]
enc.extend(rs_encode_msg(chunk, self.nsym, fcr=self.fcr, generator=self.generator))
return enc | Encode a message (ie, add the ecc symbols) using Reed-Solomon, whatever the length of the message because we use chunking | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/reedsolomon/reedsolo.py#L758-L767 |
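A hedged usage sketch of the chunked encoder; the RSCodec(nsym) constructor signature and the import path are assumptions, the rest follows from the code above.
# assumed import path, derived from the file path of this row:
# from pyFileFixity.lib.reedsolomon.reedsolo import RSCodec
rsc = RSCodec(10)      # assumed constructor: 10 ecc symbols per 255-byte chunk
encoded = rsc.encode(b"some data that may span several chunks")
# every chunk of up to nsize - nsym = 245 input bytes grows by 10 ecc bytes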
lrq3000/pyFileFixity | pyFileFixity/lib/reedsolomon/reedsolo.py | RSCodec.decode | def decode(self, data, erase_pos=None, only_erasures=False):
'''Repair a message, whatever its size is, by using chunking'''
# erase_pos is a list of positions where you know (or greatly suspect at least) there is an erasure (ie, wrong character but you know it's at this position). Just input the list of all positions you know there are errors, and this method will automatically split the erasures positions to attach to the corresponding data chunk.
if isinstance(data, str):
data = bytearray(data, "latin-1")
dec = bytearray()
for i in xrange(0, len(data), self.nsize):
# Split the long message in a chunk
chunk = data[i:i+self.nsize]
# Extract the erasures for this chunk
e_pos = []
if erase_pos:
# First extract the erasures for this chunk (all erasures below the maximum chunk length)
e_pos = [x for x in erase_pos if x <= self.nsize]
# Then remove the extracted erasures from the big list and also decrement all subsequent position values by nsize (the current chunk's size) so as to prepare the correct alignment for the next iteration
erase_pos = [x - (self.nsize+1) for x in erase_pos if x > self.nsize]
# Decode/repair this chunk!
dec.extend(rs_correct_msg(chunk, self.nsym, fcr=self.fcr, generator=self.generator, erase_pos=e_pos, only_erasures=only_erasures)[0])
return dec | python | def decode(self, data, erase_pos=None, only_erasures=False):
'''Repair a message, whatever its size is, by using chunking'''
# erase_pos is a list of positions where you know (or greatly suspect at least) there is an erasure (ie, wrong character but you know it's at this position). Just input the list of all positions you know there are errors, and this method will automatically split the erasures positions to attach to the corresponding data chunk.
if isinstance(data, str):
data = bytearray(data, "latin-1")
dec = bytearray()
for i in xrange(0, len(data), self.nsize):
# Split the long message in a chunk
chunk = data[i:i+self.nsize]
# Extract the erasures for this chunk
e_pos = []
if erase_pos:
# First extract the erasures for this chunk (all erasures below the maximum chunk length)
e_pos = [x for x in erase_pos if x <= self.nsize]
# Then remove the extracted erasures from the big list and also decrement all subsequent position values by nsize (the current chunk's size) so as to prepare the correct alignment for the next iteration
erase_pos = [x - (self.nsize+1) for x in erase_pos if x > self.nsize]
# Decode/repair this chunk!
dec.extend(rs_correct_msg(chunk, self.nsym, fcr=self.fcr, generator=self.generator, erase_pos=e_pos, only_erasures=only_erasures)[0])
return dec | Repair a message, whatever its size is, by using chunking | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/reedsolomon/reedsolo.py#L769-L787 |
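A hedged round-trip sketch for decode with a few corrupted bytes, reusing the assumed RSCodec(nsym) constructor from the encode sketch above.
rsc = RSCodec(10)
original = bytearray(b"hello world" * 50)   # 550 bytes, i.e. three chunks
encoded = bytearray(rsc.encode(original))
encoded[2] ^= 0xFF                          # error in the first chunk
encoded[300] ^= 0xFF                        # error in the second chunk
assert rsc.decode(encoded) == original      # each chunk is repaired independently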
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py | recurse | def recurse( record, index, stop_types=STOP_TYPES,already_seen=None, type_group=False ):
"""Depth first traversal of a tree, all children are yielded before parent
record -- dictionary record to be recursed upon
index -- mapping 'address' ids to dictionary records
stop_types -- types which will *not* recurse
already_seen -- set storing already-visited nodes
yields the traversed nodes
"""
if already_seen is None:
already_seen = set()
if record['address'] not in already_seen:
already_seen.add(record['address'])
if 'refs' in record:
for child in children( record, index, stop_types=stop_types ):
if child['address'] not in already_seen:
for descendant in recurse(
child, index, stop_types,
already_seen=already_seen, type_group=type_group,
):
yield descendant
yield record | python | def recurse( record, index, stop_types=STOP_TYPES,already_seen=None, type_group=False ):
"""Depth first traversal of a tree, all children are yielded before parent
record -- dictionary record to be recursed upon
index -- mapping 'address' ids to dictionary records
stop_types -- types which will *not* recurse
already_seen -- set storing already-visited nodes
yields the traversed nodes
"""
if already_seen is None:
already_seen = set()
if record['address'] not in already_seen:
already_seen.add(record['address'])
if 'refs' in record:
for child in children( record, index, stop_types=stop_types ):
if child['address'] not in already_seen:
for descendant in recurse(
child, index, stop_types,
already_seen=already_seen, type_group=type_group,
):
yield descendant
yield record | Depth first traversal of a tree, all children are yielded before parent
record -- dictionary record to be recursed upon
index -- mapping 'address' ids to dictionary records
stop_types -- types which will *not* recurse
already_seen -- set storing already-visited nodes
yields the traversed nodes | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py#L33-L55 |
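A toy illustration of recurse() over a hand-built, hypothetical meliae-style index; the traversal only needs 'address', 'type' and 'refs', the records and sizes are made up, and the import of recurse from meliaeloader.py is left implicit.
index = {
    1: {'address': 1, 'type': 'list', 'size': 80, 'refs': [2, 3]},
    2: {'address': 2, 'type': 'str', 'size': 30, 'refs': []},
    3: {'address': 3, 'type': 'str', 'size': 30, 'refs': []},
}
order = [r['address'] for r in recurse(index[1], index, stop_types=())]
assert order == [2, 3, 1]   # children are yielded before their parent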
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py | find_loops | def find_loops( record, index, stop_types = STOP_TYPES, open=None, seen = None ):
"""Find all loops within the index and replace with loop records"""
if open is None:
open = []
if seen is None:
seen = set()
for child in children( record, index, stop_types = stop_types ):
if child['type'] in stop_types or child['type'] == LOOP_TYPE:
continue
if child['address'] in open:
# loop has been found
start = open.index( child['address'] )
new = frozenset( open[start:] )
if new not in seen:
seen.add(new)
yield new
elif child['address'] in seen:
continue
else:
seen.add( child['address'])
open.append( child['address'] )
for loop in find_loops( child, index, stop_types=stop_types, open=open, seen=seen ):
yield loop
open.pop( -1 ) | python | def find_loops( record, index, stop_types = STOP_TYPES, open=None, seen = None ):
"""Find all loops within the index and replace with loop records"""
if open is None:
open = []
if seen is None:
seen = set()
for child in children( record, index, stop_types = stop_types ):
if child['type'] in stop_types or child['type'] == LOOP_TYPE:
continue
if child['address'] in open:
# loop has been found
start = open.index( child['address'] )
new = frozenset( open[start:] )
if new not in seen:
seen.add(new)
yield new
elif child['address'] in seen:
continue
else:
seen.add( child['address'])
open.append( child['address'] )
for loop in find_loops( child, index, stop_types=stop_types, open=open, seen=seen ):
yield loop
open.pop( -1 ) | Find all loops within the index and replace with loop records | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py#L57-L80 |
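A toy illustration of find_loops() on hypothetical records where addresses 2 and 3 reference each other:
index = {
    1: {'address': 1, 'type': 'list', 'refs': [2]},
    2: {'address': 2, 'type': 'dict', 'refs': [3]},
    3: {'address': 3, 'type': 'dict', 'refs': [2]},
}
loops = list(find_loops(index[1], index, stop_types=()))
assert loops == [frozenset([2, 3])]   # one loop, reported as a frozenset of addresses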
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py | promote_loops | def promote_loops( loops, index, shared ):
"""Turn loops into "objects" that can be processed normally"""
for loop in loops:
loop = list(loop)
members = [index[addr] for addr in loop]
external_parents = list(set([
addr for addr in sum([shared.get(addr,[]) for addr in loop],[])
if addr not in loop
]))
if external_parents:
if len(external_parents) == 1:
# potentially a loop that's been looped...
parent = index.get( external_parents[0] )
if parent['type'] == LOOP_TYPE:
continue
# we haven't already been looped...
loop_addr = new_address( index )
shared[loop_addr] = external_parents
loop_record = index[loop_addr] = {
'address': loop_addr,
'refs': loop,
'parents': external_parents,
'type': LOOP_TYPE,
'size': 0,
}
for member in members:
# member's references must *not* point to loop...
member['refs'] = [
ref for ref in member['refs']
if ref not in loop
]
# member's parents are *just* the loop
member['parents'][:] = [loop_addr]
# each referent to loop holds a single reference to the loop rather than many to children
for parent in external_parents:
parent = index[parent]
for member in members:
rewrite_references( parent['refs'], member['address'], None )
parent['refs'].append( loop_addr ) | python | def promote_loops( loops, index, shared ):
"""Turn loops into "objects" that can be processed normally"""
for loop in loops:
loop = list(loop)
members = [index[addr] for addr in loop]
external_parents = list(set([
addr for addr in sum([shared.get(addr,[]) for addr in loop],[])
if addr not in loop
]))
if external_parents:
if len(external_parents) == 1:
# potentially a loop that's been looped...
parent = index.get( external_parents[0] )
if parent['type'] == LOOP_TYPE:
continue
# we haven't already been looped...
loop_addr = new_address( index )
shared[loop_addr] = external_parents
loop_record = index[loop_addr] = {
'address': loop_addr,
'refs': loop,
'parents': external_parents,
'type': LOOP_TYPE,
'size': 0,
}
for member in members:
# member's references must *not* point to loop...
member['refs'] = [
ref for ref in member['refs']
if ref not in loop
]
# member's parents are *just* the loop
member['parents'][:] = [loop_addr]
# each referent to loop holds a single reference to the loop rather than many to children
for parent in external_parents:
parent = index[parent]
for member in members:
rewrite_references( parent['refs'], member['address'], None )
parent['refs'].append( loop_addr ) | Turn loops into "objects" that can be processed normally | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py#L82-L120 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py | children | def children( record, index, key='refs', stop_types=STOP_TYPES ):
"""Retrieve children records for given record"""
result = []
for ref in record.get( key,[]):
try:
record = index[ref]
except KeyError, err:
#print 'No record for %s address %s in %s'%(key, ref, record['address'] )
pass # happens when an unreachable references a reachable that has been compressed out...
else:
if record['type'] not in stop_types:
result.append( record )
return result | python | def children( record, index, key='refs', stop_types=STOP_TYPES ):
"""Retrieve children records for given record"""
result = []
for ref in record.get( key,[]):
try:
record = index[ref]
except KeyError, err:
#print 'No record for %s address %s in %s'%(key, ref, record['address'] )
pass # happens when an unreachable references a reachable that has been compressed out...
else:
if record['type'] not in stop_types:
result.append( record )
return result | Retrieve children records for given record | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py#L122-L134 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py | children_types | def children_types( record, index, key='refs', stop_types=STOP_TYPES ):
"""Produce dictionary mapping type-key to instances for all children"""
types = {}
for child in children( record, index, key, stop_types=stop_types ):
types.setdefault(child['type'],[]).append( child )
return types | python | def children_types( record, index, key='refs', stop_types=STOP_TYPES ):
"""Produce dictionary mapping type-key to instances for all children"""
types = {}
for child in children( record, index, key, stop_types=stop_types ):
types.setdefault(child['type'],[]).append( child )
return types | Produce dictionary mapping type-key to instances for all children | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py#L136-L141 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py | recurse_module | def recurse_module( overall_record, index, shared, stop_types=STOP_TYPES, already_seen=None, min_size=0 ):
"""Creates a has-a recursive-cost hierarchy
Mutates objects in-place to produce a hierarchy of memory usage based on
reference-holding cost assignment
"""
for record in recurse(
overall_record, index,
stop_types=stop_types,
already_seen=already_seen,
type_group=True,
):
# anything with a totsize we've already processed...
if record.get('totsize') is not None:
continue
rinfo = record
rinfo['module'] = overall_record.get('name',NON_MODULE_REFS )
if not record['refs']:
rinfo['rsize'] = 0
rinfo['children'] = []
else:
# TODO: provide a flag to coalesce based on e.g. type at each level or throughout...
rinfo['children'] = rinfo_children = list ( children( record, index, stop_types=stop_types ) )
rinfo['rsize'] = sum([
(
child.get('totsize',0.0)/float(len(shared.get( child['address'], [])) or 1)
)
for child in rinfo_children
], 0.0 )
rinfo['totsize'] = record['size'] + rinfo['rsize']
return None | python | def recurse_module( overall_record, index, shared, stop_types=STOP_TYPES, already_seen=None, min_size=0 ):
"""Creates a has-a recursive-cost hierarchy
Mutates objects in-place to produce a hierarchy of memory usage based on
reference-holding cost assignment
"""
for record in recurse(
overall_record, index,
stop_types=stop_types,
already_seen=already_seen,
type_group=True,
):
# anything with a totsize we've already processed...
if record.get('totsize') is not None:
continue
rinfo = record
rinfo['module'] = overall_record.get('name',NON_MODULE_REFS )
if not record['refs']:
rinfo['rsize'] = 0
rinfo['children'] = []
else:
# TODO: provide a flag to coalesce based on e.g. type at each level or throughout...
rinfo['children'] = rinfo_children = list ( children( record, index, stop_types=stop_types ) )
rinfo['rsize'] = sum([
(
child.get('totsize',0.0)/float(len(shared.get( child['address'], [])) or 1)
)
for child in rinfo_children
], 0.0 )
rinfo['totsize'] = record['size'] + rinfo['rsize']
return None | Creates a has-a recursive-cost hierarchy
Mutates objects in-place to produce a hierarchy of memory usage based on
reference-holding cost assignment | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py#L144-L175 |
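A toy illustration of recurse_module() annotating hypothetical records in place with 'rsize' and 'totsize' (shared maps each address to the list of its parent addresses):
index = {
    1: {'address': 1, 'type': 'module', 'name': 'm', 'size': 0, 'refs': [2]},
    2: {'address': 2, 'type': 'dict', 'size': 280, 'refs': []},
}
shared = {2: [1]}   # record 2 has exactly one parent, record 1
recurse_module(index[1], index, shared, stop_types=())
assert index[2]['totsize'] == 280
assert index[1]['totsize'] == 280   # own size 0 plus the child's full cost (single parent)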
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py | rewrite_refs | def rewrite_refs( targets, old,new, index, key='refs', single_ref=False ):
"""Rewrite key in all targets (from index if necessary) to replace old with new"""
for parent in targets:
if not isinstance( parent, dict ):
try:
parent = index[parent]
except KeyError, err:
continue
rewrite_references( parent[key], old, new, single_ref=single_ref ) | python | def rewrite_refs( targets, old,new, index, key='refs', single_ref=False ):
"""Rewrite key in all targets (from index if necessary) to replace old with new"""
for parent in targets:
if not isinstance( parent, dict ):
try:
parent = index[parent]
except KeyError, err:
continue
rewrite_references( parent[key], old, new, single_ref=single_ref ) | Rewrite key in all targets (from index if necessary) to replace old with new | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py#L183-L191 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py | rewrite_references | def rewrite_references( sequence, old, new, single_ref=False ):
"""Rewrite parents to point to new in old
sequence -- sequence of id references
old -- old id
new -- new id
returns rewritten sequence
"""
old,new = as_id(old),as_id(new)
to_delete = []
for i,n in enumerate(sequence):
if n == old:
if new is None:
to_delete.append( i )
else:
sequence[i] = new
if single_ref:
new = None
elif n == new and single_ref:
new = None
if to_delete:
to_delete.reverse()
for i in to_delete:
del sequence[i]
return sequence | python | def rewrite_references( sequence, old, new, single_ref=False ):
"""Rewrite parents to point to new in old
sequence -- sequence of id references
old -- old id
new -- new id
returns rewritten sequence
"""
old,new = as_id(old),as_id(new)
to_delete = []
for i,n in enumerate(sequence):
if n == old:
if new is None:
to_delete.append( i )
else:
sequence[i] = new
if single_ref:
new = None
elif n == new and single_ref:
new = None
if to_delete:
to_delete.reverse()
for i in to_delete:
del sequence[i]
return sequence | Rewrite the sequence so that references to old point to new
sequence -- sequence of id references
old -- old id
new -- new id
returns rewritten sequence | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py#L193-L218 |
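A toy illustration of rewrite_references() with plain integer addresses; it assumes as_id() passes non-dict values such as ints through unchanged.
assert rewrite_references([1, 2, 3, 2], 2, 9) == [1, 9, 3, 9]
# with single_ref=True only the first occurrence is rewritten, later ones are dropped
assert rewrite_references([1, 2, 3, 2], 2, 9, single_ref=True) == [1, 9, 3]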
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py | simple | def simple( child, shared, parent ):
"""Return sub-set of children who are "simple" in the sense of group_children"""
return (
not child.get('refs',())
and (
not shared.get(child['address'])
or
shared.get(child['address']) == [parent['address']]
)
) | python | def simple( child, shared, parent ):
"""Return sub-set of children who are "simple" in the sense of group_children"""
return (
not child.get('refs',())
and (
not shared.get(child['address'])
or
shared.get(child['address']) == [parent['address']]
)
) | Return True if the child is "simple" in the sense used by group_children | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py#L220-L229 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py | group_children | def group_children( index, shared, min_kids=10, stop_types=STOP_TYPES, delete_children=True ):
"""Collect like-type children into sub-groups of objects for objects with long children-lists
Only group if:
* there are more than X children of type Y
* children are "simple"
* individual children have no children themselves
* individual children have no other parents...
"""
to_compress = []
for to_simplify in list(iterindex( index )):
if not isinstance( to_simplify, dict ):
continue
for typ,kids in children_types( to_simplify, index, stop_types=stop_types ).items():
kids = [k for k in kids if k and simple(k,shared, to_simplify)]
if len(kids) >= min_kids:
# we can group and compress out...
to_compress.append( (to_simplify,typ,kids))
for to_simplify,typ,kids in to_compress:
typ_address = new_address(index)
kid_addresses = [k['address'] for k in kids]
index[typ_address] = {
'address': typ_address,
'type': MANY_TYPE,
'name': typ,
'size': sum( [k.get('size',0) for k in kids], 0),
'parents': [to_simplify['address']],
}
shared[typ_address] = index[typ_address]['parents']
to_simplify['refs'][:] = [typ_address]
if delete_children:
for address in kid_addresses:
try:
del index[address]
except KeyError, err:
pass # already compressed out
try:
del shared[address]
except KeyError, err:
pass # already compressed out
index[typ_address]['refs'] = []
else:
index[typ_address]['refs'] = kid_addresses | python | def group_children( index, shared, min_kids=10, stop_types=STOP_TYPES, delete_children=True ):
"""Collect like-type children into sub-groups of objects for objects with long children-lists
Only group if:
* there are more than X children of type Y
* children are "simple"
* individual children have no children themselves
* individual children have no other parents...
"""
to_compress = []
for to_simplify in list(iterindex( index )):
if not isinstance( to_simplify, dict ):
continue
for typ,kids in children_types( to_simplify, index, stop_types=stop_types ).items():
kids = [k for k in kids if k and simple(k,shared, to_simplify)]
if len(kids) >= min_kids:
# we can group and compress out...
to_compress.append( (to_simplify,typ,kids))
for to_simplify,typ,kids in to_compress:
typ_address = new_address(index)
kid_addresses = [k['address'] for k in kids]
index[typ_address] = {
'address': typ_address,
'type': MANY_TYPE,
'name': typ,
'size': sum( [k.get('size',0) for k in kids], 0),
'parents': [to_simplify['address']],
}
shared[typ_address] = index[typ_address]['parents']
to_simplify['refs'][:] = [typ_address]
if delete_children:
for address in kid_addresses:
try:
del index[address]
except KeyError, err:
pass # already compressed out
try:
del shared[address]
except KeyError, err:
pass # already compressed out
index[typ_address]['refs'] = []
else:
index[typ_address]['refs'] = kid_addresses | Collect like-type children into sub-groups of objects for objects with long children-lists
Only group if:
* there are more than X children of type Y
* children are "simple"
* individual children have no children themselves
* individual children have no other parents... | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py#L231-L278 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py | simplify_dicts | def simplify_dicts( index, shared, simplify_dicts=SIMPLIFY_DICTS, always_compress=ALWAYS_COMPRESS_DICTS ):
"""Eliminate "noise" dictionary records from the index
index -- overall index of objects (including metadata such as type records)
shared -- parent-count mapping for records in index
module/type/class dictionaries
"""
# things which will have their dictionaries compressed out
to_delete = set()
for to_simplify in iterindex(index):
if to_simplify['address'] in to_delete:
continue
if to_simplify['type'] in simplify_dicts and not 'compressed' in to_simplify:
refs = to_simplify['refs']
for ref in refs:
child = index.get( ref )
if child is not None and child['type'] == 'dict':
child_referrers = child['parents'][:]
if len(child_referrers) == 1 or to_simplify['type'] in always_compress:
to_simplify['compressed'] = True
to_simplify['refs'] = child['refs']
to_simplify['size'] += child['size']
# rewrite anything *else* that was pointing to child to point to us...
while to_simplify['address'] in child_referrers:
child_referrers.remove( to_simplify['address'] )
if child_referrers:
rewrite_refs(
child_referrers,
child['address'],
to_simplify['address'],
index, single_ref=True
)
# now rewrite grandchildren to point to root obj instead of dict
for grandchild in child['refs']:
grandchild = index[grandchild]
parent_set = grandchild['parents']
if parent_set:
rewrite_references(
parent_set,
child,
to_simplify,
single_ref = True,
)
assert parent_set
to_delete.add( child['address'] )
for item in to_delete:
del index[item]
del shared[item]
return index | python | def simplify_dicts( index, shared, simplify_dicts=SIMPLIFY_DICTS, always_compress=ALWAYS_COMPRESS_DICTS ):
"""Eliminate "noise" dictionary records from the index
index -- overall index of objects (including metadata such as type records)
shared -- parent-count mapping for records in index
module/type/class dictionaries
"""
# things which will have their dictionaries compressed out
to_delete = set()
for to_simplify in iterindex(index):
if to_simplify['address'] in to_delete:
continue
if to_simplify['type'] in simplify_dicts and not 'compressed' in to_simplify:
refs = to_simplify['refs']
for ref in refs:
child = index.get( ref )
if child is not None and child['type'] == 'dict':
child_referrers = child['parents'][:]
if len(child_referrers) == 1 or to_simplify['type'] in always_compress:
to_simplify['compressed'] = True
to_simplify['refs'] = child['refs']
to_simplify['size'] += child['size']
# rewrite anything *else* that was pointing to child to point to us...
while to_simplify['address'] in child_referrers:
child_referrers.remove( to_simplify['address'] )
if child_referrers:
rewrite_refs(
child_referrers,
child['address'],
to_simplify['address'],
index, single_ref=True
)
# now rewrite grandchildren to point to root obj instead of dict
for grandchild in child['refs']:
grandchild = index[grandchild]
parent_set = grandchild['parents']
if parent_set:
rewrite_references(
parent_set,
child,
to_simplify,
single_ref = True,
)
assert parent_set
to_delete.add( child['address'] )
for item in to_delete:
del index[item]
del shared[item]
return index | Eliminate "noise" dictionary records from the index
index -- overall index of objects (including metadata such as type records)
shared -- parent-count mapping for records in index
module/type/class dictionaries | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py#L289-L345 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py | find_reachable | def find_reachable( modules, index, shared, stop_types=STOP_TYPES ):
"""Find the set of all reachable objects from given root nodes (modules)"""
reachable = set()
already_seen = set()
for module in modules:
for child in recurse( module, index, stop_types=stop_types, already_seen=already_seen):
reachable.add( child['address'] )
return reachable | python | def find_reachable( modules, index, shared, stop_types=STOP_TYPES ):
"""Find the set of all reachable objects from given root nodes (modules)"""
reachable = set()
already_seen = set()
for module in modules:
for child in recurse( module, index, stop_types=stop_types, already_seen=already_seen):
reachable.add( child['address'] )
return reachable | Find the set of all reachable objects from given root nodes (modules) | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py#L347-L354 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py | deparent_unreachable | def deparent_unreachable( reachable, shared ):
"""Eliminate all parent-links from unreachable objects from reachable objects
"""
for id,shares in shared.iteritems():
if id in reachable: # child is reachable
filtered = [
x
for x in shares
if x in reachable # only those parents which are reachable
]
if len(filtered) != len(shares):
shares[:] = filtered | python | def deparent_unreachable( reachable, shared ):
"""Eliminate all parent-links from unreachable objects from reachable objects
"""
for id,shares in shared.iteritems():
if id in reachable: # child is reachable
filtered = [
x
for x in shares
if x in reachable # only those parents which are reachable
]
if len(filtered) != len(shares):
shares[:] = filtered | Eliminate parent-links that point to unreachable objects from the records of reachable objects | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py#L356-L367 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py | bind_parents | def bind_parents( index, shared ):
"""Set parents on all items in index"""
for v in iterindex( index ):
v['parents'] = shared.get( v['address'], [] ) | python | def bind_parents( index, shared ):
"""Set parents on all items in index"""
for v in iterindex( index ):
v['parents'] = shared.get( v['address'], [] ) | Set parents on all items in index | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py#L392-L395 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py | find_roots | def find_roots( disconnected, index, shared ):
"""Find appropriate "root" objects from which to recurse the hierarchies
Will generate a synthetic root for anything which doesn't have any parents...
"""
log.warn( '%s disconnected objects in %s total objects', len(disconnected), len(index))
natural_roots = [x for x in disconnected if x.get('refs') and not x.get('parents')]
log.warn( '%s objects with no parents at all' ,len(natural_roots))
for natural_root in natural_roots:
recurse_module(
natural_root, index, shared
)
yield natural_root
rest = [x for x in disconnected if x.get( 'totsize' ) is None]
un_found = {
'type': 'module',
'name': '<disconnected objects>',
'children': rest,
'parents': [ ],
'size': 0,
'totsize': sum([x['size'] for x in rest],0),
'address': new_address( index ),
}
index[un_found['address']] = un_found
yield un_found | python | def find_roots( disconnected, index, shared ):
"""Find appropriate "root" objects from which to recurse the hierarchies
Will generate a synthetic root for anything which doesn't have any parents...
"""
log.warn( '%s disconnected objects in %s total objects', len(disconnected), len(index))
natural_roots = [x for x in disconnected if x.get('refs') and not x.get('parents')]
log.warn( '%s objects with no parents at all' ,len(natural_roots))
for natural_root in natural_roots:
recurse_module(
natural_root, index, shared
)
yield natural_root
rest = [x for x in disconnected if x.get( 'totsize' ) is None]
un_found = {
'type': 'module',
'name': '<disconnected objects>',
'children': rest,
'parents': [ ],
'size': 0,
'totsize': sum([x['size'] for x in rest],0),
'address': new_address( index ),
}
index[un_found['address']] = un_found
yield un_found | Find appropriate "root" objects from which to recurse the hierarchies
Will generate a synthetic root for anything which doesn't have any parents... | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py#L509-L533 |
lrq3000/pyFileFixity | pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py | Loader.get_root | def get_root( self, key ):
"""Retrieve the given root by type-key"""
if key not in self.roots:
root,self.rows = load( self.filename, include_interpreter = self.include_interpreter )
self.roots[key] = root
return self.roots[key] | python | def get_root( self, key ):
"""Retrieve the given root by type-key"""
if key not in self.roots:
root,self.rows = load( self.filename, include_interpreter = self.include_interpreter )
self.roots[key] = root
return self.roots[key] | Retrieve the given root by type-key | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py#L549-L554 |
douban/brownant | brownant/dinergate.py | Dinergate.url | def url(self):
"""The fetching target URL.
The default behavior of this property is to build the URL string with the
:const:`~brownant.dinergate.Dinergate.URL_TEMPLATE`.
The subclasses could override
:const:`~brownant.dinergate.Dinergate.URL_TEMPLATE` or use a different
implementation.
"""
if not self.URL_TEMPLATE:
raise NotImplementedError
return self.URL_TEMPLATE.format(self=self) | python | def url(self):
"""The fetching target URL.
The default behavior of this property is to build the URL string with the
:const:`~brownant.dinergate.Dinergate.URL_TEMPLATE`.
The subclasses could override
:const:`~brownant.dinergate.Dinergate.URL_TEMPLATE` or use a different
implementation.
"""
if not self.URL_TEMPLATE:
raise NotImplementedError
return self.URL_TEMPLATE.format(self=self) | The fetching target URL.
The default behavior of this property is to build the URL string with the
:const:`~brownant.dinergate.Dinergate.URL_TEMPLATE`.
The subclasses could override
:const:`~brownant.dinergate.Dinergate.URL_TEMPLATE` or use a different
implementation. | https://github.com/douban/brownant/blob/3c7e6d30f67b8f0f8ca1f823ea3daed74e8725cd/brownant/dinergate.py#L60-L72 |
douban/brownant | brownant/site.py | Site.record_action | def record_action(self, method_name, *args, **kwargs):
"""Record the method-calling action.
The actions are expected to be played on a target object.
:param method_name: the name of called method.
:param args: the general arguments for calling method.
:param kwargs: the keyword arguments for calling method.
"""
self.actions.append((method_name, args, kwargs)) | python | def record_action(self, method_name, *args, **kwargs):
"""Record the method-calling action.
The actions are expected to be played on a target object.
:param method_name: the name of called method.
:param args: the general arguments for calling method.
:param kwargs: the keyword arguments for calling method.
"""
self.actions.append((method_name, args, kwargs)) | Record the method-calling action.
The actions are expected to be played on a target object.
:param method_name: the name of called method.
:param args: the general arguments for calling method.
:param kwargs: the keyword arguments for calling method. | https://github.com/douban/brownant/blob/3c7e6d30f67b8f0f8ca1f823ea3daed74e8725cd/brownant/site.py#L14-L23 |
douban/brownant | brownant/site.py | Site.play_actions | def play_actions(self, target):
"""Play record actions on the target object.
:param target: the target which recive all record actions, is a brown
ant app instance normally.
:type target: :class:`~brownant.app.Brownant`
"""
for method_name, args, kwargs in self.actions:
method = getattr(target, method_name)
method(*args, **kwargs) | python | def play_actions(self, target):
"""Play record actions on the target object.
:param target: the target which recive all record actions, is a brown
ant app instance normally.
:type target: :class:`~brownant.app.Brownant`
"""
for method_name, args, kwargs in self.actions:
method = getattr(target, method_name)
method(*args, **kwargs) | Play recorded actions on the target object.
:param target: the target which receives all recorded actions, normally a brown
ant app instance.
:type target: :class:`~brownant.app.Brownant` | https://github.com/douban/brownant/blob/3c7e6d30f67b8f0f8ca1f823ea3daed74e8725cd/brownant/site.py#L25-L34 |
douban/brownant | brownant/site.py | Site.route | def route(self, host, rule, **options):
"""The decorator to register wrapped function as the brown ant app.
All optional parameters of this method are compatible with the
:meth:`~brownant.app.Brownant.add_url_rule`.
Registered functions or classes must be import-able by their qualified
names. This works differently from :class:`~flask.Flask`, more like a
lazy-loading mode: registered objects are not loaded until they are
first used.
The right way::
@site.route("www.example.com", "/item/<int:item_id>")
def spam(request, item_id):
pass
The wrong way::
def egg():
# the function could not be imported by its qualified name
@site.route("www.example.com", "/item/<int:item_id>")
def spam(request, item_id):
pass
egg()
:param host: the limited host name.
:param rule: the URL path rule as string.
:param options: the options to be forwarded to the
:class:`werkzeug.routing.Rule` object.
"""
def decorator(func):
endpoint = "{func.__module__}:{func.__name__}".format(func=func)
self.record_action("add_url_rule", host, rule, endpoint, **options)
return func
return decorator | python | def route(self, host, rule, **options):
"""The decorator to register wrapped function as the brown ant app.
All optional parameters of this method are compatible with the
:meth:`~brownant.app.Brownant.add_url_rule`.
Registered functions or classes must be import-able by their qualified
names. This works differently from :class:`~flask.Flask`, more like a
lazy-loading mode: registered objects are not loaded until they are
first used.
The right way::
@site.route("www.example.com", "/item/<int:item_id>")
def spam(request, item_id):
pass
The wrong way::
def egg():
# the function could not be imported by its qualified name
@site.route("www.example.com", "/item/<int:item_id>")
def spam(request, item_id):
pass
egg()
:param host: the limited host name.
:param rule: the URL path rule as string.
:param options: the options to be forwarded to the
:class:`werkzeug.routing.Rule` object.
"""
def decorator(func):
endpoint = "{func.__module__}:{func.__name__}".format(func=func)
self.record_action("add_url_rule", host, rule, endpoint, **options)
return func
return decorator | The decorator to register the wrapped function with the brown ant app.
All optional parameters of this method are compatible with the
:meth:`~brownant.app.Brownant.add_url_rule`.
Registered functions or classes must be import-able by their qualified
names. This works differently from :class:`~flask.Flask`, more like a
lazy-loading mode: registered objects are not loaded until they are
first used.
The right way::
@site.route("www.example.com", "/item/<int:item_id>")
def spam(request, item_id):
pass
The wrong way::
def egg():
# the function could not be imported by its qualified name
@site.route("www.example.com", "/item/<int:item_id>")
def spam(request, item_id):
pass
egg()
:param host: the limited host name.
:param rule: the URL path rule as string.
:param options: the options to be forwarded to the
:class:`werkzeug.routing.Rule` object. | https://github.com/douban/brownant/blob/3c7e6d30f67b8f0f8ca1f823ea3daed74e8725cd/brownant/site.py#L36-L72 |
douban/brownant | brownant/utils.py | to_bytes_safe | def to_bytes_safe(text, encoding="utf-8"):
"""Convert the input value into bytes type.
If the input value is a string and can be encoded as UTF-8 bytes, the
encoded value will be returned. Otherwise the original value will be
returned unchanged.
:param text: the input value which could be string or bytes.
:param encoding: the encoding to be used while converting the string
input into bytes.
:rtype: :class:`~__builtin__.bytes`
"""
if not isinstance(text, (bytes, text_type)):
raise TypeError("must be string type")
if isinstance(text, text_type):
return text.encode(encoding)
return text | python | def to_bytes_safe(text, encoding="utf-8"):
"""Convert the input value into bytes type.
If the input value is a string and can be encoded as UTF-8 bytes, the
encoded value will be returned. Otherwise the original value will be
returned unchanged.
:param text: the input value which could be string or bytes.
:param encoding: the encoding to be used while converting the string
input into bytes.
:rtype: :class:`~__builtin__.bytes`
"""
if not isinstance(text, (bytes, text_type)):
raise TypeError("must be string type")
if isinstance(text, text_type):
return text.encode(encoding)
return text | Convert the input value into bytes type.
If the input value is a string and can be encoded as UTF-8 bytes, the
encoded value will be returned. Otherwise the original value will be
returned unchanged.
:param text: the input value which could be string or bytes.
:param encoding: the encoding to be used while converting the string
input into bytes.
:rtype: :class:`~__builtin__.bytes` | https://github.com/douban/brownant/blob/3c7e6d30f67b8f0f8ca1f823ea3daed74e8725cd/brownant/utils.py#L4-L22 |
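A short sketch of the three branches of to_bytes_safe:
from brownant.utils import to_bytes_safe
to_bytes_safe(u"caf\u00e9")       # -> b'caf\xc3\xa9', text is encoded as UTF-8
to_bytes_safe(b"already-bytes")   # -> b'already-bytes', returned unchanged
to_bytes_safe(42)                 # raises TypeError("must be string type")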
douban/brownant | brownant/pipeline/base.py | PipelineProperty.get_attr | def get_attr(self, obj, name):
"""Get attribute of the target object with the configured attribute
name in the :attr:`~brownant.pipeline.base.PipelineProperty.attr_names`
of this instance.
:param obj: the target object.
:type obj: :class:`~brownant.dinergate.Dinergate`
:param name: the internal name used in the
:attr:`~brownant.pipeline.base.PipelineProperty.attr_names`.
(e.g. `"text_attr"`)
"""
attr_name = self.attr_names[name]
return getattr(obj, attr_name) | python | def get_attr(self, obj, name):
"""Get attribute of the target object with the configured attribute
name in the :attr:`~brownant.pipeline.base.PipelineProperty.attr_names`
of this instance.
:param obj: the target object.
:type obj: :class:`~brownant.dinergate.Dinergate`
:param name: the internal name used in the
:attr:`~brownant.pipeline.base.PipelineProperty.attr_names`.
(e.g. `"text_attr"`)
"""
attr_name = self.attr_names[name]
return getattr(obj, attr_name) | Get attribute of the target object with the configured attribute
name in the :attr:`~brownant.pipeline.base.PipelineProperty.attr_names`
of this instance.
:param obj: the target object.
:type obj: :class:`~brownant.dinergate.Dinergate`
:param name: the internal name used in the
:attr:`~brownant.pipeline.base.PipelineProperty.attr_names`.
(e.g. `"text_attr"`) | https://github.com/douban/brownant/blob/3c7e6d30f67b8f0f8ca1f823ea3daed74e8725cd/brownant/pipeline/base.py#L83-L95 |
douban/brownant | brownant/app.py | Brownant.add_url_rule | def add_url_rule(self, host, rule_string, endpoint, **options):
"""Add a url rule to the app instance.
The url rule is the same as in Flask apps and other Werkzeug apps.
:param host: the matched hostname. e.g. "www.python.org"
:param rule_string: the matched path pattern. e.g. "/news/<int:id>"
:param endpoint: the endpoint name as a dispatching key such as the
qualified name of the object.
"""
rule = Rule(rule_string, host=host, endpoint=endpoint, **options)
self.url_map.add(rule) | python | def add_url_rule(self, host, rule_string, endpoint, **options):
"""Add a url rule to the app instance.
The url rule is the same as in Flask apps and other Werkzeug apps.
:param host: the matched hostname. e.g. "www.python.org"
:param rule_string: the matched path pattern. e.g. "/news/<int:id>"
:param endpoint: the endpoint name as a dispatching key such as the
qualified name of the object.
"""
rule = Rule(rule_string, host=host, endpoint=endpoint, **options)
self.url_map.add(rule) | Add a url rule to the app instance.
The url rule is the same as in Flask apps and other Werkzeug apps.
:param host: the matched hostname. e.g. "www.python.org"
:param rule_string: the matched path pattern. e.g. "/news/<int:id>"
:param endpoint: the endpoint name as a dispatching key such as the
qualified name of the object. | https://github.com/douban/brownant/blob/3c7e6d30f67b8f0f8ca1f823ea3daed74e8725cd/brownant/app.py#L23-L34 |
douban/brownant | brownant/app.py | Brownant.parse_url | def parse_url(self, url_string):
"""Parse the URL string with the url map of this app instance.
:param url_string: the origin URL string.
:returns: the tuple as `(url, url_adapter, query_args)`, the url is
parsed by the standard library `urlparse`, the url_adapter is
from the werkzeug bound URL map, the query_args is a
multidict from the werkzeug.
"""
url = urllib.parse.urlparse(url_string)
url = self.validate_url(url)
url_adapter = self.url_map.bind(server_name=url.hostname,
url_scheme=url.scheme,
path_info=url.path)
query_args = url_decode(url.query)
return url, url_adapter, query_args | python | def parse_url(self, url_string):
"""Parse the URL string with the url map of this app instance.
:param url_string: the origin URL string.
:returns: the tuple as `(url, url_adapter, query_args)`, the url is
parsed by the standard library `urlparse`, the url_adapter is
from the werkzeug bound URL map, the query_args is a
multidict from the werkzeug.
"""
url = urllib.parse.urlparse(url_string)
url = self.validate_url(url)
url_adapter = self.url_map.bind(server_name=url.hostname,
url_scheme=url.scheme,
path_info=url.path)
query_args = url_decode(url.query)
return url, url_adapter, query_args | Parse the URL string with the url map of this app instance.
:param url_string: the origin URL string.
:returns: the tuple as `(url, url_adapter, query_args)`, the url is
parsed by the standard library `urlparse`, the url_adapter is
from the werkzeug bound URL map, the query_args is a
multidict from the werkzeug. | https://github.com/douban/brownant/blob/3c7e6d30f67b8f0f8ca1f823ea3daed74e8725cd/brownant/app.py#L36-L51 |
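A hedged sketch of parse_url together with add_url_rule; the bare Brownant() constructor call and the endpoint string "mysite.handlers:spam" are assumptions made for illustration.
from brownant.app import Brownant
app = Brownant()   # constructor signature assumed
app.add_url_rule("www.example.com", "/item/<int:item_id>",
                 "mysite.handlers:spam")   # hypothetical endpoint
url, adapter, args = app.parse_url("http://www.example.com/item/42?ref=home")
assert adapter.match() == ("mysite.handlers:spam", {"item_id": 42})
assert args["ref"] == "home"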
douban/brownant | brownant/app.py | Brownant.validate_url | def validate_url(self, url):
"""Validate the :class:`~urllib.parse.ParseResult` object.
This method will make sure the :meth:`~brownant.app.BrownAnt.parse_url`
could work as expected even when it meets an unexpected URL string.
:param url: the parsed url.
:type url: :class:`~urllib.parse.ParseResult`
"""
# fix up the non-ascii path
url_path = to_bytes_safe(url.path)
url_path = urllib.parse.quote(url_path, safe=b"/%")
# fix up the non-ascii query
url_query = to_bytes_safe(url.query)
url_query = urllib.parse.quote(url_query, safe=b"?=&")
url = urllib.parse.ParseResult(url.scheme, url.netloc, url_path,
url.params, url_query, url.fragment)
# validate the components of URL
has_hostname = url.hostname is not None and len(url.hostname) > 0
has_http_scheme = url.scheme in ("http", "https")
has_path = not len(url.path) or url.path.startswith("/")
if not (has_hostname and has_http_scheme and has_path):
raise NotSupported("invalid url: %s" % repr(url))
return url | python | def validate_url(self, url):
"""Validate the :class:`~urllib.parse.ParseResult` object.
This method will make sure the :meth:`~brownant.app.BrownAnt.parse_url`
could work as expected even when it meets an unexpected URL string.
:param url: the parsed url.
:type url: :class:`~urllib.parse.ParseResult`
"""
# fix up the non-ascii path
url_path = to_bytes_safe(url.path)
url_path = urllib.parse.quote(url_path, safe=b"/%")
# fix up the non-ascii query
url_query = to_bytes_safe(url.query)
url_query = urllib.parse.quote(url_query, safe=b"?=&")
url = urllib.parse.ParseResult(url.scheme, url.netloc, url_path,
url.params, url_query, url.fragment)
# validate the components of URL
has_hostname = url.hostname is not None and len(url.hostname) > 0
has_http_scheme = url.scheme in ("http", "https")
has_path = not len(url.path) or url.path.startswith("/")
if not (has_hostname and has_http_scheme and has_path):
raise NotSupported("invalid url: %s" % repr(url))
return url | Validate the :class:`~urllib.parse.ParseResult` object.
This method will make sure the :meth:`~brownant.app.BrownAnt.parse_url`
could work as expected even when it meets an unexpected URL string.
:param url: the parsed url.
:type url: :class:`~urllib.parse.ParseResult` | https://github.com/douban/brownant/blob/3c7e6d30f67b8f0f8ca1f823ea3daed74e8725cd/brownant/app.py#L53-L81 |
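The non-ASCII fix-up performed by `validate_url` can be reproduced with the standard library alone. In this sketch a plain UTF-8 encode stands in for brownant's `to_bytes_safe` helper, which is an assumption about that helper's behaviour:

```python
# Approximate the path/query quoting performed by validate_url; the plain
# UTF-8 encode is a stand-in for to_bytes_safe.
import urllib.parse

url = urllib.parse.urlparse("http://example.org/caf\u00e9?q=\u00fc")
path = urllib.parse.quote(url.path.encode("utf-8"), safe=b"/%")
query = urllib.parse.quote(url.query.encode("utf-8"), safe=b"?=&")
fixed = urllib.parse.ParseResult(url.scheme, url.netloc, path,
                                 url.params, query, url.fragment)
print(fixed.geturl())   # http://example.org/caf%C3%A9?q=%C3%BC
```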
douban/brownant | brownant/app.py | Brownant.dispatch_url | def dispatch_url(self, url_string):
"""Dispatch the URL string to the target endpoint function.
:param url_string: the origin URL string.
:returns: the return value of calling dispatched function.
"""
url, url_adapter, query_args = self.parse_url(url_string)
try:
endpoint, kwargs = url_adapter.match()
except NotFound:
raise NotSupported(url_string)
except RequestRedirect as e:
new_url = "{0.new_url}?{1}".format(e, url_encode(query_args))
return self.dispatch_url(new_url)
try:
handler = import_string(endpoint)
request = Request(url=url, args=query_args)
return handler(request, **kwargs)
except RequestRedirect as e:
return self.dispatch_url(e.new_url) | python | def dispatch_url(self, url_string):
"""Dispatch the URL string to the target endpoint function.
:param url_string: the origin URL string.
:returns: the return value of calling dispatched function.
"""
url, url_adapter, query_args = self.parse_url(url_string)
try:
endpoint, kwargs = url_adapter.match()
except NotFound:
raise NotSupported(url_string)
except RequestRedirect as e:
new_url = "{0.new_url}?{1}".format(e, url_encode(query_args))
return self.dispatch_url(new_url)
try:
handler = import_string(endpoint)
request = Request(url=url, args=query_args)
return handler(request, **kwargs)
except RequestRedirect as e:
return self.dispatch_url(e.new_url) | Dispatch the URL string to the target endpoint function.
:param url_string: the origin URL string.
:returns: the return value of calling dispatched function. | https://github.com/douban/brownant/blob/3c7e6d30f67b8f0f8ca1f823ea3daed74e8725cd/brownant/app.py#L83-L104 |
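A hedged end-to-end sketch for `dispatch_url`: an unmatched hostname raises `NotSupported`, while a matched URL has its endpoint imported by dotted name and called. The import locations of `Brownant` and `NotSupported`, and the endpoint string, are assumptions based on the file paths in these entries:

```python
# Hedged sketch; brownant must be installed, and the import paths plus the
# endpoint string are assumptions for illustration.
from brownant.app import Brownant
from brownant.exceptions import NotSupported

app = Brownant()
app.add_url_rule("www.python.org", "/news/<int:id>", "mysite.news.handler")

try:
    app.dispatch_url("http://unknown.example.org/whatever")
except NotSupported as exc:
    print("not supported:", exc)
```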
douban/brownant | brownant/app.py | Brownant.mount_site | def mount_site(self, site):
"""Mount a supported site to this app instance.
:param site: the site instance to be mounted.
"""
if isinstance(site, string_types):
site = import_string(site)
site.play_actions(target=self) | python | def mount_site(self, site):
"""Mount a supported site to this app instance.
:param site: the site instance to be mounted.
"""
if isinstance(site, string_types):
site = import_string(site)
site.play_actions(target=self) | Mount a supported site to this app instance.
:param site: the site instance to be mounted. | https://github.com/douban/brownant/blob/3c7e6d30f67b8f0f8ca1f823ea3daed74e8725cd/brownant/app.py#L106-L113 |
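`mount_site` only requires an object (or its dotted import string) exposing `play_actions(target=...)`; a real Site records `add_url_rule` calls and replays them onto the app. A minimal stand-in, purely as a sketch with a hypothetical endpoint:

```python
# Minimal stand-in for a brownant "site"; the endpoint string is hypothetical.
from brownant.app import Brownant

class FakeSite(object):
    def play_actions(self, target):
        # a real Site replays its recorded url rules onto the target app
        target.add_url_rule("example.org", "/item/<int:id>",
                            "demo.handlers.item")

app = Brownant()
app.mount_site(FakeSite())
```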
taskcluster/taskcluster-client.py | taskcluster/github.py | Github.ping | def ping(self, *args, **kwargs):
"""
Ping Server
Respond without doing anything.
This endpoint is used to check that the service is up.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs) | python | def ping(self, *args, **kwargs):
"""
Ping Server
Respond without doing anything.
This endpoint is used to check that the service is up.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs) | Ping Server
Respond without doing anything.
This endpoint is used to check that the service is up.
This method is ``stable`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/github.py#L31-L41 |
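Calling a generated client method such as `ping` follows the usual taskcluster-client pattern; the `rootUrl` below is a placeholder and the top-level `taskcluster.Github` import is assumed to be available:

```python
# Hedged liveness check; the rootUrl points at a placeholder deployment.
import taskcluster

github = taskcluster.Github({"rootUrl": "https://tc.example.com"})
github.ping()
```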
taskcluster/taskcluster-client.py | taskcluster/github.py | Github.githubWebHookConsumer | def githubWebHookConsumer(self, *args, **kwargs):
"""
Consume GitHub WebHook
Capture a GitHub event and publish it via pulse, if it's a push,
release or pull request.
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["githubWebHookConsumer"], *args, **kwargs) | python | def githubWebHookConsumer(self, *args, **kwargs):
"""
Consume GitHub WebHook
Capture a GitHub event and publish it via pulse, if it's a push,
release or pull request.
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["githubWebHookConsumer"], *args, **kwargs) | Consume GitHub WebHook
Capture a GitHub event and publish it via pulse, if it's a push,
release or pull request.
This method is ``experimental`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/github.py#L43-L53 |
taskcluster/taskcluster-client.py | taskcluster/github.py | Github.badge | def badge(self, *args, **kwargs):
"""
Latest Build Status Badge
Checks the status of the latest build of a given branch
and returns corresponding badge svg.
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["badge"], *args, **kwargs) | python | def badge(self, *args, **kwargs):
"""
Latest Build Status Badge
Checks the status of the latest build of a given branch
and returns corresponding badge svg.
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["badge"], *args, **kwargs) | Latest Build Status Badge
Checks the status of the latest build of a given branch
and returns corresponding badge svg.
This method is ``experimental`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/github.py#L70-L80 |
taskcluster/taskcluster-client.py | taskcluster/github.py | Github.createComment | def createComment(self, *args, **kwargs):
"""
Post a comment on a given GitHub Issue or Pull Request
For a given Issue or Pull Request of a repository, this will write a new message.
This method takes input: ``v1/create-comment.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["createComment"], *args, **kwargs) | python | def createComment(self, *args, **kwargs):
"""
Post a comment on a given GitHub Issue or Pull Request
For a given Issue or Pull Request of a repository, this will write a new message.
This method takes input: ``v1/create-comment.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["createComment"], *args, **kwargs) | Post a comment on a given GitHub Issue or Pull Request
For a given Issue or Pull Request of a repository, this will write a new message.
This method takes input: ``v1/create-comment.json#``
This method is ``experimental`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/github.py#L127-L138 |
jart/fabulous | fabulous/gotham.py | lorem_gotham | def lorem_gotham():
"""Cheesy Gothic Poetry Generator
Uses Python generators to yield eternal angst.
When you need to generate random verbiage to test your code or
typographic design, let's face it... Lorem Ipsum and "the quick
brown fox" are old and boring!
What you need is something with *flavor*, the kind of thing a
depressed teenager with a lot of black makeup would write.
"""
w = lambda l: l[random.randrange(len(l))]
er = lambda w: w[:-1]+'ier' if w.endswith('y') else (w+'r' if w.endswith('e') else w+'er')
s = lambda w: w+'s'
punc = lambda c, *l: " ".join(l)+c
sentence = lambda *l: lambda: " ".join(l)
pick = lambda *l: (l[random.randrange(len(l))])()
while True:
yield pick(
sentence('the',w(adj),w(them),'and the',w(them),w(them_verb)),
sentence('delivering me to',w(place)),
sentence('they',w(action),'my',w(me_part),'and',w(me_verb),'with all my',w(feeling)),
sentence('in the',w(place),'my',w(feeling),'shall',w(me_verb)),
sentence(punc(',', er(w(adj)),'than the a petty',w(feeling))),
sentence(er(w(adj)),'than',w(them),'in',w(place)),
sentence(punc('!','oh my',w(me_part)),punc('!','the',w(feeling))),
sentence('no one',s(w(angst)),'why the',w(them),w(them_verb + me_verb))) | python | def lorem_gotham():
"""Cheesy Gothic Poetry Generator
Uses Python generators to yield eternal angst.
When you need to generate random verbiage to test your code or
typographic design, let's face it... Lorem Ipsum and "the quick
brown fox" are old and boring!
What you need is something with *flavor*, the kind of thing a
depressed teenager with a lot of black makeup would write.
"""
w = lambda l: l[random.randrange(len(l))]
er = lambda w: w[:-1]+'ier' if w.endswith('y') else (w+'r' if w.endswith('e') else w+'er')
s = lambda w: w+'s'
punc = lambda c, *l: " ".join(l)+c
sentence = lambda *l: lambda: " ".join(l)
pick = lambda *l: (l[random.randrange(len(l))])()
while True:
yield pick(
sentence('the',w(adj),w(them),'and the',w(them),w(them_verb)),
sentence('delivering me to',w(place)),
sentence('they',w(action),'my',w(me_part),'and',w(me_verb),'with all my',w(feeling)),
sentence('in the',w(place),'my',w(feeling),'shall',w(me_verb)),
sentence(punc(',', er(w(adj)),'than the a petty',w(feeling))),
sentence(er(w(adj)),'than',w(them),'in',w(place)),
sentence(punc('!','oh my',w(me_part)),punc('!','the',w(feeling))),
sentence('no one',s(w(angst)),'why the',w(them),w(them_verb + me_verb))) | Cheesy Gothic Poetry Generator
Uses Python generators to yield eternal angst.
When you need to generate random verbiage to test your code or
typographic design, let's face it... Lorem Ipsum and "the quick
brown fox" are old and boring!
What you need is something with *flavor*, the kind of thing a
depressed teenager with a lot of black makeup would write. | https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/gotham.py#L65-L92 |
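`lorem_gotham` is an infinite generator, so callers should slice it rather than try to exhaust it; a small sketch assuming the fabulous package is installed:

```python
# Print a title and a few generated lines; islice keeps the endless
# generator finite.
import itertools
from fabulous.gotham import lorem_gotham, lorem_gotham_title

print(lorem_gotham_title())
for line in itertools.islice(lorem_gotham(), 4):
    print(line)
```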
jart/fabulous | fabulous/gotham.py | lorem_gotham_title | def lorem_gotham_title():
"""Names your poem
"""
w = lambda l: l[random.randrange(len(l))]
sentence = lambda *l: lambda: " ".join(l)
pick = lambda *l: (l[random.randrange(len(l))])()
return pick(
sentence('why i',w(me_verb)),
sentence(w(place)),
sentence('a',w(adj),w(adj),w(place)),
sentence('the',w(them))) | python | def lorem_gotham_title():
"""Names your poem
"""
w = lambda l: l[random.randrange(len(l))]
sentence = lambda *l: lambda: " ".join(l)
pick = lambda *l: (l[random.randrange(len(l))])()
return pick(
sentence('why i',w(me_verb)),
sentence(w(place)),
sentence('a',w(adj),w(adj),w(place)),
sentence('the',w(them))) | Names your poem | https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/gotham.py#L95-L105 |
jart/fabulous | fabulous/gotham.py | main | def main():
"""I provide a command-line interface for this module
"""
print()
print("-~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~-")
print(lorem_gotham_title().center(50))
print("-~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~-")
print()
poem = lorem_gotham()
for n in range(16):
if n in (4, 8, 12):
print()
print(next(poem))
print() | python | def main():
"""I provide a command-line interface for this module
"""
print()
print("-~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~-")
print(lorem_gotham_title().center(50))
print("-~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~--~*~-")
print()
poem = lorem_gotham()
for n in range(16):
if n in (4, 8, 12):
print()
print(next(poem))
print() | I provide a command-line interface for this module | https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/gotham.py#L108-L121 |
taskcluster/taskcluster-client.py | taskcluster/aio/ec2manager.py | EC2Manager.listWorkerTypes | async def listWorkerTypes(self, *args, **kwargs):
"""
See the list of worker types which are known to be managed
This method is only for debugging the ec2-manager
This method gives output: ``v1/list-worker-types.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["listWorkerTypes"], *args, **kwargs) | python | async def listWorkerTypes(self, *args, **kwargs):
"""
See the list of worker types which are known to be managed
This method is only for debugging the ec2-manager
This method gives output: ``v1/list-worker-types.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["listWorkerTypes"], *args, **kwargs) | See the list of worker types which are known to be managed
This method is only for debugging the ec2-manager
This method gives output: ``v1/list-worker-types.json#``
This method is ``experimental`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/ec2manager.py#L36-L47 |
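The `aio` client methods are coroutines, so they need an event loop and an HTTP session. A hedged asyncio sketch: the `rootUrl` is a placeholder, and passing an `aiohttp` session this way follows the common aio-client pattern rather than a verified constructor signature.

```python
# Hedged asyncio usage sketch for the aio EC2Manager client; the rootUrl
# and the session keyword are assumptions.
import asyncio
import aiohttp
import taskcluster.aio

async def main():
    async with aiohttp.ClientSession() as session:
        ec2 = taskcluster.aio.EC2Manager(
            {"rootUrl": "https://tc.example.com"}, session=session)
        print(await ec2.listWorkerTypes())

asyncio.run(main())
```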
taskcluster/taskcluster-client.py | taskcluster/aio/ec2manager.py | EC2Manager.runInstance | async def runInstance(self, *args, **kwargs):
"""
Run an instance
Request an instance of a worker type
This method takes input: ``v1/run-instance-request.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["runInstance"], *args, **kwargs) | python | async def runInstance(self, *args, **kwargs):
"""
Run an instance
Request an instance of a worker type
This method takes input: ``v1/run-instance-request.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["runInstance"], *args, **kwargs) | Run an instance
Request an instance of a worker type
This method takes input: ``v1/run-instance-request.json#``
This method is ``experimental`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/ec2manager.py#L49-L60 |
taskcluster/taskcluster-client.py | taskcluster/aio/ec2manager.py | EC2Manager.workerTypeStats | async def workerTypeStats(self, *args, **kwargs):
"""
Look up the resource stats for a workerType
Return an object which has a generic state description. This only contains counts of instances
This method gives output: ``v1/worker-type-resources.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["workerTypeStats"], *args, **kwargs) | python | async def workerTypeStats(self, *args, **kwargs):
"""
Look up the resource stats for a workerType
Return an object which has a generic state description. This only contains counts of instances
This method gives output: ``v1/worker-type-resources.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["workerTypeStats"], *args, **kwargs) | Look up the resource stats for a workerType
Return an object which has a generic state description. This only contains counts of instances
This method gives output: ``v1/worker-type-resources.json#``
This method is ``experimental`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/ec2manager.py#L73-L84 |
taskcluster/taskcluster-client.py | taskcluster/aio/ec2manager.py | EC2Manager.workerTypeHealth | async def workerTypeHealth(self, *args, **kwargs):
"""
Look up the resource health for a workerType
Return a view of the health of a given worker type
This method gives output: ``v1/health.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["workerTypeHealth"], *args, **kwargs) | python | async def workerTypeHealth(self, *args, **kwargs):
"""
Look up the resource health for a workerType
Return a view of the health of a given worker type
This method gives output: ``v1/health.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["workerTypeHealth"], *args, **kwargs) | Look up the resource health for a workerType
Return a view of the health of a given worker type
This method gives output: ``v1/health.json#``
This method is ``experimental`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/ec2manager.py#L86-L97 |
taskcluster/taskcluster-client.py | taskcluster/aio/ec2manager.py | EC2Manager.workerTypeErrors | async def workerTypeErrors(self, *args, **kwargs):
"""
Look up the most recent errors of a workerType
Return a list of the most recent errors encountered by a worker type
This method gives output: ``v1/errors.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["workerTypeErrors"], *args, **kwargs) | python | async def workerTypeErrors(self, *args, **kwargs):
"""
Look up the most recent errors of a workerType
Return a list of the most recent errors encountered by a worker type
This method gives output: ``v1/errors.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["workerTypeErrors"], *args, **kwargs) | Look up the most recent errors of a workerType
Return a list of the most recent errors encountered by a worker type
This method gives output: ``v1/errors.json#``
This method is ``experimental`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/ec2manager.py#L99-L110 |
taskcluster/taskcluster-client.py | taskcluster/aio/ec2manager.py | EC2Manager.workerTypeState | async def workerTypeState(self, *args, **kwargs):
"""
Look up the resource state for a workerType
Return state information for a given worker type
This method gives output: ``v1/worker-type-state.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["workerTypeState"], *args, **kwargs) | python | async def workerTypeState(self, *args, **kwargs):
"""
Look up the resource state for a workerType
Return state information for a given worker type
This method gives output: ``v1/worker-type-state.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["workerTypeState"], *args, **kwargs) | Look up the resource state for a workerType
Return state information for a given worker type
This method gives output: ``v1/worker-type-state.json#``
This method is ``experimental`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/ec2manager.py#L112-L123 |
taskcluster/taskcluster-client.py | taskcluster/aio/ec2manager.py | EC2Manager.ensureKeyPair | async def ensureKeyPair(self, *args, **kwargs):
"""
Ensure a KeyPair for a given worker type exists
Idempotently ensure that a keypair of a given name exists
This method takes input: ``v1/create-key-pair.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["ensureKeyPair"], *args, **kwargs) | python | async def ensureKeyPair(self, *args, **kwargs):
"""
Ensure a KeyPair for a given worker type exists
Idempotently ensure that a keypair of a given name exists
This method takes input: ``v1/create-key-pair.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["ensureKeyPair"], *args, **kwargs) | Ensure a KeyPair for a given worker type exists
Idempotently ensure that a keypair of a given name exists
This method takes input: ``v1/create-key-pair.json#``
This method is ``experimental`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/ec2manager.py#L125-L136 |
taskcluster/taskcluster-client.py | taskcluster/aio/ec2manager.py | EC2Manager.removeKeyPair | async def removeKeyPair(self, *args, **kwargs):
"""
Ensure a KeyPair for a given worker type does not exist
Ensure that a keypair of a given name does not exist.
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["removeKeyPair"], *args, **kwargs) | python | async def removeKeyPair(self, *args, **kwargs):
"""
Ensure a KeyPair for a given worker type does not exist
Ensure that a keypair of a given name does not exist.
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["removeKeyPair"], *args, **kwargs) | Ensure a KeyPair for a given worker type does not exist
Ensure that a keypair of a given name does not exist.
This method is ``experimental`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/ec2manager.py#L138-L147 |
taskcluster/taskcluster-client.py | taskcluster/aio/ec2manager.py | EC2Manager.terminateInstance | async def terminateInstance(self, *args, **kwargs):
"""
Terminate an instance
Terminate an instance in a specified region
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["terminateInstance"], *args, **kwargs) | python | async def terminateInstance(self, *args, **kwargs):
"""
Terminate an instance
Terminate an instance in a specified region
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["terminateInstance"], *args, **kwargs) | Terminate an instance
Terminate an instance in a specified region
This method is ``experimental`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/ec2manager.py#L149-L158 |
taskcluster/taskcluster-client.py | taskcluster/aio/ec2manager.py | EC2Manager.getSpecificPrices | async def getSpecificPrices(self, *args, **kwargs):
"""
Request prices for EC2
Return a list of possible prices for EC2
This method takes input: ``v1/prices-request.json#``
This method gives output: ``v1/prices.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["getSpecificPrices"], *args, **kwargs) | python | async def getSpecificPrices(self, *args, **kwargs):
"""
Request prices for EC2
Return a list of possible prices for EC2
This method takes input: ``v1/prices-request.json#``
This method gives output: ``v1/prices.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["getSpecificPrices"], *args, **kwargs) | Request prices for EC2
Return a list of possible prices for EC2
This method takes input: ``v1/prices-request.json#``
This method gives output: ``v1/prices.json#``
This method is ``experimental`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/ec2manager.py#L173-L186 |
taskcluster/taskcluster-client.py | taskcluster/aio/ec2manager.py | EC2Manager.getHealth | async def getHealth(self, *args, **kwargs):
"""
Get EC2 account health metrics
Give some basic stats on the health of our EC2 account
This method gives output: ``v1/health.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["getHealth"], *args, **kwargs) | python | async def getHealth(self, *args, **kwargs):
"""
Get EC2 account health metrics
Give some basic stats on the health of our EC2 account
This method gives output: ``v1/health.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["getHealth"], *args, **kwargs) | Get EC2 account health metrics
Give some basic stats on the health of our EC2 account
This method gives output: ``v1/health.json#``
This method is ``experimental`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/ec2manager.py#L188-L199 |
taskcluster/taskcluster-client.py | taskcluster/aio/ec2manager.py | EC2Manager.getRecentErrors | async def getRecentErrors(self, *args, **kwargs):
"""
Look up the most recent errors in the provisioner across all worker types
Return a list of recent errors encountered
This method gives output: ``v1/errors.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["getRecentErrors"], *args, **kwargs) | python | async def getRecentErrors(self, *args, **kwargs):
"""
Look up the most recent errors in the provisioner across all worker types
Return a list of recent errors encountered
This method gives output: ``v1/errors.json#``
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["getRecentErrors"], *args, **kwargs) | Look up the most recent errors in the provisioner across all worker types
Return a list of recent errors encountered
This method gives output: ``v1/errors.json#``
This method is ``experimental`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/ec2manager.py#L201-L212 |
taskcluster/taskcluster-client.py | taskcluster/aio/ec2manager.py | EC2Manager.regions | async def regions(self, *args, **kwargs):
"""
See the list of regions managed by this ec2-manager
This method is only for debugging the ec2-manager
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["regions"], *args, **kwargs) | python | async def regions(self, *args, **kwargs):
"""
See the list of regions managed by this ec2-manager
This method is only for debugging the ec2-manager
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["regions"], *args, **kwargs) | See the list of regions managed by this ec2-manager
This method is only for debugging the ec2-manager
This method is ``experimental`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/ec2manager.py#L214-L223 |
taskcluster/taskcluster-client.py | taskcluster/aio/ec2manager.py | EC2Manager.amiUsage | async def amiUsage(self, *args, **kwargs):
"""
See the list of AMIs and their usage
List AMIs and their usage by returning a list of objects in the form:
{
region: string
volumetype: string
lastused: timestamp
}
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["amiUsage"], *args, **kwargs) | python | async def amiUsage(self, *args, **kwargs):
"""
See the list of AMIs and their usage
List AMIs and their usage by returning a list of objects in the form:
{
region: string
volumetype: string
lastused: timestamp
}
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["amiUsage"], *args, **kwargs) | See the list of AMIs and their usage
List AMIs and their usage by returning a list of objects in the form:
{
region: string
volumetype: string
lastused: timestamp
}
This method is ``experimental`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/ec2manager.py#L225-L239 |
taskcluster/taskcluster-client.py | taskcluster/aio/ec2manager.py | EC2Manager.ebsUsage | async def ebsUsage(self, *args, **kwargs):
"""
See the current EBS volume usage list
Lists current EBS volume usage by returning a list of objects
that are uniquely defined by {region, volumetype, state} in the form:
{
region: string,
volumetype: string,
state: string,
totalcount: integer,
totalgb: integer,
touched: timestamp (last time that information was updated),
}
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["ebsUsage"], *args, **kwargs) | python | async def ebsUsage(self, *args, **kwargs):
"""
See the current EBS volume usage list
Lists current EBS volume usage by returning a list of objects
that are uniquely defined by {region, volumetype, state} in the form:
{
region: string,
volumetype: string,
state: string,
totalcount: integer,
totalgb: integer,
touched: timestamp (last time that information was updated),
}
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["ebsUsage"], *args, **kwargs) | See the current EBS volume usage list
Lists current EBS volume usage by returning a list of objects
that are uniquely defined by {region, volumetype, state} in the form:
{
region: string,
volumetype: string,
state: string,
totalcount: integer,
totalgb: integer,
touched: timestamp (last time that information was updated),
}
This method is ``experimental`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/ec2manager.py#L241-L259 |
taskcluster/taskcluster-client.py | taskcluster/aio/ec2manager.py | EC2Manager.dbpoolStats | async def dbpoolStats(self, *args, **kwargs):
"""
Statistics on the Database client pool
This method is only for debugging the ec2-manager
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["dbpoolStats"], *args, **kwargs) | python | async def dbpoolStats(self, *args, **kwargs):
"""
Statistics on the Database client pool
This method is only for debugging the ec2-manager
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["dbpoolStats"], *args, **kwargs) | Statistics on the Database client pool
This method is only for debugging the ec2-manager
This method is ``experimental`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/ec2manager.py#L261-L270 |
taskcluster/taskcluster-client.py | taskcluster/aio/ec2manager.py | EC2Manager.sqsStats | async def sqsStats(self, *args, **kwargs):
"""
Statistics on the sqs queues
This method is only for debugging the ec2-manager
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["sqsStats"], *args, **kwargs) | python | async def sqsStats(self, *args, **kwargs):
"""
Statistics on the sqs queues
This method is only for debugging the ec2-manager
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["sqsStats"], *args, **kwargs) | Statistics on the sqs queues
This method is only for debugging the ec2-manager
This method is ``experimental`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/ec2manager.py#L283-L292 |
taskcluster/taskcluster-client.py | taskcluster/aio/ec2manager.py | EC2Manager.purgeQueues | async def purgeQueues(self, *args, **kwargs):
"""
Purge the SQS queues
This method is only for debugging the ec2-manager
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["purgeQueues"], *args, **kwargs) | python | async def purgeQueues(self, *args, **kwargs):
"""
Purge the SQS queues
This method is only for debugging the ec2-manager
This method is ``experimental``
"""
return await self._makeApiCall(self.funcinfo["purgeQueues"], *args, **kwargs) | Purge the SQS queues
This method is only for debugging the ec2-manager
This method is ``experimental`` | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/aio/ec2manager.py#L294-L303 |
taskcluster/taskcluster-client.py | taskcluster/githubevents.py | GithubEvents.pullRequest | def pullRequest(self, *args, **kwargs):
"""
GitHub Pull Request Event
When a GitHub pull request event is posted it will be broadcast on this
exchange with the designated `organization` and `repository`
in the routing-key along with event specific metadata in the payload.
This exchange outputs: ``v1/github-pull-request-message.json#``This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
* organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
* repository: The GitHub `repository` which had an event.All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
* action: The GitHub `action` which triggered an event. For possible values, see the payload actions property. (required)
"""
ref = {
'exchange': 'pull-request',
'name': 'pullRequest',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'organization',
},
{
'multipleWords': False,
'name': 'repository',
},
{
'multipleWords': False,
'name': 'action',
},
],
'schema': 'v1/github-pull-request-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs) | python | def pullRequest(self, *args, **kwargs):
"""
GitHub Pull Request Event
When a GitHub pull request event is posted it will be broadcast on this
exchange with the designated `organization` and `repository`
in the routing-key along with event specific metadata in the payload.
This exchange outputs: ``v1/github-pull-request-message.json#``This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
* organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
* repository: The GitHub `repository` which had an event.All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
* action: The GitHub `action` which triggered an event. For possible values, see the payload actions property. (required)
"""
ref = {
'exchange': 'pull-request',
'name': 'pullRequest',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'organization',
},
{
'multipleWords': False,
'name': 'repository',
},
{
'multipleWords': False,
'name': 'action',
},
],
'schema': 'v1/github-pull-request-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs) | GitHub Pull Request Event
When a GitHub pull request event is posted it will be broadcast on this
exchange with the designated `organization` and `repository`
in the routing-key along with event specific metadata in the payload.
This exchange outputs: ``v1/github-pull-request-message.json#``This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
* organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
* repository: The GitHub `repository` which had an event.All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
* action: The GitHub `action` which triggered an event. For possible values, see the payload actions property. (required) | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/githubevents.py#L30-L73 |
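Exchange helpers such as `pullRequest` turn keyword arguments into a routing-key pattern for binding queues. A hedged sketch: the `rootUrl` is a placeholder and the keys of the returned dict follow the usual client convention, which is an assumption here.

```python
# Hedged sketch: build a binding for pull-request events of one repository.
import taskcluster

events = taskcluster.GithubEvents({"rootUrl": "https://tc.example.com"})
binding = events.pullRequest(organization="my-org", repository="my-repo")
print(binding["exchange"])
print(binding["routingKeyPattern"])
```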
taskcluster/taskcluster-client.py | taskcluster/githubevents.py | GithubEvents.push | def push(self, *args, **kwargs):
"""
GitHub push Event
When a GitHub push event is posted it will be broadcast on this
exchange with the designated `organization` and `repository`
in the routing-key along with event specific metadata in the payload.
This exchange outputs: ``v1/github-push-message.json#``This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
* organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
* repository: The GitHub `repository` which had an event.All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
"""
ref = {
'exchange': 'push',
'name': 'push',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'organization',
},
{
'multipleWords': False,
'name': 'repository',
},
],
'schema': 'v1/github-push-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs) | python | def push(self, *args, **kwargs):
"""
GitHub push Event
When a GitHub push event is posted it will be broadcast on this
exchange with the designated `organization` and `repository`
in the routing-key along with event specific metadata in the payload.
This exchange outputs: ``v1/github-push-message.json#``This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
* organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
* repository: The GitHub `repository` which had an event.All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
"""
ref = {
'exchange': 'push',
'name': 'push',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'organization',
},
{
'multipleWords': False,
'name': 'repository',
},
],
'schema': 'v1/github-push-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs) | GitHub push Event
When a GitHub push event is posted it will be broadcast on this
exchange with the designated `organization` and `repository`
in the routing-key along with event specific metadata in the payload.
This exchange outputs: ``v1/github-push-message.json#``This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
* organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
* repository: The GitHub `repository` which had an event.All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required) | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/githubevents.py#L75-L112 |
taskcluster/taskcluster-client.py | taskcluster/githubevents.py | GithubEvents.release | def release(self, *args, **kwargs):
"""
GitHub release Event
When a GitHub release event is posted it will be broadcast on this
exchange with the designated `organization` and `repository`
in the routing-key along with event specific metadata in the payload.
This exchange outputs: ``v1/github-release-message.json#``This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
* organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
* repository: The GitHub `repository` which had an event.All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
"""
ref = {
'exchange': 'release',
'name': 'release',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'organization',
},
{
'multipleWords': False,
'name': 'repository',
},
],
'schema': 'v1/github-release-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs) | python | def release(self, *args, **kwargs):
"""
GitHub release Event
When a GitHub release event is posted it will be broadcast on this
exchange with the designated `organization` and `repository`
in the routing-key along with event specific metadata in the payload.
This exchange outputs: ``v1/github-release-message.json#``This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
* organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
* repository: The GitHub `repository` which had an event.All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
"""
ref = {
'exchange': 'release',
'name': 'release',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'organization',
},
{
'multipleWords': False,
'name': 'repository',
},
],
'schema': 'v1/github-release-message.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs) | GitHub release Event
When a GitHub release event is posted it will be broadcast on this
exchange with the designated `organization` and `repository`
in the routing-key along with event specific metadata in the payload.
This exchange outputs: ``v1/github-release-message.json#``This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
* organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
* repository: The GitHub `repository` which had an event.All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required) | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/githubevents.py#L114-L151 |
taskcluster/taskcluster-client.py | taskcluster/githubevents.py | GithubEvents.taskGroupCreationRequested | def taskGroupCreationRequested(self, *args, **kwargs):
"""
tc-gh requested the Queue service to create all the tasks in a group
supposed to signal that the taskCreate API has been called for every task in the task group
for this particular repo and this particular organization.
Currently used for creating initial status indicators in GitHub UI using the Statuses API.
This particular exchange can also be bound to RabbitMQ queues by custom routes - for that,
pass in the array of routes as a second argument to the publish method. Currently, we do
use the statuses routes to bind the handler that creates the initial status.
This exchange outputs: ``v1/task-group-creation-requested.json#``This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
* organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
* repository: The GitHub `repository` which had an event.All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
"""
ref = {
'exchange': 'task-group-creation-requested',
'name': 'taskGroupCreationRequested',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'organization',
},
{
'multipleWords': False,
'name': 'repository',
},
],
'schema': 'v1/task-group-creation-requested.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs) | python | def taskGroupCreationRequested(self, *args, **kwargs):
"""
tc-gh requested the Queue service to create all the tasks in a group
supposed to signal that the taskCreate API has been called for every task in the task group
for this particular repo and this particular organization.
Currently used for creating initial status indicators in GitHub UI using the Statuses API.
This particular exchange can also be bound to RabbitMQ queues by custom routes - for that,
pass in the array of routes as a second argument to the publish method. Currently, we do
use the statuses routes to bind the handler that creates the initial status.
This exchange outputs: ``v1/task-group-creation-requested.json#``This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
* organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
* repository: The GitHub `repository` which had an event.All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
"""
ref = {
'exchange': 'task-group-creation-requested',
'name': 'taskGroupCreationRequested',
'routingKey': [
{
'constant': 'primary',
'multipleWords': False,
'name': 'routingKeyKind',
},
{
'multipleWords': False,
'name': 'organization',
},
{
'multipleWords': False,
'name': 'repository',
},
],
'schema': 'v1/task-group-creation-requested.json#',
}
return self._makeTopicExchange(ref, *args, **kwargs) | tc-gh requested the Queue service to create all the tasks in a group
supposed to signal that the taskCreate API has been called for every task in the task group
for this particular repo and this particular organization.
Currently used for creating initial status indicators in GitHub UI using the Statuses API.
This particular exchange can also be bound to RabbitMQ queues by custom routes - for that,
pass in the array of routes as a second argument to the publish method. Currently, we do
use the statuses routes to bind the handler that creates the initial status.
This exchange outputs: ``v1/task-group-creation-requested.json#``This exchange takes the following keys:
* routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
* organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
* repository: The GitHub `repository` which had an event.All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required) | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/githubevents.py#L153-L193 |
jart/fabulous | fabulous/rotating_cube.py | rotating_cube | def rotating_cube(degree_change=3, frame_rate=3):
"""Rotating cube program
How it works:
1. Create two imaginary ellipses
2. Sized to fit in the top third and bottom third of screen
3. Create four imaginary points on each ellipse
4. Make those points the top and bottom corners of your cube
5. Connect the lines and render
6. Rotate the points on the ellipses and repeat
"""
degrees = 0
while True:
t1 = time.time()
with Frame() as frame:
oval_width = frame.width
oval_height = frame.height / 3.0
cube_height = int(oval_height * 2)
(p1_x, p1_y) = ellipse_point(degrees, oval_width, oval_height)
(p2_x, p2_y) = ellipse_point(degrees + 90, oval_width, oval_height)
(p3_x, p3_y) = ellipse_point(degrees + 180, oval_width, oval_height)
(p4_x, p4_y) = ellipse_point(degrees + 270, oval_width, oval_height)
degrees = (degrees + degree_change) % 360
# connect square thing at top
frame.line(p1_x, p1_y, p2_x, p2_y)
frame.line(p2_x, p2_y, p3_x, p3_y)
frame.line(p3_x, p3_y, p4_x, p4_y)
frame.line(p4_x, p4_y, p1_x, p1_y)
# connect top to bottom
frame.line(p1_x, p1_y, p1_x, p1_y + cube_height)
frame.line(p2_x, p2_y, p2_x, p2_y + cube_height)
frame.line(p3_x, p3_y, p3_x, p3_y + cube_height)
frame.line(p4_x, p4_y, p4_x, p4_y + cube_height)
# connect square thing at bottom
frame.line(p1_x, p1_y + cube_height, p2_x, p2_y + cube_height)
frame.line(p2_x, p2_y + cube_height, p3_x, p3_y + cube_height)
frame.line(p3_x, p3_y + cube_height, p4_x, p4_y + cube_height)
frame.line(p4_x, p4_y + cube_height, p1_x, p1_y + cube_height)
elapsed = (time.time() - t1)
time.sleep(abs(1.0 / frame_rate - elapsed)) | python | def rotating_cube(degree_change=3, frame_rate=3):
"""Rotating cube program
How it works:
1. Create two imaginary ellipses
2. Sized to fit in the top third and bottom third of screen
3. Create four imaginary points on each ellipse
4. Make those points the top and bottom corners of your cube
5. Connect the lines and render
6. Rotate the points on the ellipses and repeat
"""
degrees = 0
while True:
t1 = time.time()
with Frame() as frame:
oval_width = frame.width
oval_height = frame.height / 3.0
cube_height = int(oval_height * 2)
(p1_x, p1_y) = ellipse_point(degrees, oval_width, oval_height)
(p2_x, p2_y) = ellipse_point(degrees + 90, oval_width, oval_height)
(p3_x, p3_y) = ellipse_point(degrees + 180, oval_width, oval_height)
(p4_x, p4_y) = ellipse_point(degrees + 270, oval_width, oval_height)
degrees = (degrees + degree_change) % 360
# connect square thing at top
frame.line(p1_x, p1_y, p2_x, p2_y)
frame.line(p2_x, p2_y, p3_x, p3_y)
frame.line(p3_x, p3_y, p4_x, p4_y)
frame.line(p4_x, p4_y, p1_x, p1_y)
# connect top to bottom
frame.line(p1_x, p1_y, p1_x, p1_y + cube_height)
frame.line(p2_x, p2_y, p2_x, p2_y + cube_height)
frame.line(p3_x, p3_y, p3_x, p3_y + cube_height)
frame.line(p4_x, p4_y, p4_x, p4_y + cube_height)
# connect square thing at bottom
frame.line(p1_x, p1_y + cube_height, p2_x, p2_y + cube_height)
frame.line(p2_x, p2_y + cube_height, p3_x, p3_y + cube_height)
frame.line(p3_x, p3_y + cube_height, p4_x, p4_y + cube_height)
frame.line(p4_x, p4_y + cube_height, p1_x, p1_y + cube_height)
elapsed = (time.time() - t1)
time.sleep(abs(1.0 / frame_rate - elapsed)) | Rotating cube program
How it works:
1. Create two imaginary ellipses
2. Sized to fit in the top third and bottom third of screen
3. Create four imaginary points on each ellipse
4. Make those points the top and bottom corners of your cube
5. Connect the lines and render
6. Rotate the points on the ellipses and repeat | https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/rotating_cube.py#L89-L136 |