repo_name
stringclasses 1
value | pr_number
int64 4.12k
11.2k
| pr_title
stringlengths 9
107
| pr_description
stringlengths 107
5.48k
| author
stringlengths 4
18
| date_created
unknown | date_merged
unknown | previous_commit
stringlengths 40
40
| pr_commit
stringlengths 40
40
| query
stringlengths 118
5.52k
| before_content
stringlengths 0
7.93M
| after_content
stringlengths 0
7.93M
| label
int64 -1
1
|
---|---|---|---|---|---|---|---|---|---|---|---|---|
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
"""
function is the f we want to find its root
x0 and x1 are two random starting points
>>> intersection(lambda x: x ** 3 - 1, -5, 5)
0.9999999999954654
>>> intersection(lambda x: x ** 3 - 1, 5, 5)
Traceback (most recent call last):
...
ZeroDivisionError: float division by zero, could not find root
>>> intersection(lambda x: x ** 3 - 1, 100, 200)
1.0000000000003888
>>> intersection(lambda x: x ** 2 - 4 * x + 3, 0, 2)
0.9999999998088019
>>> intersection(lambda x: x ** 2 - 4 * x + 3, 2, 4)
2.9999999998088023
>>> intersection(lambda x: x ** 2 - 4 * x + 3, 4, 1000)
3.0000000001786042
>>> intersection(math.sin, -math.pi, math.pi)
0.0
>>> intersection(math.cos, -math.pi, math.pi)
Traceback (most recent call last):
...
ZeroDivisionError: float division by zero, could not find root
"""
x_n: float = x0
x_n1: float = x1
while True:
if x_n == x_n1 or function(x_n1) == function(x_n):
raise ZeroDivisionError("float division by zero, could not find root")
x_n2: float = x_n1 - (
function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
)
if abs(x_n2 - x_n1) < 10**-5:
return x_n2
x_n = x_n1
x_n1 = x_n2
def f(x: float) -> float:
return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
| import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
"""
function is the f we want to find its root
x0 and x1 are two random starting points
>>> intersection(lambda x: x ** 3 - 1, -5, 5)
0.9999999999954654
>>> intersection(lambda x: x ** 3 - 1, 5, 5)
Traceback (most recent call last):
...
ZeroDivisionError: float division by zero, could not find root
>>> intersection(lambda x: x ** 3 - 1, 100, 200)
1.0000000000003888
>>> intersection(lambda x: x ** 2 - 4 * x + 3, 0, 2)
0.9999999998088019
>>> intersection(lambda x: x ** 2 - 4 * x + 3, 2, 4)
2.9999999998088023
>>> intersection(lambda x: x ** 2 - 4 * x + 3, 4, 1000)
3.0000000001786042
>>> intersection(math.sin, -math.pi, math.pi)
0.0
>>> intersection(math.cos, -math.pi, math.pi)
Traceback (most recent call last):
...
ZeroDivisionError: float division by zero, could not find root
"""
x_n: float = x0
x_n1: float = x1
while True:
if x_n == x_n1 or function(x_n1) == function(x_n):
raise ZeroDivisionError("float division by zero, could not find root")
x_n2: float = x_n1 - (
function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
)
if abs(x_n2 - x_n1) < 10**-5:
return x_n2
x_n = x_n1
x_n1 = x_n2
def f(x: float) -> float:
return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
Graph Coloring also called "m coloring problem"
consists of coloring a given graph with at most m colors
such that no adjacent vertices are assigned the same color
Wikipedia: https://en.wikipedia.org/wiki/Graph_coloring
"""
def valid_coloring(
neighbours: list[int], colored_vertices: list[int], color: int
) -> bool:
"""
For each neighbour check if the coloring constraint is satisfied
If any of the neighbours fail the constraint return False
If all neighbours validate the constraint return True
>>> neighbours = [0,1,0,1,0]
>>> colored_vertices = [0, 2, 1, 2, 0]
>>> color = 1
>>> valid_coloring(neighbours, colored_vertices, color)
True
>>> color = 2
>>> valid_coloring(neighbours, colored_vertices, color)
False
"""
# Does any neighbour not satisfy the constraints
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(neighbours)
)
def util_color(
graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int
) -> bool:
"""
Pseudo-Code
Base Case:
1. Check if coloring is complete
1.1 If complete return True (meaning that we successfully colored the graph)
Recursive Step:
2. Iterates over each color:
Check if the current coloring is valid:
2.1. Color given vertex
2.2. Do recursive call, check if this coloring leads to a solution
2.4. if current coloring leads to a solution return
2.5. Uncolor given vertex
>>> graph = [[0, 1, 0, 0, 0],
... [1, 0, 1, 0, 1],
... [0, 1, 0, 1, 0],
... [0, 1, 1, 0, 0],
... [0, 1, 0, 0, 0]]
>>> max_colors = 3
>>> colored_vertices = [0, 1, 0, 0, 0]
>>> index = 3
>>> util_color(graph, max_colors, colored_vertices, index)
True
>>> max_colors = 2
>>> util_color(graph, max_colors, colored_vertices, index)
False
"""
# Base Case
if index == len(graph):
return True
# Recursive Step
for i in range(max_colors):
if valid_coloring(graph[index], colored_vertices, i):
# Color current vertex
colored_vertices[index] = i
# Validate coloring
if util_color(graph, max_colors, colored_vertices, index + 1):
return True
# Backtrack
colored_vertices[index] = -1
return False
def color(graph: list[list[int]], max_colors: int) -> list[int]:
"""
Wrapper function to call subroutine called util_color
which will either return True or False.
If True is returned colored_vertices list is filled with correct colorings
>>> graph = [[0, 1, 0, 0, 0],
... [1, 0, 1, 0, 1],
... [0, 1, 0, 1, 0],
... [0, 1, 1, 0, 0],
... [0, 1, 0, 0, 0]]
>>> max_colors = 3
>>> color(graph, max_colors)
[0, 1, 0, 2, 0]
>>> max_colors = 2
>>> color(graph, max_colors)
[]
"""
colored_vertices = [-1] * len(graph)
if util_color(graph, max_colors, colored_vertices, 0):
return colored_vertices
return []
| """
Graph Coloring also called "m coloring problem"
consists of coloring a given graph with at most m colors
such that no adjacent vertices are assigned the same color
Wikipedia: https://en.wikipedia.org/wiki/Graph_coloring
"""
def valid_coloring(
neighbours: list[int], colored_vertices: list[int], color: int
) -> bool:
"""
For each neighbour check if the coloring constraint is satisfied
If any of the neighbours fail the constraint return False
If all neighbours validate the constraint return True
>>> neighbours = [0,1,0,1,0]
>>> colored_vertices = [0, 2, 1, 2, 0]
>>> color = 1
>>> valid_coloring(neighbours, colored_vertices, color)
True
>>> color = 2
>>> valid_coloring(neighbours, colored_vertices, color)
False
"""
# Does any neighbour not satisfy the constraints
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(neighbours)
)
def util_color(
graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int
) -> bool:
"""
Pseudo-Code
Base Case:
1. Check if coloring is complete
1.1 If complete return True (meaning that we successfully colored the graph)
Recursive Step:
2. Iterates over each color:
Check if the current coloring is valid:
2.1. Color given vertex
2.2. Do recursive call, check if this coloring leads to a solution
2.4. if current coloring leads to a solution return
2.5. Uncolor given vertex
>>> graph = [[0, 1, 0, 0, 0],
... [1, 0, 1, 0, 1],
... [0, 1, 0, 1, 0],
... [0, 1, 1, 0, 0],
... [0, 1, 0, 0, 0]]
>>> max_colors = 3
>>> colored_vertices = [0, 1, 0, 0, 0]
>>> index = 3
>>> util_color(graph, max_colors, colored_vertices, index)
True
>>> max_colors = 2
>>> util_color(graph, max_colors, colored_vertices, index)
False
"""
# Base Case
if index == len(graph):
return True
# Recursive Step
for i in range(max_colors):
if valid_coloring(graph[index], colored_vertices, i):
# Color current vertex
colored_vertices[index] = i
# Validate coloring
if util_color(graph, max_colors, colored_vertices, index + 1):
return True
# Backtrack
colored_vertices[index] = -1
return False
def color(graph: list[list[int]], max_colors: int) -> list[int]:
"""
Wrapper function to call subroutine called util_color
which will either return True or False.
If True is returned colored_vertices list is filled with correct colorings
>>> graph = [[0, 1, 0, 0, 0],
... [1, 0, 1, 0, 1],
... [0, 1, 0, 1, 0],
... [0, 1, 1, 0, 0],
... [0, 1, 0, 0, 0]]
>>> max_colors = 3
>>> color(graph, max_colors)
[0, 1, 0, 2, 0]
>>> max_colors = 2
>>> color(graph, max_colors)
[]
"""
colored_vertices = [-1] * len(graph)
if util_color(graph, max_colors, colored_vertices, 0):
return colored_vertices
return []
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
render 3d points for 2d surfaces.
"""
from __future__ import annotations
import math
__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"
def convert_to_2d(
x: float, y: float, z: float, scale: float, distance: float
) -> tuple[float, float]:
"""
Converts 3d point to a 2d drawable point
>>> convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0)
(7.6923076923076925, 15.384615384615385)
>>> convert_to_2d(1, 2, 3, 10, 10)
(7.6923076923076925, 15.384615384615385)
>>> convert_to_2d("1", 2, 3, 10, 10) # '1' is str
Traceback (most recent call last):
...
TypeError: Input values must either be float or int: ['1', 2, 3, 10, 10]
"""
if not all(isinstance(val, (float, int)) for val in locals().values()):
raise TypeError(
"Input values must either be float or int: " f"{list(locals().values())}"
)
projected_x = ((x * distance) / (z + distance)) * scale
projected_y = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
def rotate(
x: float, y: float, z: float, axis: str, angle: float
) -> tuple[float, float, float]:
"""
rotate a point around a certain axis with a certain angle
angle can be any integer between 1, 360 and axis can be any one of
'x', 'y', 'z'
>>> rotate(1.0, 2.0, 3.0, 'y', 90.0)
(3.130524675073759, 2.0, 0.4470070007889556)
>>> rotate(1, 2, 3, "z", 180)
(0.999736015495891, -2.0001319704760485, 3)
>>> rotate('1', 2, 3, "z", 90.0) # '1' is str
Traceback (most recent call last):
...
TypeError: Input values except axis must either be float or int: ['1', 2, 3, 90.0]
>>> rotate(1, 2, 3, "n", 90) # 'n' is not a valid axis
Traceback (most recent call last):
...
ValueError: not a valid axis, choose one of 'x', 'y', 'z'
>>> rotate(1, 2, 3, "x", -90)
(1, -2.5049096187183877, -2.5933429780983657)
>>> rotate(1, 2, 3, "x", 450) # 450 wrap around to 90
(1, 3.5776792428178217, -0.44744970165427644)
"""
if not isinstance(axis, str):
raise TypeError("Axis must be a str")
input_variables = locals()
del input_variables["axis"]
if not all(isinstance(val, (float, int)) for val in input_variables.values()):
raise TypeError(
"Input values except axis must either be float or int: "
f"{list(input_variables.values())}"
)
angle = (angle % 360) / 450 * 180 / math.pi
if axis == "z":
new_x = x * math.cos(angle) - y * math.sin(angle)
new_y = y * math.cos(angle) + x * math.sin(angle)
new_z = z
elif axis == "x":
new_y = y * math.cos(angle) - z * math.sin(angle)
new_z = z * math.cos(angle) + y * math.sin(angle)
new_x = x
elif axis == "y":
new_x = x * math.cos(angle) - z * math.sin(angle)
new_z = z * math.cos(angle) + x * math.sin(angle)
new_y = y
else:
raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
| """
render 3d points for 2d surfaces.
"""
from __future__ import annotations
import math
__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"
def convert_to_2d(
x: float, y: float, z: float, scale: float, distance: float
) -> tuple[float, float]:
"""
Converts 3d point to a 2d drawable point
>>> convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0)
(7.6923076923076925, 15.384615384615385)
>>> convert_to_2d(1, 2, 3, 10, 10)
(7.6923076923076925, 15.384615384615385)
>>> convert_to_2d("1", 2, 3, 10, 10) # '1' is str
Traceback (most recent call last):
...
TypeError: Input values must either be float or int: ['1', 2, 3, 10, 10]
"""
if not all(isinstance(val, (float, int)) for val in locals().values()):
raise TypeError(
"Input values must either be float or int: " f"{list(locals().values())}"
)
projected_x = ((x * distance) / (z + distance)) * scale
projected_y = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
def rotate(
x: float, y: float, z: float, axis: str, angle: float
) -> tuple[float, float, float]:
"""
rotate a point around a certain axis with a certain angle
angle can be any integer between 1, 360 and axis can be any one of
'x', 'y', 'z'
>>> rotate(1.0, 2.0, 3.0, 'y', 90.0)
(3.130524675073759, 2.0, 0.4470070007889556)
>>> rotate(1, 2, 3, "z", 180)
(0.999736015495891, -2.0001319704760485, 3)
>>> rotate('1', 2, 3, "z", 90.0) # '1' is str
Traceback (most recent call last):
...
TypeError: Input values except axis must either be float or int: ['1', 2, 3, 90.0]
>>> rotate(1, 2, 3, "n", 90) # 'n' is not a valid axis
Traceback (most recent call last):
...
ValueError: not a valid axis, choose one of 'x', 'y', 'z'
>>> rotate(1, 2, 3, "x", -90)
(1, -2.5049096187183877, -2.5933429780983657)
>>> rotate(1, 2, 3, "x", 450) # 450 wrap around to 90
(1, 3.5776792428178217, -0.44744970165427644)
"""
if not isinstance(axis, str):
raise TypeError("Axis must be a str")
input_variables = locals()
del input_variables["axis"]
if not all(isinstance(val, (float, int)) for val in input_variables.values()):
raise TypeError(
"Input values except axis must either be float or int: "
f"{list(input_variables.values())}"
)
angle = (angle % 360) / 450 * 180 / math.pi
if axis == "z":
new_x = x * math.cos(angle) - y * math.sin(angle)
new_y = y * math.cos(angle) + x * math.sin(angle)
new_z = z
elif axis == "x":
new_y = y * math.cos(angle) - z * math.sin(angle)
new_z = z * math.cos(angle) + y * math.sin(angle)
new_x = x
elif axis == "y":
new_x = x * math.cos(angle) - z * math.sin(angle)
new_z = z * math.cos(angle) + x * math.sin(angle)
new_y = y
else:
raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
Totient maximum
Problem 69: https://projecteuler.net/problem=69
Euler's Totient function, φ(n) [sometimes called the phi function],
is used to determine the number of numbers less than n which are relatively prime to n.
For example, as 1, 2, 4, 5, 7, and 8,
are all less than nine and relatively prime to nine, φ(9)=6.
n Relatively Prime φ(n) n/φ(n)
2 1 1 2
3 1,2 2 1.5
4 1,3 2 2
5 1,2,3,4 4 1.25
6 1,5 2 3
7 1,2,3,4,5,6 6 1.1666...
8 1,3,5,7 4 2
9 1,2,4,5,7,8 6 1.5
10 1,3,7,9 4 2.5
It can be seen that n=6 produces a maximum n/φ(n) for n ≤ 10.
Find the value of n ≤ 1,000,000 for which n/φ(n) is a maximum.
"""
def solution(n: int = 10**6) -> int:
"""
Returns solution to problem.
Algorithm:
1. Precompute φ(k) for all natural k, k <= n using product formula (wikilink below)
https://en.wikipedia.org/wiki/Euler%27s_totient_function#Euler's_product_formula
2. Find k/φ(k) for all k ≤ n and return the k that attains maximum
>>> solution(10)
6
>>> solution(100)
30
>>> solution(9973)
2310
"""
if n <= 0:
raise ValueError("Please enter an integer greater than 0")
phi = list(range(n + 1))
for number in range(2, n + 1):
if phi[number] == number:
phi[number] -= 1
for multiple in range(number * 2, n + 1, number):
phi[multiple] = (phi[multiple] // number) * (number - 1)
answer = 1
for number in range(1, n + 1):
if (answer / phi[answer]) < (number / phi[number]):
answer = number
return answer
if __name__ == "__main__":
print(solution())
| """
Totient maximum
Problem 69: https://projecteuler.net/problem=69
Euler's Totient function, φ(n) [sometimes called the phi function],
is used to determine the number of numbers less than n which are relatively prime to n.
For example, as 1, 2, 4, 5, 7, and 8,
are all less than nine and relatively prime to nine, φ(9)=6.
n Relatively Prime φ(n) n/φ(n)
2 1 1 2
3 1,2 2 1.5
4 1,3 2 2
5 1,2,3,4 4 1.25
6 1,5 2 3
7 1,2,3,4,5,6 6 1.1666...
8 1,3,5,7 4 2
9 1,2,4,5,7,8 6 1.5
10 1,3,7,9 4 2.5
It can be seen that n=6 produces a maximum n/φ(n) for n ≤ 10.
Find the value of n ≤ 1,000,000 for which n/φ(n) is a maximum.
"""
def solution(n: int = 10**6) -> int:
"""
Returns solution to problem.
Algorithm:
1. Precompute φ(k) for all natural k, k <= n using product formula (wikilink below)
https://en.wikipedia.org/wiki/Euler%27s_totient_function#Euler's_product_formula
2. Find k/φ(k) for all k ≤ n and return the k that attains maximum
>>> solution(10)
6
>>> solution(100)
30
>>> solution(9973)
2310
"""
if n <= 0:
raise ValueError("Please enter an integer greater than 0")
phi = list(range(n + 1))
for number in range(2, n + 1):
if phi[number] == number:
phi[number] -= 1
for multiple in range(number * 2, n + 1, number):
phi[multiple] = (phi[multiple] // number) * (number - 1)
answer = 1
for number in range(1, n + 1):
if (answer / phi[answer]) < (number / phi[number]):
answer = number
return answer
if __name__ == "__main__":
print(solution())
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| class Graph:
def __init__(self, vertex):
self.vertex = vertex
self.graph = [[0] * vertex for i in range(vertex)]
def add_edge(self, u, v):
self.graph[u - 1][v - 1] = 1
self.graph[v - 1][u - 1] = 1
def show(self):
for i in self.graph:
for j in i:
print(j, end=" ")
print(" ")
g = Graph(100)
g.add_edge(1, 4)
g.add_edge(4, 2)
g.add_edge(4, 5)
g.add_edge(2, 5)
g.add_edge(5, 3)
g.show()
| class Graph:
def __init__(self, vertex):
self.vertex = vertex
self.graph = [[0] * vertex for i in range(vertex)]
def add_edge(self, u, v):
self.graph[u - 1][v - 1] = 1
self.graph[v - 1][u - 1] = 1
def show(self):
for i in self.graph:
for j in i:
print(j, end=" ")
print(" ")
g = Graph(100)
g.add_edge(1, 4)
g.add_edge(4, 2)
g.add_edge(4, 5)
g.add_edge(2, 5)
g.add_edge(5, 3)
g.show()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| from __future__ import annotations
def collatz_sequence(n: int) -> list[int]:
"""
Collatz conjecture: start with any positive integer n. The next term is
obtained as follows:
If n term is even, the next term is: n / 2 .
If n is odd, the next term is: 3 * n + 1.
The conjecture states the sequence will always reach 1 for any starting value n.
Example:
>>> collatz_sequence(2.1)
Traceback (most recent call last):
...
Exception: Sequence only defined for natural numbers
>>> collatz_sequence(0)
Traceback (most recent call last):
...
Exception: Sequence only defined for natural numbers
>>> collatz_sequence(43) # doctest: +NORMALIZE_WHITESPACE
[43, 130, 65, 196, 98, 49, 148, 74, 37, 112, 56, 28, 14, 7,
22, 11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1]
"""
if not isinstance(n, int) or n < 1:
raise Exception("Sequence only defined for natural numbers")
sequence = [n]
while n != 1:
n = 3 * n + 1 if n & 1 else n // 2
sequence.append(n)
return sequence
def main():
n = 43
sequence = collatz_sequence(n)
print(sequence)
print(f"collatz sequence from {n} took {len(sequence)} steps.")
if __name__ == "__main__":
main()
| from __future__ import annotations
def collatz_sequence(n: int) -> list[int]:
"""
Collatz conjecture: start with any positive integer n. The next term is
obtained as follows:
If n term is even, the next term is: n / 2 .
If n is odd, the next term is: 3 * n + 1.
The conjecture states the sequence will always reach 1 for any starting value n.
Example:
>>> collatz_sequence(2.1)
Traceback (most recent call last):
...
Exception: Sequence only defined for natural numbers
>>> collatz_sequence(0)
Traceback (most recent call last):
...
Exception: Sequence only defined for natural numbers
>>> collatz_sequence(43) # doctest: +NORMALIZE_WHITESPACE
[43, 130, 65, 196, 98, 49, 148, 74, 37, 112, 56, 28, 14, 7,
22, 11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1]
"""
if not isinstance(n, int) or n < 1:
raise Exception("Sequence only defined for natural numbers")
sequence = [n]
while n != 1:
n = 3 * n + 1 if n & 1 else n // 2
sequence.append(n)
return sequence
def main():
n = 43
sequence = collatz_sequence(n)
print(sequence)
print(f"collatz sequence from {n} took {len(sequence)} steps.")
if __name__ == "__main__":
main()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
"""
Get the Next Greatest Element (NGE) for all elements in a list.
Maximum element present after the current one which is also greater than the
current one.
>>> next_greatest_element_slow(arr) == expect
True
"""
result = []
arr_size = len(arr)
for i in range(arr_size):
next_element: float = -1
for j in range(i + 1, arr_size):
if arr[i] < arr[j]:
next_element = arr[j]
break
result.append(next_element)
return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
"""
Like next_greatest_element_slow() but changes the loops to use
enumerate() instead of range(len()) for the outer loop and
for in a slice of arr for the inner loop.
>>> next_greatest_element_fast(arr) == expect
True
"""
result = []
for i, outer in enumerate(arr):
next_item: float = -1
for inner in arr[i + 1 :]:
if outer < inner:
next_item = inner
break
result.append(next_item)
return result
def next_greatest_element(arr: list[float]) -> list[float]:
"""
Get the Next Greatest Element (NGE) for all elements in a list.
Maximum element present after the current one which is also greater than the
current one.
A naive way to solve this is to take two loops and check for the next bigger
number but that will make the time complexity as O(n^2). The better way to solve
this would be to use a stack to keep track of maximum number giving a linear time
solution.
>>> next_greatest_element(arr) == expect
True
"""
arr_size = len(arr)
stack: list[float] = []
result: list[float] = [-1] * arr_size
for index in reversed(range(arr_size)):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
result[index] = stack[-1]
stack.append(arr[index])
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
setup = (
"from __main__ import arr, next_greatest_element_slow, "
"next_greatest_element_fast, next_greatest_element"
)
print(
"next_greatest_element_slow():",
timeit("next_greatest_element_slow(arr)", setup=setup),
)
print(
"next_greatest_element_fast():",
timeit("next_greatest_element_fast(arr)", setup=setup),
)
print(
" next_greatest_element():",
timeit("next_greatest_element(arr)", setup=setup),
)
| from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
"""
Get the Next Greatest Element (NGE) for all elements in a list.
Maximum element present after the current one which is also greater than the
current one.
>>> next_greatest_element_slow(arr) == expect
True
"""
result = []
arr_size = len(arr)
for i in range(arr_size):
next_element: float = -1
for j in range(i + 1, arr_size):
if arr[i] < arr[j]:
next_element = arr[j]
break
result.append(next_element)
return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
"""
Like next_greatest_element_slow() but changes the loops to use
enumerate() instead of range(len()) for the outer loop and
for in a slice of arr for the inner loop.
>>> next_greatest_element_fast(arr) == expect
True
"""
result = []
for i, outer in enumerate(arr):
next_item: float = -1
for inner in arr[i + 1 :]:
if outer < inner:
next_item = inner
break
result.append(next_item)
return result
def next_greatest_element(arr: list[float]) -> list[float]:
"""
Get the Next Greatest Element (NGE) for all elements in a list.
Maximum element present after the current one which is also greater than the
current one.
A naive way to solve this is to take two loops and check for the next bigger
number but that will make the time complexity as O(n^2). The better way to solve
this would be to use a stack to keep track of maximum number giving a linear time
solution.
>>> next_greatest_element(arr) == expect
True
"""
arr_size = len(arr)
stack: list[float] = []
result: list[float] = [-1] * arr_size
for index in reversed(range(arr_size)):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
result[index] = stack[-1]
stack.append(arr[index])
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
setup = (
"from __main__ import arr, next_greatest_element_slow, "
"next_greatest_element_fast, next_greatest_element"
)
print(
"next_greatest_element_slow():",
timeit("next_greatest_element_slow(arr)", setup=setup),
)
print(
"next_greatest_element_fast():",
timeit("next_greatest_element_fast(arr)", setup=setup),
)
print(
" next_greatest_element():",
timeit("next_greatest_element(arr)", setup=setup),
)
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| INF = float("inf")
class Dinic:
def __init__(self, n):
self.lvl = [0] * n
self.ptr = [0] * n
self.q = [0] * n
self.adj = [[] for _ in range(n)]
"""
Here we will add our edges containing with the following parameters:
vertex closest to source, vertex closest to sink and flow capacity
through that edge ...
"""
def add_edge(self, a, b, c, rcap=0):
self.adj[a].append([b, len(self.adj[b]), c, 0])
self.adj[b].append([a, len(self.adj[a]) - 1, rcap, 0])
# This is a sample depth first search to be used at max_flow
def depth_first_search(self, vertex, sink, flow):
if vertex == sink or not flow:
return flow
for i in range(self.ptr[vertex], len(self.adj[vertex])):
e = self.adj[vertex][i]
if self.lvl[e[0]] == self.lvl[vertex] + 1:
p = self.depth_first_search(e[0], sink, min(flow, e[2] - e[3]))
if p:
self.adj[vertex][i][3] += p
self.adj[e[0]][e[1]][3] -= p
return p
self.ptr[vertex] = self.ptr[vertex] + 1
return 0
# Here we calculate the flow that reaches the sink
def max_flow(self, source, sink):
flow, self.q[0] = 0, source
for l in range(31): # noqa: E741 l = 30 maybe faster for random data
while True:
self.lvl, self.ptr = [0] * len(self.q), [0] * len(self.q)
qi, qe, self.lvl[source] = 0, 1, 1
while qi < qe and not self.lvl[sink]:
v = self.q[qi]
qi += 1
for e in self.adj[v]:
if not self.lvl[e[0]] and (e[2] - e[3]) >> (30 - l):
self.q[qe] = e[0]
qe += 1
self.lvl[e[0]] = self.lvl[v] + 1
p = self.depth_first_search(source, sink, INF)
while p:
flow += p
p = self.depth_first_search(source, sink, INF)
if not self.lvl[sink]:
break
return flow
# Example to use
"""
Will be a bipartite graph, than it has the vertices near the source(4)
and the vertices near the sink(4)
"""
# Here we make a graphs with 10 vertex(source and sink includes)
graph = Dinic(10)
source = 0
sink = 9
"""
Now we add the vertices next to the font in the font with 1 capacity in this edge
(source -> source vertices)
"""
for vertex in range(1, 5):
graph.add_edge(source, vertex, 1)
"""
We will do the same thing for the vertices near the sink, but from vertex to sink
(sink vertices -> sink)
"""
for vertex in range(5, 9):
graph.add_edge(vertex, sink, 1)
"""
Finally we add the verices near the sink to the vertices near the source.
(source vertices -> sink vertices)
"""
for vertex in range(1, 5):
graph.add_edge(vertex, vertex + 4, 1)
# Now we can know that is the maximum flow(source -> sink)
print(graph.max_flow(source, sink))
| INF = float("inf")
class Dinic:
def __init__(self, n):
self.lvl = [0] * n
self.ptr = [0] * n
self.q = [0] * n
self.adj = [[] for _ in range(n)]
"""
Here we will add our edges containing with the following parameters:
vertex closest to source, vertex closest to sink and flow capacity
through that edge ...
"""
def add_edge(self, a, b, c, rcap=0):
self.adj[a].append([b, len(self.adj[b]), c, 0])
self.adj[b].append([a, len(self.adj[a]) - 1, rcap, 0])
# This is a sample depth first search to be used at max_flow
def depth_first_search(self, vertex, sink, flow):
if vertex == sink or not flow:
return flow
for i in range(self.ptr[vertex], len(self.adj[vertex])):
e = self.adj[vertex][i]
if self.lvl[e[0]] == self.lvl[vertex] + 1:
p = self.depth_first_search(e[0], sink, min(flow, e[2] - e[3]))
if p:
self.adj[vertex][i][3] += p
self.adj[e[0]][e[1]][3] -= p
return p
self.ptr[vertex] = self.ptr[vertex] + 1
return 0
# Here we calculate the flow that reaches the sink
def max_flow(self, source, sink):
flow, self.q[0] = 0, source
for l in range(31): # noqa: E741 l = 30 maybe faster for random data
while True:
self.lvl, self.ptr = [0] * len(self.q), [0] * len(self.q)
qi, qe, self.lvl[source] = 0, 1, 1
while qi < qe and not self.lvl[sink]:
v = self.q[qi]
qi += 1
for e in self.adj[v]:
if not self.lvl[e[0]] and (e[2] - e[3]) >> (30 - l):
self.q[qe] = e[0]
qe += 1
self.lvl[e[0]] = self.lvl[v] + 1
p = self.depth_first_search(source, sink, INF)
while p:
flow += p
p = self.depth_first_search(source, sink, INF)
if not self.lvl[sink]:
break
return flow
# Example to use
"""
Will be a bipartite graph, than it has the vertices near the source(4)
and the vertices near the sink(4)
"""
# Here we make a graphs with 10 vertex(source and sink includes)
graph = Dinic(10)
source = 0
sink = 9
"""
Now we add the vertices next to the font in the font with 1 capacity in this edge
(source -> source vertices)
"""
for vertex in range(1, 5):
graph.add_edge(source, vertex, 1)
"""
We will do the same thing for the vertices near the sink, but from vertex to sink
(sink vertices -> sink)
"""
for vertex in range(5, 9):
graph.add_edge(vertex, sink, 1)
"""
Finally we add the verices near the sink to the vertices near the source.
(source vertices -> sink vertices)
"""
for vertex in range(1, 5):
graph.add_edge(vertex, vertex + 4, 1)
# Now we can know that is the maximum flow(source -> sink)
print(graph.max_flow(source, sink))
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| -1 |
||
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
This is a pure Python implementation of the greedy-merge-sort algorithm
reference: https://www.geeksforgeeks.org/optimal-file-merge-patterns/
For doctests run following command:
python3 -m doctest -v greedy_merge_sort.py
Objective
Merge a set of sorted files of different length into a single sorted file.
We need to find an optimal solution, where the resultant file
will be generated in minimum time.
Approach
If the number of sorted files are given, there are many ways
to merge them into a single sorted file.
This merge can be performed pair wise.
To merge a m-record file and a n-record file requires possibly m+n record moves
the optimal choice being,
merge the two smallest files together at each step (greedy approach).
"""
def optimal_merge_pattern(files: list) -> float:
"""Function to merge all the files with optimum cost
Args:
files [list]: A list of sizes of different files to be merged
Returns:
optimal_merge_cost [int]: Optimal cost to merge all those files
Examples:
>>> optimal_merge_pattern([2, 3, 4])
14
>>> optimal_merge_pattern([5, 10, 20, 30, 30])
205
>>> optimal_merge_pattern([8, 8, 8, 8, 8])
96
"""
optimal_merge_cost = 0
while len(files) > 1:
temp = 0
# Consider two files with minimum cost to be merged
for _ in range(2):
min_index = files.index(min(files))
temp += files[min_index]
files.pop(min_index)
files.append(temp)
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
| """
This is a pure Python implementation of the greedy-merge-sort algorithm
reference: https://www.geeksforgeeks.org/optimal-file-merge-patterns/
For doctests run following command:
python3 -m doctest -v greedy_merge_sort.py
Objective
Merge a set of sorted files of different length into a single sorted file.
We need to find an optimal solution, where the resultant file
will be generated in minimum time.
Approach
If the number of sorted files are given, there are many ways
to merge them into a single sorted file.
This merge can be performed pair wise.
To merge a m-record file and a n-record file requires possibly m+n record moves
the optimal choice being,
merge the two smallest files together at each step (greedy approach).
"""
def optimal_merge_pattern(files: list) -> float:
"""Function to merge all the files with optimum cost
Args:
files [list]: A list of sizes of different files to be merged
Returns:
optimal_merge_cost [int]: Optimal cost to merge all those files
Examples:
>>> optimal_merge_pattern([2, 3, 4])
14
>>> optimal_merge_pattern([5, 10, 20, 30, 30])
205
>>> optimal_merge_pattern([8, 8, 8, 8, 8])
96
"""
optimal_merge_cost = 0
while len(files) > 1:
temp = 0
# Consider two files with minimum cost to be merged
for _ in range(2):
min_index = files.index(min(files))
temp += files[min_index]
files.pop(min_index)
files.append(temp)
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| -1 |
||
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
This algorithm (k=33) was first reported by Dan Bernstein many years ago in comp.lang.c
Another version of this algorithm (now favored by Bernstein) uses xor:
hash(i) = hash(i - 1) * 33 ^ str[i];
First Magic constant 33:
It has never been adequately explained.
It's magic because it works better than many other constants, prime or not.
Second Magic Constant 5381:
1. odd number
2. prime number
3. deficient number
4. 001/010/100/000/101 b
source: http://www.cse.yorku.ca/~oz/hash.html
"""
def djb2(s: str) -> int:
"""
Implementation of djb2 hash algorithm that
is popular because of it's magic constants.
>>> djb2('Algorithms')
3782405311
>>> djb2('scramble bits')
1609059040
"""
hash_value = 5381
for x in s:
hash_value = ((hash_value << 5) + hash_value) + ord(x)
return hash_value & 0xFFFFFFFF
| """
This algorithm (k=33) was first reported by Dan Bernstein many years ago in comp.lang.c
Another version of this algorithm (now favored by Bernstein) uses xor:
hash(i) = hash(i - 1) * 33 ^ str[i];
First Magic constant 33:
It has never been adequately explained.
It's magic because it works better than many other constants, prime or not.
Second Magic Constant 5381:
1. odd number
2. prime number
3. deficient number
4. 001/010/100/000/101 b
source: http://www.cse.yorku.ca/~oz/hash.html
"""
def djb2(s: str) -> int:
"""
Implementation of djb2 hash algorithm that
is popular because of it's magic constants.
>>> djb2('Algorithms')
3782405311
>>> djb2('scramble bits')
1609059040
"""
hash_value = 5381
for x in s:
hash_value = ((hash_value << 5) + hash_value) + ord(x)
return hash_value & 0xFFFFFFFF
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| -1 |
||
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
Arithmetic mean
Reference: https://en.wikipedia.org/wiki/Arithmetic_mean
Arithmetic series
Reference: https://en.wikipedia.org/wiki/Arithmetic_series
(The URL above will redirect you to arithmetic progression)
"""
def is_arithmetic_series(series: list) -> bool:
"""
checking whether the input series is arithmetic series or not
>>> is_arithmetic_series([2, 4, 6])
True
>>> is_arithmetic_series([3, 6, 12, 24])
False
>>> is_arithmetic_series([1, 2, 3])
True
>>> is_arithmetic_series(4)
Traceback (most recent call last):
...
ValueError: Input series is not valid, valid series - [2, 4, 6]
>>> is_arithmetic_series([])
Traceback (most recent call last):
...
ValueError: Input list must be a non empty list
"""
if not isinstance(series, list):
raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
if len(series) == 0:
raise ValueError("Input list must be a non empty list")
if len(series) == 1:
return True
common_diff = series[1] - series[0]
for index in range(len(series) - 1):
if series[index + 1] - series[index] != common_diff:
return False
return True
def arithmetic_mean(series: list) -> float:
"""
return the arithmetic mean of series
>>> arithmetic_mean([2, 4, 6])
4.0
>>> arithmetic_mean([3, 6, 9, 12])
7.5
>>> arithmetic_mean(4)
Traceback (most recent call last):
...
ValueError: Input series is not valid, valid series - [2, 4, 6]
>>> arithmetic_mean([4, 8, 1])
4.333333333333333
>>> arithmetic_mean([1, 2, 3])
2.0
>>> arithmetic_mean([])
Traceback (most recent call last):
...
ValueError: Input list must be a non empty list
"""
if not isinstance(series, list):
raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
if len(series) == 0:
raise ValueError("Input list must be a non empty list")
answer = 0
for val in series:
answer += val
return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
| """
Arithmetic mean
Reference: https://en.wikipedia.org/wiki/Arithmetic_mean
Arithmetic series
Reference: https://en.wikipedia.org/wiki/Arithmetic_series
(The URL above will redirect you to arithmetic progression)
"""
def is_arithmetic_series(series: list) -> bool:
"""
checking whether the input series is arithmetic series or not
>>> is_arithmetic_series([2, 4, 6])
True
>>> is_arithmetic_series([3, 6, 12, 24])
False
>>> is_arithmetic_series([1, 2, 3])
True
>>> is_arithmetic_series(4)
Traceback (most recent call last):
...
ValueError: Input series is not valid, valid series - [2, 4, 6]
>>> is_arithmetic_series([])
Traceback (most recent call last):
...
ValueError: Input list must be a non empty list
"""
if not isinstance(series, list):
raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
if len(series) == 0:
raise ValueError("Input list must be a non empty list")
if len(series) == 1:
return True
common_diff = series[1] - series[0]
for index in range(len(series) - 1):
if series[index + 1] - series[index] != common_diff:
return False
return True
def arithmetic_mean(series: list) -> float:
"""
return the arithmetic mean of series
>>> arithmetic_mean([2, 4, 6])
4.0
>>> arithmetic_mean([3, 6, 9, 12])
7.5
>>> arithmetic_mean(4)
Traceback (most recent call last):
...
ValueError: Input series is not valid, valid series - [2, 4, 6]
>>> arithmetic_mean([4, 8, 1])
4.333333333333333
>>> arithmetic_mean([1, 2, 3])
2.0
>>> arithmetic_mean([])
Traceback (most recent call last):
...
ValueError: Input list must be a non empty list
"""
if not isinstance(series, list):
raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
if len(series) == 0:
raise ValueError("Input list must be a non empty list")
answer = 0
for val in series:
answer += val
return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| # Python program to implement Pigeonhole Sorting in python
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
"""
>>> a = [8, 3, 2, 7, 4, 6, 8]
>>> b = sorted(a) # a nondestructive sort
>>> pigeonhole_sort(a) # a destructive sort
>>> a == b
True
"""
# size of range of values in the list (ie, number of pigeonholes we need)
min_val = min(a) # min() finds the minimum value
max_val = max(a) # max() finds the maximum value
size = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
holes = [0] * size
# Populate the pigeonholes.
for x in a:
assert isinstance(x, int), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in an order.
i = 0
for count in range(size):
while holes[count] > 0:
holes[count] -= 1
a[i] = count + min_val
i += 1
def main():
a = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(a)
print("Sorted order is:", " ".join(a))
if __name__ == "__main__":
main()
| # Python program to implement Pigeonhole Sorting in python
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
"""
>>> a = [8, 3, 2, 7, 4, 6, 8]
>>> b = sorted(a) # a nondestructive sort
>>> pigeonhole_sort(a) # a destructive sort
>>> a == b
True
"""
# size of range of values in the list (ie, number of pigeonholes we need)
min_val = min(a) # min() finds the minimum value
max_val = max(a) # max() finds the maximum value
size = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
holes = [0] * size
# Populate the pigeonholes.
for x in a:
assert isinstance(x, int), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in an order.
i = 0
for count in range(size):
while holes[count] > 0:
holes[count] -= 1
a[i] = count + min_val
i += 1
def main():
a = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(a)
print("Sorted order is:", " ".join(a))
if __name__ == "__main__":
main()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| def bin_to_decimal(bin_string: str) -> int:
"""
Convert a binary value to its decimal equivalent
>>> bin_to_decimal("101")
5
>>> bin_to_decimal(" 1010 ")
10
>>> bin_to_decimal("-11101")
-29
>>> bin_to_decimal("0")
0
>>> bin_to_decimal("a")
Traceback (most recent call last):
...
ValueError: Non-binary value was passed to the function
>>> bin_to_decimal("")
Traceback (most recent call last):
...
ValueError: Empty string was passed to the function
>>> bin_to_decimal("39")
Traceback (most recent call last):
...
ValueError: Non-binary value was passed to the function
"""
bin_string = str(bin_string).strip()
if not bin_string:
raise ValueError("Empty string was passed to the function")
is_negative = bin_string[0] == "-"
if is_negative:
bin_string = bin_string[1:]
if not all(char in "01" for char in bin_string):
raise ValueError("Non-binary value was passed to the function")
decimal_number = 0
for char in bin_string:
decimal_number = 2 * decimal_number + int(char)
return -decimal_number if is_negative else decimal_number
if __name__ == "__main__":
from doctest import testmod
testmod()
| def bin_to_decimal(bin_string: str) -> int:
"""
Convert a binary value to its decimal equivalent
>>> bin_to_decimal("101")
5
>>> bin_to_decimal(" 1010 ")
10
>>> bin_to_decimal("-11101")
-29
>>> bin_to_decimal("0")
0
>>> bin_to_decimal("a")
Traceback (most recent call last):
...
ValueError: Non-binary value was passed to the function
>>> bin_to_decimal("")
Traceback (most recent call last):
...
ValueError: Empty string was passed to the function
>>> bin_to_decimal("39")
Traceback (most recent call last):
...
ValueError: Non-binary value was passed to the function
"""
bin_string = str(bin_string).strip()
if not bin_string:
raise ValueError("Empty string was passed to the function")
is_negative = bin_string[0] == "-"
if is_negative:
bin_string = bin_string[1:]
if not all(char in "01" for char in bin_string):
raise ValueError("Non-binary value was passed to the function")
decimal_number = 0
for char in bin_string:
decimal_number = 2 * decimal_number + int(char)
return -decimal_number if is_negative else decimal_number
if __name__ == "__main__":
from doctest import testmod
testmod()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
Sieve of Eratosthones
The sieve of Eratosthenes is an algorithm used to find prime numbers, less than or
equal to a given value.
Illustration:
https://upload.wikimedia.org/wikipedia/commons/b/b9/Sieve_of_Eratosthenes_animation.gif
Reference: https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes
doctest provider: Bruno Simas Hadlich (https://github.com/brunohadlich)
Also thanks to Dmitry (https://github.com/LizardWizzard) for finding the problem
"""
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
"""
Returns a list with all prime numbers up to n.
>>> prime_sieve(50)
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]
>>> prime_sieve(25)
[2, 3, 5, 7, 11, 13, 17, 19, 23]
>>> prime_sieve(10)
[2, 3, 5, 7]
>>> prime_sieve(9)
[2, 3, 5, 7]
>>> prime_sieve(2)
[2]
>>> prime_sieve(1)
[]
"""
if num <= 0:
raise ValueError(f"{num}: Invalid input, please enter a positive integer.")
sieve = [True] * (num + 1)
prime = []
start = 2
end = int(math.sqrt(num))
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(start)
# Set multiples of start be False
for i in range(start * start, num + 1, start):
if sieve[i] is True:
sieve[i] = False
start += 1
for j in range(end + 1, num + 1):
if sieve[j] is True:
prime.append(j)
return prime
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
| """
Sieve of Eratosthones
The sieve of Eratosthenes is an algorithm used to find prime numbers, less than or
equal to a given value.
Illustration:
https://upload.wikimedia.org/wikipedia/commons/b/b9/Sieve_of_Eratosthenes_animation.gif
Reference: https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes
doctest provider: Bruno Simas Hadlich (https://github.com/brunohadlich)
Also thanks to Dmitry (https://github.com/LizardWizzard) for finding the problem
"""
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
"""
Returns a list with all prime numbers up to n.
>>> prime_sieve(50)
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]
>>> prime_sieve(25)
[2, 3, 5, 7, 11, 13, 17, 19, 23]
>>> prime_sieve(10)
[2, 3, 5, 7]
>>> prime_sieve(9)
[2, 3, 5, 7]
>>> prime_sieve(2)
[2]
>>> prime_sieve(1)
[]
"""
if num <= 0:
raise ValueError(f"{num}: Invalid input, please enter a positive integer.")
sieve = [True] * (num + 1)
prime = []
start = 2
end = int(math.sqrt(num))
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(start)
# Set multiples of start be False
for i in range(start * start, num + 1, start):
if sieve[i] is True:
sieve[i] = False
start += 1
for j in range(end + 1, num + 1):
if sieve[j] is True:
prime.append(j)
return prime
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| #!/usr/bin/env python3
"""
Build a simple bare-minimum quantum circuit that starts with a single
qubit (by default, in state 0) and inverts it. Run the experiment 1000
times and print the total count of the states finally observed.
Qiskit Docs: https://qiskit.org/documentation/getting_started.html
"""
import qiskit as q
def single_qubit_measure(qubits: int, classical_bits: int) -> q.result.counts.Counts:
"""
>>> single_qubit_measure(2, 2)
{'11': 1000}
>>> single_qubit_measure(4, 4)
{'0011': 1000}
"""
# Use Aer's qasm_simulator
simulator = q.Aer.get_backend("qasm_simulator")
# Create a Quantum Circuit acting on the q register
circuit = q.QuantumCircuit(qubits, classical_bits)
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0)
circuit.x(1)
# Map the quantum measurement to the classical bits
circuit.measure([0, 1], [0, 1])
# Execute the circuit on the qasm simulator
job = q.execute(circuit, simulator, shots=1000)
# Return the histogram data of the results of the experiment.
return job.result().get_counts(circuit)
if __name__ == "__main__":
counts = single_qubit_measure(2, 2)
print(f"Total count for various states are: {counts}")
| #!/usr/bin/env python3
"""
Build a simple bare-minimum quantum circuit that starts with a single
qubit (by default, in state 0) and inverts it. Run the experiment 1000
times and print the total count of the states finally observed.
Qiskit Docs: https://qiskit.org/documentation/getting_started.html
"""
import qiskit as q
def single_qubit_measure(qubits: int, classical_bits: int) -> q.result.counts.Counts:
"""
>>> single_qubit_measure(2, 2)
{'11': 1000}
>>> single_qubit_measure(4, 4)
{'0011': 1000}
"""
# Use Aer's qasm_simulator
simulator = q.Aer.get_backend("qasm_simulator")
# Create a Quantum Circuit acting on the q register
circuit = q.QuantumCircuit(qubits, classical_bits)
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0)
circuit.x(1)
# Map the quantum measurement to the classical bits
circuit.measure([0, 1], [0, 1])
# Execute the circuit on the qasm simulator
job = q.execute(circuit, simulator, shots=1000)
# Return the histogram data of the results of the experiment.
return job.result().get_counts(circuit)
if __name__ == "__main__":
counts = single_qubit_measure(2, 2)
print(f"Total count for various states are: {counts}")
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| import cv2
import numpy as np
"""
Harris Corner Detector
https://en.wikipedia.org/wiki/Harris_Corner_Detector
"""
class HarrisCorner:
def __init__(self, k: float, window_size: int):
"""
k : is an empirically determined constant in [0.04,0.06]
window_size : neighbourhoods considered
"""
if k in (0.04, 0.06):
self.k = k
self.window_size = window_size
else:
raise ValueError("invalid k value")
def __str__(self) -> str:
return f"Harris Corner detection with k : {self.k}"
def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
"""
Returns the image with corners identified
img_path : path of the image
output : list of the corner positions, image
"""
img = cv2.imread(img_path, 0)
h, w = img.shape
corner_list: list[list[int]] = []
color_img = img.copy()
color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
dy, dx = np.gradient(img)
ixx = dx**2
iyy = dy**2
ixy = dx * dy
k = 0.04
offset = self.window_size // 2
for y in range(offset, h - offset):
for x in range(offset, w - offset):
wxx = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
wyy = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
wxy = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
det = (wxx * wyy) - (wxy**2)
trace = wxx + wyy
r = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r])
color_img.itemset((y, x, 0), 0)
color_img.itemset((y, x, 1), 0)
color_img.itemset((y, x, 2), 255)
return color_img, corner_list
if __name__ == "__main__":
edge_detect = HarrisCorner(0.04, 3)
color_img, _ = edge_detect.detect("path_to_image")
cv2.imwrite("detect.png", color_img)
| import cv2
import numpy as np
"""
Harris Corner Detector
https://en.wikipedia.org/wiki/Harris_Corner_Detector
"""
class HarrisCorner:
def __init__(self, k: float, window_size: int):
"""
k : is an empirically determined constant in [0.04,0.06]
window_size : neighbourhoods considered
"""
if k in (0.04, 0.06):
self.k = k
self.window_size = window_size
else:
raise ValueError("invalid k value")
def __str__(self) -> str:
return f"Harris Corner detection with k : {self.k}"
def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
"""
Returns the image with corners identified
img_path : path of the image
output : list of the corner positions, image
"""
img = cv2.imread(img_path, 0)
h, w = img.shape
corner_list: list[list[int]] = []
color_img = img.copy()
color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
dy, dx = np.gradient(img)
ixx = dx**2
iyy = dy**2
ixy = dx * dy
k = 0.04
offset = self.window_size // 2
for y in range(offset, h - offset):
for x in range(offset, w - offset):
wxx = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
wyy = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
wxy = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
det = (wxx * wyy) - (wxy**2)
trace = wxx + wyy
r = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r])
color_img.itemset((y, x, 0), 0)
color_img.itemset((y, x, 1), 0)
color_img.itemset((y, x, 2), 255)
return color_img, corner_list
if __name__ == "__main__":
edge_detect = HarrisCorner(0.04, 3)
color_img, _ = edge_detect.detect("path_to_image")
cv2.imwrite("detect.png", color_img)
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
You have m types of coins available in infinite quantities
where the value of each coins is given in the array S=[S0,... Sm-1]
Can you determine number of ways of making change for n units using
the given types of coins?
https://www.hackerrank.com/challenges/coin-change/problem
"""
def dp_count(s, n):
"""
>>> dp_count([1, 2, 3], 4)
4
>>> dp_count([1, 2, 3], 7)
8
>>> dp_count([2, 5, 3, 6], 10)
5
>>> dp_count([10], 99)
0
>>> dp_count([4, 5, 6], 0)
1
>>> dp_count([1, 2, 3], -5)
0
"""
if n < 0:
return 0
# table[i] represents the number of ways to get to amount i
table = [0] * (n + 1)
# There is exactly 1 way to get to zero(You pick no coins).
table[0] = 1
# Pick all coins one by one and update table[] values
# after the index greater than or equal to the value of the
# picked coin
for coin_val in s:
for j in range(coin_val, n + 1):
table[j] += table[j - coin_val]
return table[n]
if __name__ == "__main__":
import doctest
doctest.testmod()
| """
You have m types of coins available in infinite quantities
where the value of each coins is given in the array S=[S0,... Sm-1]
Can you determine number of ways of making change for n units using
the given types of coins?
https://www.hackerrank.com/challenges/coin-change/problem
"""
def dp_count(s, n):
"""
>>> dp_count([1, 2, 3], 4)
4
>>> dp_count([1, 2, 3], 7)
8
>>> dp_count([2, 5, 3, 6], 10)
5
>>> dp_count([10], 99)
0
>>> dp_count([4, 5, 6], 0)
1
>>> dp_count([1, 2, 3], -5)
0
"""
if n < 0:
return 0
# table[i] represents the number of ways to get to amount i
table = [0] * (n + 1)
# There is exactly 1 way to get to zero(You pick no coins).
table[0] = 1
# Pick all coins one by one and update table[] values
# after the index greater than or equal to the value of the
# picked coin
for coin_val in s:
for j in range(coin_val, n + 1):
table[j] += table[j - coin_val]
return table[n]
if __name__ == "__main__":
import doctest
doctest.testmod()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
https://en.wikipedia.org/wiki/Doubly_linked_list
"""
class Node:
def __init__(self, data):
self.data = data
self.previous = None
self.next = None
def __str__(self):
return f"{self.data}"
class DoublyLinkedList:
def __init__(self):
self.head = None
self.tail = None
def __iter__(self):
"""
>>> linked_list = DoublyLinkedList()
>>> linked_list.insert_at_head('b')
>>> linked_list.insert_at_head('a')
>>> linked_list.insert_at_tail('c')
>>> tuple(linked_list)
('a', 'b', 'c')
"""
node = self.head
while node:
yield node.data
node = node.next
def __str__(self):
"""
>>> linked_list = DoublyLinkedList()
>>> linked_list.insert_at_tail('a')
>>> linked_list.insert_at_tail('b')
>>> linked_list.insert_at_tail('c')
>>> str(linked_list)
'a->b->c'
"""
return "->".join([str(item) for item in self])
def __len__(self):
"""
>>> linked_list = DoublyLinkedList()
>>> for i in range(0, 5):
... linked_list.insert_at_nth(i, i + 1)
>>> len(linked_list) == 5
True
"""
return len(tuple(iter(self)))
def insert_at_head(self, data):
self.insert_at_nth(0, data)
def insert_at_tail(self, data):
self.insert_at_nth(len(self), data)
def insert_at_nth(self, index: int, data):
"""
>>> linked_list = DoublyLinkedList()
>>> linked_list.insert_at_nth(-1, 666)
Traceback (most recent call last):
....
IndexError: list index out of range
>>> linked_list.insert_at_nth(1, 666)
Traceback (most recent call last):
....
IndexError: list index out of range
>>> linked_list.insert_at_nth(0, 2)
>>> linked_list.insert_at_nth(0, 1)
>>> linked_list.insert_at_nth(2, 4)
>>> linked_list.insert_at_nth(2, 3)
>>> str(linked_list)
'1->2->3->4'
>>> linked_list.insert_at_nth(5, 5)
Traceback (most recent call last):
....
IndexError: list index out of range
"""
if not 0 <= index <= len(self):
raise IndexError("list index out of range")
new_node = Node(data)
if self.head is None:
self.head = self.tail = new_node
elif index == 0:
self.head.previous = new_node
new_node.next = self.head
self.head = new_node
elif index == len(self):
self.tail.next = new_node
new_node.previous = self.tail
self.tail = new_node
else:
temp = self.head
for _ in range(0, index):
temp = temp.next
temp.previous.next = new_node
new_node.previous = temp.previous
new_node.next = temp
temp.previous = new_node
def delete_head(self):
return self.delete_at_nth(0)
def delete_tail(self):
return self.delete_at_nth(len(self) - 1)
def delete_at_nth(self, index: int):
"""
>>> linked_list = DoublyLinkedList()
>>> linked_list.delete_at_nth(0)
Traceback (most recent call last):
....
IndexError: list index out of range
>>> for i in range(0, 5):
... linked_list.insert_at_nth(i, i + 1)
>>> linked_list.delete_at_nth(0) == 1
True
>>> linked_list.delete_at_nth(3) == 5
True
>>> linked_list.delete_at_nth(1) == 3
True
>>> str(linked_list)
'2->4'
>>> linked_list.delete_at_nth(2)
Traceback (most recent call last):
....
IndexError: list index out of range
"""
if not 0 <= index <= len(self) - 1:
raise IndexError("list index out of range")
delete_node = self.head # default first node
if len(self) == 1:
self.head = self.tail = None
elif index == 0:
self.head = self.head.next
self.head.previous = None
elif index == len(self) - 1:
delete_node = self.tail
self.tail = self.tail.previous
self.tail.next = None
else:
temp = self.head
for _ in range(0, index):
temp = temp.next
delete_node = temp
temp.next.previous = temp.previous
temp.previous.next = temp.next
return delete_node.data
def delete(self, data) -> str:
current = self.head
while current.data != data: # Find the position to delete
if current.next:
current = current.next
else: # We have reached the end an no value matches
return "No data matching given value"
if current == self.head:
self.delete_head()
elif current == self.tail:
self.delete_tail()
else: # Before: 1 <--> 2(current) <--> 3
current.previous.next = current.next # 1 --> 3
current.next.previous = current.previous # 1 <--> 3
return data
def is_empty(self):
"""
>>> linked_list = DoublyLinkedList()
>>> linked_list.is_empty()
True
>>> linked_list.insert_at_tail(1)
>>> linked_list.is_empty()
False
"""
return len(self) == 0
def test_doubly_linked_list() -> None:
"""
>>> test_doubly_linked_list()
"""
linked_list = DoublyLinkedList()
assert linked_list.is_empty() is True
assert str(linked_list) == ""
try:
linked_list.delete_head()
raise AssertionError() # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError() # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10):
assert len(linked_list) == i
linked_list.insert_at_nth(i, i + 1)
assert str(linked_list) == "->".join(str(i) for i in range(1, 11))
linked_list.insert_at_head(0)
linked_list.insert_at_tail(11)
assert str(linked_list) == "->".join(str(i) for i in range(0, 12))
assert linked_list.delete_head() == 0
assert linked_list.delete_at_nth(9) == 10
assert linked_list.delete_tail() == 11
assert len(linked_list) == 9
assert str(linked_list) == "->".join(str(i) for i in range(1, 10))
if __name__ == "__main__":
from doctest import testmod
testmod()
| """
https://en.wikipedia.org/wiki/Doubly_linked_list
"""
class Node:
def __init__(self, data):
self.data = data
self.previous = None
self.next = None
def __str__(self):
return f"{self.data}"
class DoublyLinkedList:
def __init__(self):
self.head = None
self.tail = None
def __iter__(self):
"""
>>> linked_list = DoublyLinkedList()
>>> linked_list.insert_at_head('b')
>>> linked_list.insert_at_head('a')
>>> linked_list.insert_at_tail('c')
>>> tuple(linked_list)
('a', 'b', 'c')
"""
node = self.head
while node:
yield node.data
node = node.next
def __str__(self):
"""
>>> linked_list = DoublyLinkedList()
>>> linked_list.insert_at_tail('a')
>>> linked_list.insert_at_tail('b')
>>> linked_list.insert_at_tail('c')
>>> str(linked_list)
'a->b->c'
"""
return "->".join([str(item) for item in self])
def __len__(self):
"""
>>> linked_list = DoublyLinkedList()
>>> for i in range(0, 5):
... linked_list.insert_at_nth(i, i + 1)
>>> len(linked_list) == 5
True
"""
return len(tuple(iter(self)))
def insert_at_head(self, data):
self.insert_at_nth(0, data)
def insert_at_tail(self, data):
self.insert_at_nth(len(self), data)
def insert_at_nth(self, index: int, data):
"""
>>> linked_list = DoublyLinkedList()
>>> linked_list.insert_at_nth(-1, 666)
Traceback (most recent call last):
....
IndexError: list index out of range
>>> linked_list.insert_at_nth(1, 666)
Traceback (most recent call last):
....
IndexError: list index out of range
>>> linked_list.insert_at_nth(0, 2)
>>> linked_list.insert_at_nth(0, 1)
>>> linked_list.insert_at_nth(2, 4)
>>> linked_list.insert_at_nth(2, 3)
>>> str(linked_list)
'1->2->3->4'
>>> linked_list.insert_at_nth(5, 5)
Traceback (most recent call last):
....
IndexError: list index out of range
"""
if not 0 <= index <= len(self):
raise IndexError("list index out of range")
new_node = Node(data)
if self.head is None:
self.head = self.tail = new_node
elif index == 0:
self.head.previous = new_node
new_node.next = self.head
self.head = new_node
elif index == len(self):
self.tail.next = new_node
new_node.previous = self.tail
self.tail = new_node
else:
temp = self.head
for _ in range(0, index):
temp = temp.next
temp.previous.next = new_node
new_node.previous = temp.previous
new_node.next = temp
temp.previous = new_node
def delete_head(self):
return self.delete_at_nth(0)
def delete_tail(self):
return self.delete_at_nth(len(self) - 1)
def delete_at_nth(self, index: int):
"""
>>> linked_list = DoublyLinkedList()
>>> linked_list.delete_at_nth(0)
Traceback (most recent call last):
....
IndexError: list index out of range
>>> for i in range(0, 5):
... linked_list.insert_at_nth(i, i + 1)
>>> linked_list.delete_at_nth(0) == 1
True
>>> linked_list.delete_at_nth(3) == 5
True
>>> linked_list.delete_at_nth(1) == 3
True
>>> str(linked_list)
'2->4'
>>> linked_list.delete_at_nth(2)
Traceback (most recent call last):
....
IndexError: list index out of range
"""
if not 0 <= index <= len(self) - 1:
raise IndexError("list index out of range")
delete_node = self.head # default first node
if len(self) == 1:
self.head = self.tail = None
elif index == 0:
self.head = self.head.next
self.head.previous = None
elif index == len(self) - 1:
delete_node = self.tail
self.tail = self.tail.previous
self.tail.next = None
else:
temp = self.head
for _ in range(0, index):
temp = temp.next
delete_node = temp
temp.next.previous = temp.previous
temp.previous.next = temp.next
return delete_node.data
def delete(self, data) -> str:
current = self.head
while current.data != data: # Find the position to delete
if current.next:
current = current.next
else: # We have reached the end an no value matches
return "No data matching given value"
if current == self.head:
self.delete_head()
elif current == self.tail:
self.delete_tail()
else: # Before: 1 <--> 2(current) <--> 3
current.previous.next = current.next # 1 --> 3
current.next.previous = current.previous # 1 <--> 3
return data
def is_empty(self):
"""
>>> linked_list = DoublyLinkedList()
>>> linked_list.is_empty()
True
>>> linked_list.insert_at_tail(1)
>>> linked_list.is_empty()
False
"""
return len(self) == 0
def test_doubly_linked_list() -> None:
"""
>>> test_doubly_linked_list()
"""
linked_list = DoublyLinkedList()
assert linked_list.is_empty() is True
assert str(linked_list) == ""
try:
linked_list.delete_head()
raise AssertionError() # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError() # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10):
assert len(linked_list) == i
linked_list.insert_at_nth(i, i + 1)
assert str(linked_list) == "->".join(str(i) for i in range(1, 11))
linked_list.insert_at_head(0)
linked_list.insert_at_tail(11)
assert str(linked_list) == "->".join(str(i) for i in range(0, 12))
assert linked_list.delete_head() == 0
assert linked_list.delete_at_nth(9) == 10
assert linked_list.delete_tail() == 11
assert len(linked_list) == 9
assert str(linked_list) == "->".join(str(i) for i in range(1, 10))
if __name__ == "__main__":
from doctest import testmod
testmod()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| #!/usr/bin/env python3
from .hash_table import HashTable
class QuadraticProbing(HashTable):
"""
Basic Hash Table example with open addressing using Quadratic Probing
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _collision_resolution(self, key, data=None):
i = 1
new_key = self.hash_function(key + i * i)
while self.values[new_key] is not None and self.values[new_key] != key:
i += 1
new_key = (
self.hash_function(key + i * i)
if not self.balanced_factor() >= self.lim_charge
else None
)
if new_key is None:
break
return new_key
| #!/usr/bin/env python3
from .hash_table import HashTable
class QuadraticProbing(HashTable):
"""
Basic Hash Table example with open addressing using Quadratic Probing
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _collision_resolution(self, key, data=None):
i = 1
new_key = self.hash_function(key + i * i)
while self.values[new_key] is not None and self.values[new_key] != key:
i += 1
new_key = (
self.hash_function(key + i * i)
if not self.balanced_factor() >= self.lim_charge
else None
)
if new_key is None:
break
return new_key
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| #!/usr/bin/env python3
"""
Deutsch-Josza Algorithm is one of the first examples of a quantum
algorithm that is exponentially faster than any possible deterministic
classical algorithm
Premise:
We are given a hidden Boolean function f,
which takes as input a string of bits, and returns either 0 or 1:
f({x0,x1,x2,...}) -> 0 or 1, where xn is 0 or 1
The property of the given Boolean function is that it is guaranteed to
either be balanced or constant. A constant function returns all 0's
or all 1's for any input, while a balanced function returns 0's for
exactly half of all inputs and 1's for the other half. Our task is to
determine whether the given function is balanced or constant.
References:
- https://en.wikipedia.org/wiki/Deutsch-Jozsa_algorithm
- https://qiskit.org/textbook/ch-algorithms/deutsch-jozsa.html
"""
import numpy as np
import qiskit as q
def dj_oracle(case: str, num_qubits: int) -> q.QuantumCircuit:
"""
Returns a Quantum Circuit for the Oracle function.
The circuit returned can represent balanced or constant function,
according to the arguments passed
"""
# This circuit has num_qubits+1 qubits: the size of the input,
# plus one output qubit
oracle_qc = q.QuantumCircuit(num_qubits + 1)
# First, let's deal with the case in which oracle is balanced
if case == "balanced":
# First generate a random number that tells us which CNOTs to
# wrap in X-gates:
b = np.random.randint(1, 2**num_qubits)
# Next, format 'b' as a binary string of length 'n', padded with zeros:
b_str = format(b, f"0{num_qubits}b")
# Next, we place the first X-gates. Each digit in our binary string
# correspopnds to a qubit, if the digit is 0, we do nothing, if it's 1
# we apply an X-gate to that qubit:
for index, bit in enumerate(b_str):
if bit == "1":
oracle_qc.x(index)
# Do the controlled-NOT gates for each qubit, using the output qubit
# as the target:
for index in range(num_qubits):
oracle_qc.cx(index, num_qubits)
# Next, place the final X-gates
for index, bit in enumerate(b_str):
if bit == "1":
oracle_qc.x(index)
# Case in which oracle is constant
if case == "constant":
# First decide what the fixed output of the oracle will be
# (either always 0 or always 1)
output = np.random.randint(2)
if output == 1:
oracle_qc.x(num_qubits)
oracle_gate = oracle_qc.to_gate()
oracle_gate.name = "Oracle" # To show when we display the circuit
return oracle_gate
def dj_algorithm(oracle: q.QuantumCircuit, num_qubits: int) -> q.QuantumCircuit:
"""
Returns the complete Deustch-Jozsa Quantum Circuit,
adding Input & Output registers and Hadamard & Measurement Gates,
to the Oracle Circuit passed in arguments
"""
dj_circuit = q.QuantumCircuit(num_qubits + 1, num_qubits)
# Set up the output qubit:
dj_circuit.x(num_qubits)
dj_circuit.h(num_qubits)
# And set up the input register:
for qubit in range(num_qubits):
dj_circuit.h(qubit)
# Let's append the oracle gate to our circuit:
dj_circuit.append(oracle, range(num_qubits + 1))
# Finally, perform the H-gates again and measure:
for qubit in range(num_qubits):
dj_circuit.h(qubit)
for i in range(num_qubits):
dj_circuit.measure(i, i)
return dj_circuit
def deutsch_jozsa(case: str, num_qubits: int) -> q.result.counts.Counts:
"""
Main function that builds the circuit using other helper functions,
runs the experiment 1000 times & returns the resultant qubit counts
>>> deutsch_jozsa("constant", 3)
{'000': 1000}
>>> deutsch_jozsa("balanced", 3)
{'111': 1000}
"""
# Use Aer's qasm_simulator
simulator = q.Aer.get_backend("qasm_simulator")
oracle_gate = dj_oracle(case, num_qubits)
dj_circuit = dj_algorithm(oracle_gate, num_qubits)
# Execute the circuit on the qasm simulator
job = q.execute(dj_circuit, simulator, shots=1000)
# Return the histogram data of the results of the experiment.
return job.result().get_counts(dj_circuit)
if __name__ == "__main__":
print(f"Deutsch Jozsa - Constant Oracle: {deutsch_jozsa('constant', 3)}")
print(f"Deutsch Jozsa - Balanced Oracle: {deutsch_jozsa('balanced', 3)}")
| #!/usr/bin/env python3
"""
Deutsch-Josza Algorithm is one of the first examples of a quantum
algorithm that is exponentially faster than any possible deterministic
classical algorithm
Premise:
We are given a hidden Boolean function f,
which takes as input a string of bits, and returns either 0 or 1:
f({x0,x1,x2,...}) -> 0 or 1, where xn is 0 or 1
The property of the given Boolean function is that it is guaranteed to
either be balanced or constant. A constant function returns all 0's
or all 1's for any input, while a balanced function returns 0's for
exactly half of all inputs and 1's for the other half. Our task is to
determine whether the given function is balanced or constant.
References:
- https://en.wikipedia.org/wiki/Deutsch-Jozsa_algorithm
- https://qiskit.org/textbook/ch-algorithms/deutsch-jozsa.html
"""
import numpy as np
import qiskit as q
def dj_oracle(case: str, num_qubits: int) -> q.QuantumCircuit:
"""
Returns a Quantum Circuit for the Oracle function.
The circuit returned can represent balanced or constant function,
according to the arguments passed
"""
# This circuit has num_qubits+1 qubits: the size of the input,
# plus one output qubit
oracle_qc = q.QuantumCircuit(num_qubits + 1)
# First, let's deal with the case in which oracle is balanced
if case == "balanced":
# First generate a random number that tells us which CNOTs to
# wrap in X-gates:
b = np.random.randint(1, 2**num_qubits)
# Next, format 'b' as a binary string of length 'n', padded with zeros:
b_str = format(b, f"0{num_qubits}b")
# Next, we place the first X-gates. Each digit in our binary string
# correspopnds to a qubit, if the digit is 0, we do nothing, if it's 1
# we apply an X-gate to that qubit:
for index, bit in enumerate(b_str):
if bit == "1":
oracle_qc.x(index)
# Do the controlled-NOT gates for each qubit, using the output qubit
# as the target:
for index in range(num_qubits):
oracle_qc.cx(index, num_qubits)
# Next, place the final X-gates
for index, bit in enumerate(b_str):
if bit == "1":
oracle_qc.x(index)
# Case in which oracle is constant
if case == "constant":
# First decide what the fixed output of the oracle will be
# (either always 0 or always 1)
output = np.random.randint(2)
if output == 1:
oracle_qc.x(num_qubits)
oracle_gate = oracle_qc.to_gate()
oracle_gate.name = "Oracle" # To show when we display the circuit
return oracle_gate
def dj_algorithm(oracle: q.QuantumCircuit, num_qubits: int) -> q.QuantumCircuit:
"""
Returns the complete Deustch-Jozsa Quantum Circuit,
adding Input & Output registers and Hadamard & Measurement Gates,
to the Oracle Circuit passed in arguments
"""
dj_circuit = q.QuantumCircuit(num_qubits + 1, num_qubits)
# Set up the output qubit:
dj_circuit.x(num_qubits)
dj_circuit.h(num_qubits)
# And set up the input register:
for qubit in range(num_qubits):
dj_circuit.h(qubit)
# Let's append the oracle gate to our circuit:
dj_circuit.append(oracle, range(num_qubits + 1))
# Finally, perform the H-gates again and measure:
for qubit in range(num_qubits):
dj_circuit.h(qubit)
for i in range(num_qubits):
dj_circuit.measure(i, i)
return dj_circuit
def deutsch_jozsa(case: str, num_qubits: int) -> q.result.counts.Counts:
"""
Main function that builds the circuit using other helper functions,
runs the experiment 1000 times & returns the resultant qubit counts
>>> deutsch_jozsa("constant", 3)
{'000': 1000}
>>> deutsch_jozsa("balanced", 3)
{'111': 1000}
"""
# Use Aer's qasm_simulator
simulator = q.Aer.get_backend("qasm_simulator")
oracle_gate = dj_oracle(case, num_qubits)
dj_circuit = dj_algorithm(oracle_gate, num_qubits)
# Execute the circuit on the qasm simulator
job = q.execute(dj_circuit, simulator, shots=1000)
# Return the histogram data of the results of the experiment.
return job.result().get_counts(dj_circuit)
if __name__ == "__main__":
print(f"Deutsch Jozsa - Constant Oracle: {deutsch_jozsa('constant', 3)}")
print(f"Deutsch Jozsa - Balanced Oracle: {deutsch_jozsa('balanced', 3)}")
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
Project Euler Problem 129: https://projecteuler.net/problem=129
A number consisting entirely of ones is called a repunit. We shall define R(k) to be
a repunit of length k; for example, R(6) = 111111.
Given that n is a positive integer and GCD(n, 10) = 1, it can be shown that there
always exists a value, k, for which R(k) is divisible by n, and let A(n) be the least
such value of k; for example, A(7) = 6 and A(41) = 5.
The least value of n for which A(n) first exceeds ten is 17.
Find the least value of n for which A(n) first exceeds one-million.
"""
def least_divisible_repunit(divisor: int) -> int:
"""
Return the least value k such that the Repunit of length k is divisible by divisor.
>>> least_divisible_repunit(7)
6
>>> least_divisible_repunit(41)
5
>>> least_divisible_repunit(1234567)
34020
"""
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
repunit = 1
repunit_index = 1
while repunit:
repunit = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def solution(limit: int = 1000000) -> int:
"""
Return the least value of n for which least_divisible_repunit(n)
first exceeds limit.
>>> solution(10)
17
>>> solution(100)
109
>>> solution(1000)
1017
"""
divisor = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(divisor) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(f"{solution() = }")
| """
Project Euler Problem 129: https://projecteuler.net/problem=129
A number consisting entirely of ones is called a repunit. We shall define R(k) to be
a repunit of length k; for example, R(6) = 111111.
Given that n is a positive integer and GCD(n, 10) = 1, it can be shown that there
always exists a value, k, for which R(k) is divisible by n, and let A(n) be the least
such value of k; for example, A(7) = 6 and A(41) = 5.
The least value of n for which A(n) first exceeds ten is 17.
Find the least value of n for which A(n) first exceeds one-million.
"""
def least_divisible_repunit(divisor: int) -> int:
"""
Return the least value k such that the Repunit of length k is divisible by divisor.
>>> least_divisible_repunit(7)
6
>>> least_divisible_repunit(41)
5
>>> least_divisible_repunit(1234567)
34020
"""
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
repunit = 1
repunit_index = 1
while repunit:
repunit = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def solution(limit: int = 1000000) -> int:
"""
Return the least value of n for which least_divisible_repunit(n)
first exceeds limit.
>>> solution(10)
17
>>> solution(100)
109
>>> solution(1000)
1017
"""
divisor = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(divisor) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(f"{solution() = }")
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| def is_palindrome(head):
if not head:
return True
# split the list to two parts
fast, slow = head.next, head
while fast and fast.next:
fast = fast.next.next
slow = slow.next
second = slow.next
slow.next = None # Don't forget here! But forget still works!
# reverse the second part
node = None
while second:
nxt = second.next
second.next = node
node = second
second = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
node = node.next
head = head.next
return True
def is_palindrome_stack(head):
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
slow = fast = cur = head
while fast and fast.next:
fast, slow = fast.next.next, slow.next
# 2. Push the second half into the stack
stack = [slow.val]
while slow.next:
slow = slow.next
stack.append(slow.val)
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
cur = cur.next
return True
def is_palindrome_dict(head):
if not head or not head.next:
return True
d = {}
pos = 0
while head:
if head.val in d.keys():
d[head.val].append(pos)
else:
d[head.val] = [pos]
head = head.next
pos += 1
checksum = pos - 1
middle = 0
for v in d.values():
if len(v) % 2 != 0:
middle += 1
else:
step = 0
for i in range(0, len(v)):
if v[i] + v[len(v) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
| def is_palindrome(head):
if not head:
return True
# split the list to two parts
fast, slow = head.next, head
while fast and fast.next:
fast = fast.next.next
slow = slow.next
second = slow.next
slow.next = None # Don't forget here! But forget still works!
# reverse the second part
node = None
while second:
nxt = second.next
second.next = node
node = second
second = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
node = node.next
head = head.next
return True
def is_palindrome_stack(head):
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
slow = fast = cur = head
while fast and fast.next:
fast, slow = fast.next.next, slow.next
# 2. Push the second half into the stack
stack = [slow.val]
while slow.next:
slow = slow.next
stack.append(slow.val)
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
cur = cur.next
return True
def is_palindrome_dict(head):
if not head or not head.next:
return True
d = {}
pos = 0
while head:
if head.val in d.keys():
d[head.val].append(pos)
else:
d[head.val] = [pos]
head = head.next
pos += 1
checksum = pos - 1
middle = 0
for v in d.values():
if len(v) % 2 != 0:
middle += 1
else:
step = 0
for i in range(0, len(v)):
if v[i] + v[len(v) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
This script demonstrates the implementation of the Sigmoid function.
The function takes a vector of K real numbers as input and then 1 / (1 + exp(-x)).
After through Sigmoid, the element of the vector mostly 0 between 1. or 1 between -1.
Script inspired from its corresponding Wikipedia article
https://en.wikipedia.org/wiki/Sigmoid_function
"""
import numpy as np
def sigmoid(vector: np.array) -> np.array:
"""
Implements the sigmoid function
Parameters:
vector (np.array): A numpy array of shape (1,n)
consisting of real values
Returns:
sigmoid_vec (np.array): The input numpy array, after applying
sigmoid.
Examples:
>>> sigmoid(np.array([-1.0, 1.0, 2.0]))
array([0.26894142, 0.73105858, 0.88079708])
>>> sigmoid(np.array([0.0]))
array([0.5])
"""
return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
| """
This script demonstrates the implementation of the Sigmoid function.
The function takes a vector of K real numbers as input and then 1 / (1 + exp(-x)).
After through Sigmoid, the element of the vector mostly 0 between 1. or 1 between -1.
Script inspired from its corresponding Wikipedia article
https://en.wikipedia.org/wiki/Sigmoid_function
"""
import numpy as np
def sigmoid(vector: np.array) -> np.array:
"""
Implements the sigmoid function
Parameters:
vector (np.array): A numpy array of shape (1,n)
consisting of real values
Returns:
sigmoid_vec (np.array): The input numpy array, after applying
sigmoid.
Examples:
>>> sigmoid(np.array([-1.0, 1.0, 2.0]))
array([0.26894142, 0.73105858, 0.88079708])
>>> sigmoid(np.array([0.0]))
array([0.5])
"""
return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """example of simple chaos machine"""
# Chaos Machine (K, t, m)
K = [0.33, 0.44, 0.55, 0.44, 0.33]
t = 3
m = 5
# Buffer Space (with Parameters Space)
buffer_space: list[float] = []
params_space: list[float] = []
# Machine Time
machine_time = 0
def push(seed):
global buffer_space, params_space, machine_time, K, m, t
# Choosing Dynamical Systems (All)
for key, value in enumerate(buffer_space):
# Evolution Parameter
e = float(seed / value)
# Control Theory: Orbit Change
value = (buffer_space[(key + 1) % m] + e) % 1
# Control Theory: Trajectory Change
r = (params_space[key] + e) % 1 + 3
# Modification (Transition Function) - Jumps
buffer_space[key] = round(float(r * value * (1 - value)), 10)
params_space[key] = r # Saving to Parameters Space
# Logistic Map
assert max(buffer_space) < 1
assert max(params_space) < 4
# Machine Time
machine_time += 1
def pull():
global buffer_space, params_space, machine_time, K, m, t
# PRNG (Xorshift by George Marsaglia)
def xorshift(x, y):
x ^= y >> 13
y ^= x << 17
x ^= y >> 5
return x
# Choosing Dynamical Systems (Increment)
key = machine_time % m
# Evolution (Time Length)
for _ in range(0, t):
# Variables (Position + Parameters)
r = params_space[key]
value = buffer_space[key]
# Modification (Transition Function) - Flow
buffer_space[key] = round(float(r * value * (1 - value)), 10)
params_space[key] = (machine_time * 0.01 + r * 1.01) % 1 + 3
# Choosing Chaotic Data
x = int(buffer_space[(key + 2) % m] * (10**10))
y = int(buffer_space[(key - 2) % m] * (10**10))
# Machine Time
machine_time += 1
return xorshift(x, y) % 0xFFFFFFFF
def reset():
global buffer_space, params_space, machine_time, K, m, t
buffer_space = K
params_space = [0] * m
machine_time = 0
if __name__ == "__main__":
# Initialization
reset()
# Pushing Data (Input)
import random
message = random.sample(range(0xFFFFFFFF), 100)
for chunk in message:
push(chunk)
# for controlling
inp = ""
# Pulling Data (Output)
while inp in ("e", "E"):
print(f"{format(pull(), '#04x')}")
print(buffer_space)
print(params_space)
inp = input("(e)exit? ").strip()
| """example of simple chaos machine"""
# Chaos Machine (K, t, m)
K = [0.33, 0.44, 0.55, 0.44, 0.33]
t = 3
m = 5
# Buffer Space (with Parameters Space)
buffer_space: list[float] = []
params_space: list[float] = []
# Machine Time
machine_time = 0
def push(seed):
global buffer_space, params_space, machine_time, K, m, t
# Choosing Dynamical Systems (All)
for key, value in enumerate(buffer_space):
# Evolution Parameter
e = float(seed / value)
# Control Theory: Orbit Change
value = (buffer_space[(key + 1) % m] + e) % 1
# Control Theory: Trajectory Change
r = (params_space[key] + e) % 1 + 3
# Modification (Transition Function) - Jumps
buffer_space[key] = round(float(r * value * (1 - value)), 10)
params_space[key] = r # Saving to Parameters Space
# Logistic Map
assert max(buffer_space) < 1
assert max(params_space) < 4
# Machine Time
machine_time += 1
def pull():
global buffer_space, params_space, machine_time, K, m, t
# PRNG (Xorshift by George Marsaglia)
def xorshift(x, y):
x ^= y >> 13
y ^= x << 17
x ^= y >> 5
return x
# Choosing Dynamical Systems (Increment)
key = machine_time % m
# Evolution (Time Length)
for _ in range(0, t):
# Variables (Position + Parameters)
r = params_space[key]
value = buffer_space[key]
# Modification (Transition Function) - Flow
buffer_space[key] = round(float(r * value * (1 - value)), 10)
params_space[key] = (machine_time * 0.01 + r * 1.01) % 1 + 3
# Choosing Chaotic Data
x = int(buffer_space[(key + 2) % m] * (10**10))
y = int(buffer_space[(key - 2) % m] * (10**10))
# Machine Time
machine_time += 1
return xorshift(x, y) % 0xFFFFFFFF
def reset():
global buffer_space, params_space, machine_time, K, m, t
buffer_space = K
params_space = [0] * m
machine_time = 0
if __name__ == "__main__":
# Initialization
reset()
# Pushing Data (Input)
import random
message = random.sample(range(0xFFFFFFFF), 100)
for chunk in message:
push(chunk)
# for controlling
inp = ""
# Pulling Data (Output)
while inp in ("e", "E"):
print(f"{format(pull(), '#04x')}")
print(buffer_space)
print(params_space)
inp = input("(e)exit? ").strip()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| # Video Explanation: https://www.youtube.com/watch?v=6w60Zi1NtL8&feature=emb_logo
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
"""
Find the maximum non-adjacent sum of the integers in the nums input list
>>> print(maximum_non_adjacent_sum([1, 2, 3]))
4
>>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
18
>>> maximum_non_adjacent_sum([-1, -5, -3, -7, -2, -2, -6])
0
>>> maximum_non_adjacent_sum([499, 500, -3, -7, -2, -2, -6])
500
"""
if not nums:
return 0
max_including = nums[0]
max_excluding = 0
for num in nums[1:]:
max_including, max_excluding = (
max_excluding + num,
max(max_including, max_excluding),
)
return max(max_excluding, max_including)
if __name__ == "__main__":
import doctest
doctest.testmod()
| # Video Explanation: https://www.youtube.com/watch?v=6w60Zi1NtL8&feature=emb_logo
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
"""
Find the maximum non-adjacent sum of the integers in the nums input list
>>> print(maximum_non_adjacent_sum([1, 2, 3]))
4
>>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
18
>>> maximum_non_adjacent_sum([-1, -5, -3, -7, -2, -2, -6])
0
>>> maximum_non_adjacent_sum([499, 500, -3, -7, -2, -2, -6])
500
"""
if not nums:
return 0
max_including = nums[0]
max_excluding = 0
for num in nums[1:]:
max_including, max_excluding = (
max_excluding + num,
max(max_including, max_excluding),
)
return max(max_excluding, max_including)
if __name__ == "__main__":
import doctest
doctest.testmod()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| -1 |
||
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| import math
def perfect_square(num: int) -> bool:
"""
Check if a number is perfect square number or not
:param num: the number to be checked
:return: True if number is square number, otherwise False
>>> perfect_square(9)
True
>>> perfect_square(16)
True
>>> perfect_square(1)
True
>>> perfect_square(0)
True
>>> perfect_square(10)
False
"""
return math.sqrt(num) * math.sqrt(num) == num
def perfect_square_binary_search(n: int) -> bool:
"""
Check if a number is perfect square using binary search.
Time complexity : O(Log(n))
Space complexity: O(1)
>>> perfect_square_binary_search(9)
True
>>> perfect_square_binary_search(16)
True
>>> perfect_square_binary_search(1)
True
>>> perfect_square_binary_search(0)
True
>>> perfect_square_binary_search(10)
False
>>> perfect_square_binary_search(-1)
False
>>> perfect_square_binary_search(1.1)
False
>>> perfect_square_binary_search("a")
Traceback (most recent call last):
...
TypeError: '<=' not supported between instances of 'int' and 'str'
>>> perfect_square_binary_search(None)
Traceback (most recent call last):
...
TypeError: '<=' not supported between instances of 'int' and 'NoneType'
>>> perfect_square_binary_search([])
Traceback (most recent call last):
...
TypeError: '<=' not supported between instances of 'int' and 'list'
"""
left = 0
right = n
while left <= right:
mid = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
right = mid - 1
else:
left = mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| import math
def perfect_square(num: int) -> bool:
"""
Check if a number is perfect square number or not
:param num: the number to be checked
:return: True if number is square number, otherwise False
>>> perfect_square(9)
True
>>> perfect_square(16)
True
>>> perfect_square(1)
True
>>> perfect_square(0)
True
>>> perfect_square(10)
False
"""
return math.sqrt(num) * math.sqrt(num) == num
def perfect_square_binary_search(n: int) -> bool:
"""
Check if a number is perfect square using binary search.
Time complexity : O(Log(n))
Space complexity: O(1)
>>> perfect_square_binary_search(9)
True
>>> perfect_square_binary_search(16)
True
>>> perfect_square_binary_search(1)
True
>>> perfect_square_binary_search(0)
True
>>> perfect_square_binary_search(10)
False
>>> perfect_square_binary_search(-1)
False
>>> perfect_square_binary_search(1.1)
False
>>> perfect_square_binary_search("a")
Traceback (most recent call last):
...
TypeError: '<=' not supported between instances of 'int' and 'str'
>>> perfect_square_binary_search(None)
Traceback (most recent call last):
...
TypeError: '<=' not supported between instances of 'int' and 'NoneType'
>>> perfect_square_binary_search([])
Traceback (most recent call last):
...
TypeError: '<=' not supported between instances of 'int' and 'list'
"""
left = 0
right = n
while left <= right:
mid = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
right = mid - 1
else:
left = mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| import math
def rearrange(bit_string_32):
"""[summary]
Regroups the given binary string.
Arguments:
bitString32 {[string]} -- [32 bit binary]
Raises:
ValueError -- [if the given string not are 32 bit binary string]
Returns:
[string] -- [32 bit binary string]
>>> rearrange('1234567890abcdfghijklmnopqrstuvw')
'pqrstuvwhijklmno90abcdfg12345678'
"""
if len(bit_string_32) != 32:
raise ValueError("Need length 32")
new_string = ""
for i in [3, 2, 1, 0]:
new_string += bit_string_32[8 * i : 8 * i + 8]
return new_string
def reformat_hex(i):
"""[summary]
Converts the given integer into 8-digit hex number.
Arguments:
i {[int]} -- [integer]
>>> reformat_hex(666)
'9a020000'
"""
hexrep = format(i, "08x")
thing = ""
for i in [3, 2, 1, 0]:
thing += hexrep[2 * i : 2 * i + 2]
return thing
def pad(bit_string):
"""[summary]
Fills up the binary string to a 512 bit binary string
Arguments:
bitString {[string]} -- [binary string]
Returns:
[string] -- [binary string]
"""
start_length = len(bit_string)
bit_string += "1"
while len(bit_string) % 512 != 448:
bit_string += "0"
last_part = format(start_length, "064b")
bit_string += rearrange(last_part[32:]) + rearrange(last_part[:32])
return bit_string
def get_block(bit_string):
"""[summary]
Iterator:
Returns by each call a list of length 16 with the 32 bit
integer blocks.
Arguments:
bit_string {[string]} -- [binary string >= 512]
"""
curr_pos = 0
while curr_pos < len(bit_string):
curr_part = bit_string[curr_pos : curr_pos + 512]
my_splits = []
for i in range(16):
my_splits.append(int(rearrange(curr_part[32 * i : 32 * i + 32]), 2))
yield my_splits
curr_pos += 512
def not32(i):
"""
>>> not32(34)
4294967261
"""
i_str = format(i, "032b")
new_str = ""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(new_str, 2)
def sum32(a, b):
return (a + b) % 2**32
def leftrot32(i, s):
return (i << s) ^ (i >> (32 - s))
def md5me(test_string):
"""[summary]
Returns a 32-bit hash code of the string 'testString'
Arguments:
testString {[string]} -- [message]
"""
bs = ""
for i in test_string:
bs += format(ord(i), "08b")
bs = pad(bs)
tvals = [int(2**32 * abs(math.sin(i + 1))) for i in range(64)]
a0 = 0x67452301
b0 = 0xEFCDAB89
c0 = 0x98BADCFE
d0 = 0x10325476
s = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
for m in get_block(bs):
a = a0
b = b0
c = c0
d = d0
for i in range(64):
if i <= 15:
# f = (B & C) | (not32(B) & D)
f = d ^ (b & (c ^ d))
g = i
elif i <= 31:
# f = (D & B) | (not32(D) & C)
f = c ^ (d & (b ^ c))
g = (5 * i + 1) % 16
elif i <= 47:
f = b ^ c ^ d
g = (3 * i + 5) % 16
else:
f = c ^ (b | not32(d))
g = (7 * i) % 16
dtemp = d
d = c
c = b
b = sum32(b, leftrot32((a + f + tvals[i] + m[g]) % 2**32, s[i]))
a = dtemp
a0 = sum32(a0, a)
b0 = sum32(b0, b)
c0 = sum32(c0, c)
d0 = sum32(d0, d)
digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
return digest
def test():
assert md5me("") == "d41d8cd98f00b204e9800998ecf8427e"
assert (
md5me("The quick brown fox jumps over the lazy dog")
== "9e107d9d372bb6826bd81d3542a419d6"
)
print("Success.")
if __name__ == "__main__":
test()
import doctest
doctest.testmod()
| import math
def rearrange(bit_string_32):
"""[summary]
Regroups the given binary string.
Arguments:
bitString32 {[string]} -- [32 bit binary]
Raises:
ValueError -- [if the given string not are 32 bit binary string]
Returns:
[string] -- [32 bit binary string]
>>> rearrange('1234567890abcdfghijklmnopqrstuvw')
'pqrstuvwhijklmno90abcdfg12345678'
"""
if len(bit_string_32) != 32:
raise ValueError("Need length 32")
new_string = ""
for i in [3, 2, 1, 0]:
new_string += bit_string_32[8 * i : 8 * i + 8]
return new_string
def reformat_hex(i):
"""[summary]
Converts the given integer into 8-digit hex number.
Arguments:
i {[int]} -- [integer]
>>> reformat_hex(666)
'9a020000'
"""
hexrep = format(i, "08x")
thing = ""
for i in [3, 2, 1, 0]:
thing += hexrep[2 * i : 2 * i + 2]
return thing
def pad(bit_string):
"""[summary]
Fills up the binary string to a 512 bit binary string
Arguments:
bitString {[string]} -- [binary string]
Returns:
[string] -- [binary string]
"""
start_length = len(bit_string)
bit_string += "1"
while len(bit_string) % 512 != 448:
bit_string += "0"
last_part = format(start_length, "064b")
bit_string += rearrange(last_part[32:]) + rearrange(last_part[:32])
return bit_string
def get_block(bit_string):
"""[summary]
Iterator:
Returns by each call a list of length 16 with the 32 bit
integer blocks.
Arguments:
bit_string {[string]} -- [binary string >= 512]
"""
curr_pos = 0
while curr_pos < len(bit_string):
curr_part = bit_string[curr_pos : curr_pos + 512]
my_splits = []
for i in range(16):
my_splits.append(int(rearrange(curr_part[32 * i : 32 * i + 32]), 2))
yield my_splits
curr_pos += 512
def not32(i):
"""
>>> not32(34)
4294967261
"""
i_str = format(i, "032b")
new_str = ""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(new_str, 2)
def sum32(a, b):
return (a + b) % 2**32
def leftrot32(i, s):
return (i << s) ^ (i >> (32 - s))
def md5me(test_string):
"""[summary]
Returns a 32-bit hash code of the string 'testString'
Arguments:
testString {[string]} -- [message]
"""
bs = ""
for i in test_string:
bs += format(ord(i), "08b")
bs = pad(bs)
tvals = [int(2**32 * abs(math.sin(i + 1))) for i in range(64)]
a0 = 0x67452301
b0 = 0xEFCDAB89
c0 = 0x98BADCFE
d0 = 0x10325476
s = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
for m in get_block(bs):
a = a0
b = b0
c = c0
d = d0
for i in range(64):
if i <= 15:
# f = (B & C) | (not32(B) & D)
f = d ^ (b & (c ^ d))
g = i
elif i <= 31:
# f = (D & B) | (not32(D) & C)
f = c ^ (d & (b ^ c))
g = (5 * i + 1) % 16
elif i <= 47:
f = b ^ c ^ d
g = (3 * i + 5) % 16
else:
f = c ^ (b | not32(d))
g = (7 * i) % 16
dtemp = d
d = c
c = b
b = sum32(b, leftrot32((a + f + tvals[i] + m[g]) % 2**32, s[i]))
a = dtemp
a0 = sum32(a0, a)
b0 = sum32(b0, b)
c0 = sum32(c0, c)
d0 = sum32(d0, d)
digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
return digest
def test():
assert md5me("") == "d41d8cd98f00b204e9800998ecf8427e"
assert (
md5me("The quick brown fox jumps over the lazy dog")
== "9e107d9d372bb6826bd81d3542a419d6"
)
print("Success.")
if __name__ == "__main__":
test()
import doctest
doctest.testmod()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
"""
Flip image and bounding box for computer vision task
https://paperswithcode.com/method/randomhorizontalflip
"""
# Params
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1 # (0 is vertical, 1 is horizontal)
def main() -> None:
"""
Get images list and annotations list from input dir.
Update new images and annotations.
Save images and annotations in output dir.
>>> pass # A doctest is not possible for this function.
"""
img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
print("Processing...")
new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)
for index, image in enumerate(new_images):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
letter_code = random_chars(32)
file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
print(f"Success {index+1}/{len(new_images)} with {file_name}")
annos_list = []
for anno in new_annos[index]:
obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
annos_list.append(obj)
with open(f"/{file_root}.txt", "w") as outfile:
outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
"""
- label_dir <type: str>: Path to label include annotation of images
- img_dir <type: str>: Path to folder contain images
Return <type: list>: List of images path and labels
>>> pass # A doctest is not possible for this function.
"""
img_paths = []
labels = []
for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
with open(label_file) as in_file:
obj_lists = in_file.readlines()
img_path = os.path.join(img_dir, f"{label_name}.jpg")
boxes = []
for obj_list in obj_lists:
obj = obj_list.rstrip("\n").split(" ")
boxes.append(
[
int(obj[0]),
float(obj[1]),
float(obj[2]),
float(obj[3]),
float(obj[4]),
]
)
if not boxes:
continue
img_paths.append(img_path)
labels.append(boxes)
return img_paths, labels
def update_image_and_anno(
img_list: list, anno_list: list, flip_type: int = 1
) -> tuple[list, list, list]:
"""
- img_list <type: list>: list of all images
- anno_list <type: list>: list of all annotations of specific image
- flip_type <type: int>: 0 is vertical, 1 is horizontal
Return:
- new_imgs_list <type: narray>: image after resize
- new_annos_lists <type: list>: list of new annotation after scale
- path_list <type: list>: list the name of image file
>>> pass # A doctest is not possible for this function.
"""
new_annos_lists = []
path_list = []
new_imgs_list = []
for idx in range(len(img_list)):
new_annos = []
path = img_list[idx]
path_list.append(path)
img_annos = anno_list[idx]
img = cv2.imread(path)
if flip_type == 1:
new_img = cv2.flip(img, flip_type)
for bbox in img_annos:
x_center_new = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
elif flip_type == 0:
new_img = cv2.flip(img, flip_type)
for bbox in img_annos:
y_center_new = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
new_annos_lists.append(new_annos)
new_imgs_list.append(new_img)
return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char: int = 32) -> str:
"""
Automatic generate random 32 characters.
Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
>>> len(random_chars(32))
32
"""
assert number_char > 1, "The number of character should greater than 1"
letter_code = ascii_lowercase + digits
return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
| import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
"""
Flip image and bounding box for computer vision task
https://paperswithcode.com/method/randomhorizontalflip
"""
# Params
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1 # (0 is vertical, 1 is horizontal)
def main() -> None:
"""
Get images list and annotations list from input dir.
Update new images and annotations.
Save images and annotations in output dir.
>>> pass # A doctest is not possible for this function.
"""
img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
print("Processing...")
new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)
for index, image in enumerate(new_images):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
letter_code = random_chars(32)
file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
print(f"Success {index+1}/{len(new_images)} with {file_name}")
annos_list = []
for anno in new_annos[index]:
obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
annos_list.append(obj)
with open(f"/{file_root}.txt", "w") as outfile:
outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
"""
- label_dir <type: str>: Path to label include annotation of images
- img_dir <type: str>: Path to folder contain images
Return <type: list>: List of images path and labels
>>> pass # A doctest is not possible for this function.
"""
img_paths = []
labels = []
for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
with open(label_file) as in_file:
obj_lists = in_file.readlines()
img_path = os.path.join(img_dir, f"{label_name}.jpg")
boxes = []
for obj_list in obj_lists:
obj = obj_list.rstrip("\n").split(" ")
boxes.append(
[
int(obj[0]),
float(obj[1]),
float(obj[2]),
float(obj[3]),
float(obj[4]),
]
)
if not boxes:
continue
img_paths.append(img_path)
labels.append(boxes)
return img_paths, labels
def update_image_and_anno(
img_list: list, anno_list: list, flip_type: int = 1
) -> tuple[list, list, list]:
"""
- img_list <type: list>: list of all images
- anno_list <type: list>: list of all annotations of specific image
- flip_type <type: int>: 0 is vertical, 1 is horizontal
Return:
- new_imgs_list <type: narray>: image after resize
- new_annos_lists <type: list>: list of new annotation after scale
- path_list <type: list>: list the name of image file
>>> pass # A doctest is not possible for this function.
"""
new_annos_lists = []
path_list = []
new_imgs_list = []
for idx in range(len(img_list)):
new_annos = []
path = img_list[idx]
path_list.append(path)
img_annos = anno_list[idx]
img = cv2.imread(path)
if flip_type == 1:
new_img = cv2.flip(img, flip_type)
for bbox in img_annos:
x_center_new = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
elif flip_type == 0:
new_img = cv2.flip(img, flip_type)
for bbox in img_annos:
y_center_new = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
new_annos_lists.append(new_annos)
new_imgs_list.append(new_img)
return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char: int = 32) -> str:
"""
Automatic generate random 32 characters.
Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
>>> len(random_chars(32))
32
"""
assert number_char > 1, "The number of character should greater than 1"
letter_code = ascii_lowercase + digits
return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
Program to encode and decode Baconian or Bacon's Cipher
Wikipedia reference : https://en.wikipedia.org/wiki/Bacon%27s_cipher
"""
encode_dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
"""
Encodes to Baconian cipher
>>> encode("hello")
'AABBBAABAAABABAABABAABBAB'
>>> encode("hello world")
'AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB'
>>> encode("hello world!")
Traceback (most recent call last):
...
Exception: encode() accepts only letters of the alphabet and spaces
"""
encoded = ""
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception("encode() accepts only letters of the alphabet and spaces")
return encoded
def decode(coded: str) -> str:
"""
Decodes from Baconian cipher
>>> decode("AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB")
'hello world'
>>> decode("AABBBAABAAABABAABABAABBAB")
'hello'
>>> decode("AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB!")
Traceback (most recent call last):
...
Exception: decode() accepts only 'A', 'B' and spaces
"""
if set(coded) - {"A", "B", " "} != set():
raise Exception("decode() accepts only 'A', 'B' and spaces")
decoded = ""
for word in coded.split():
while len(word) != 0:
decoded += decode_dict[word[:5]]
word = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
| """
Program to encode and decode Baconian or Bacon's Cipher
Wikipedia reference : https://en.wikipedia.org/wiki/Bacon%27s_cipher
"""
encode_dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
"""
Encodes to Baconian cipher
>>> encode("hello")
'AABBBAABAAABABAABABAABBAB'
>>> encode("hello world")
'AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB'
>>> encode("hello world!")
Traceback (most recent call last):
...
Exception: encode() accepts only letters of the alphabet and spaces
"""
encoded = ""
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception("encode() accepts only letters of the alphabet and spaces")
return encoded
def decode(coded: str) -> str:
"""
Decodes from Baconian cipher
>>> decode("AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB")
'hello world'
>>> decode("AABBBAABAAABABAABABAABBAB")
'hello'
>>> decode("AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB!")
Traceback (most recent call last):
...
Exception: decode() accepts only 'A', 'B' and spaces
"""
if set(coded) - {"A", "B", " "} != set():
raise Exception("decode() accepts only 'A', 'B' and spaces")
decoded = ""
for word in coded.split():
while len(word) != 0:
decoded += decode_dict[word[:5]]
word = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| -1 |
||
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
Project Euler Problem 1: https://projecteuler.net/problem=1
Multiples of 3 and 5
If we list all the natural numbers below 10 that are multiples of 3 or 5,
we get 3, 5, 6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below 1000.
"""
def solution(n: int = 1000) -> int:
"""
Returns the sum of all the multiples of 3 or 5 below n.
>>> solution(3)
0
>>> solution(4)
3
>>> solution(10)
23
>>> solution(600)
83700
>>> solution(-7)
0
"""
return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(f"{solution() = }")
| """
Project Euler Problem 1: https://projecteuler.net/problem=1
Multiples of 3 and 5
If we list all the natural numbers below 10 that are multiples of 3 or 5,
we get 3, 5, 6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below 1000.
"""
def solution(n: int = 1000) -> int:
"""
Returns the sum of all the multiples of 3 or 5 below n.
>>> solution(3)
0
>>> solution(4)
3
>>> solution(10)
23
>>> solution(600)
83700
>>> solution(-7)
0
"""
return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(f"{solution() = }")
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """https://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance"""
def jaro_winkler(str1: str, str2: str) -> float:
"""
Jaro–Winkler distance is a string metric measuring an edit distance between two
sequences.
Output value is between 0.0 and 1.0.
>>> jaro_winkler("martha", "marhta")
0.9611111111111111
>>> jaro_winkler("CRATE", "TRACE")
0.7333333333333334
>>> jaro_winkler("test", "dbdbdbdb")
0.0
>>> jaro_winkler("test", "test")
1.0
>>> jaro_winkler("hello world", "HeLLo W0rlD")
0.6363636363636364
>>> jaro_winkler("test", "")
0.0
>>> jaro_winkler("hello", "world")
0.4666666666666666
>>> jaro_winkler("hell**o", "*world")
0.4365079365079365
"""
def get_matched_characters(_str1: str, _str2: str) -> str:
matched = []
limit = min(len(_str1), len(_str2)) // 2
for i, l in enumerate(_str1):
left = int(max(0, i - limit))
right = int(min(i + limit + 1, len(_str2)))
if l in _str2[left:right]:
matched.append(l)
_str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
return "".join(matched)
# matching characters
matching_1 = get_matched_characters(str1, str2)
matching_2 = get_matched_characters(str2, str1)
match_count = len(matching_1)
# transposition
transpositions = (
len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
)
if not match_count:
jaro = 0.0
else:
jaro = (
1
/ 3
* (
match_count / len(str1)
+ match_count / len(str2)
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
prefix_len = 0
for c1, c2 in zip(str1[:4], str2[:4]):
if c1 == c2:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
| """https://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance"""
def jaro_winkler(str1: str, str2: str) -> float:
"""
Jaro–Winkler distance is a string metric measuring an edit distance between two
sequences.
Output value is between 0.0 and 1.0.
>>> jaro_winkler("martha", "marhta")
0.9611111111111111
>>> jaro_winkler("CRATE", "TRACE")
0.7333333333333334
>>> jaro_winkler("test", "dbdbdbdb")
0.0
>>> jaro_winkler("test", "test")
1.0
>>> jaro_winkler("hello world", "HeLLo W0rlD")
0.6363636363636364
>>> jaro_winkler("test", "")
0.0
>>> jaro_winkler("hello", "world")
0.4666666666666666
>>> jaro_winkler("hell**o", "*world")
0.4365079365079365
"""
def get_matched_characters(_str1: str, _str2: str) -> str:
matched = []
limit = min(len(_str1), len(_str2)) // 2
for i, l in enumerate(_str1):
left = int(max(0, i - limit))
right = int(min(i + limit + 1, len(_str2)))
if l in _str2[left:right]:
matched.append(l)
_str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
return "".join(matched)
# matching characters
matching_1 = get_matched_characters(str1, str2)
matching_2 = get_matched_characters(str2, str1)
match_count = len(matching_1)
# transposition
transpositions = (
len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
)
if not match_count:
jaro = 0.0
else:
jaro = (
1
/ 3
* (
match_count / len(str1)
+ match_count / len(str2)
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
prefix_len = 0
for c1, c2 in zip(str1[:4], str2[:4]):
if c1 == c2:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
A Trie/Prefix Tree is a kind of search tree used to provide quick lookup
of words/patterns in a set of words. A basic Trie however has O(n^2) space complexity
making it impractical in practice. It however provides O(max(search_string, length of
longest word)) lookup time making it an optimal approach when space is not an issue.
"""
class TrieNode:
def __init__(self) -> None:
self.nodes: dict[str, TrieNode] = {} # Mapping from char to TrieNode
self.is_leaf = False
def insert_many(self, words: list[str]) -> None:
"""
Inserts a list of words into the Trie
:param words: list of string words
:return: None
"""
for word in words:
self.insert(word)
def insert(self, word: str) -> None:
"""
Inserts a word into the Trie
:param word: word to be inserted
:return: None
"""
curr = self
for char in word:
if char not in curr.nodes:
curr.nodes[char] = TrieNode()
curr = curr.nodes[char]
curr.is_leaf = True
def find(self, word: str) -> bool:
"""
Tries to find word in a Trie
:param word: word to look for
:return: Returns True if word is found, False otherwise
"""
curr = self
for char in word:
if char not in curr.nodes:
return False
curr = curr.nodes[char]
return curr.is_leaf
def delete(self, word: str) -> None:
"""
Deletes a word in a Trie
:param word: word to delete
:return: None
"""
def _delete(curr: TrieNode, word: str, index: int) -> bool:
if index == len(word):
# If word does not exist
if not curr.is_leaf:
return False
curr.is_leaf = False
return len(curr.nodes) == 0
char = word[index]
char_node = curr.nodes.get(char)
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
delete_curr = _delete(char_node, word, index + 1)
if delete_curr:
del curr.nodes[char]
return len(curr.nodes) == 0
return delete_curr
_delete(self, word, 0)
def print_words(node: TrieNode, word: str) -> None:
"""
Prints all the words in a Trie
:param node: root node of Trie
:param word: Word variable should be empty at start
:return: None
"""
if node.is_leaf:
print(word, end=" ")
for key, value in node.nodes.items():
print_words(value, word + key)
def test_trie() -> bool:
words = "banana bananas bandana band apple all beast".split()
root = TrieNode()
root.insert_many(words)
# print_words(root, "")
assert all(root.find(word) for word in words)
assert root.find("banana")
assert not root.find("bandanas")
assert not root.find("apps")
assert root.find("apple")
assert root.find("all")
root.delete("all")
assert not root.find("all")
root.delete("banana")
assert not root.find("banana")
assert root.find("bananas")
return True
def print_results(msg: str, passes: bool) -> None:
print(str(msg), "works!" if passes else "doesn't work :(")
def pytests() -> None:
assert test_trie()
def main() -> None:
"""
>>> pytests()
"""
print_results("Testing trie functionality", test_trie())
if __name__ == "__main__":
main()
| """
A Trie/Prefix Tree is a kind of search tree used to provide quick lookup
of words/patterns in a set of words. A basic Trie however has O(n^2) space complexity
making it impractical in practice. It however provides O(max(search_string, length of
longest word)) lookup time making it an optimal approach when space is not an issue.
"""
class TrieNode:
def __init__(self) -> None:
self.nodes: dict[str, TrieNode] = {} # Mapping from char to TrieNode
self.is_leaf = False
def insert_many(self, words: list[str]) -> None:
"""
Inserts a list of words into the Trie
:param words: list of string words
:return: None
"""
for word in words:
self.insert(word)
def insert(self, word: str) -> None:
"""
Inserts a word into the Trie
:param word: word to be inserted
:return: None
"""
curr = self
for char in word:
if char not in curr.nodes:
curr.nodes[char] = TrieNode()
curr = curr.nodes[char]
curr.is_leaf = True
def find(self, word: str) -> bool:
"""
Tries to find word in a Trie
:param word: word to look for
:return: Returns True if word is found, False otherwise
"""
curr = self
for char in word:
if char not in curr.nodes:
return False
curr = curr.nodes[char]
return curr.is_leaf
def delete(self, word: str) -> None:
"""
Deletes a word in a Trie
:param word: word to delete
:return: None
"""
def _delete(curr: TrieNode, word: str, index: int) -> bool:
if index == len(word):
# If word does not exist
if not curr.is_leaf:
return False
curr.is_leaf = False
return len(curr.nodes) == 0
char = word[index]
char_node = curr.nodes.get(char)
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
delete_curr = _delete(char_node, word, index + 1)
if delete_curr:
del curr.nodes[char]
return len(curr.nodes) == 0
return delete_curr
_delete(self, word, 0)
def print_words(node: TrieNode, word: str) -> None:
"""
Prints all the words in a Trie
:param node: root node of Trie
:param word: Word variable should be empty at start
:return: None
"""
if node.is_leaf:
print(word, end=" ")
for key, value in node.nodes.items():
print_words(value, word + key)
def test_trie() -> bool:
words = "banana bananas bandana band apple all beast".split()
root = TrieNode()
root.insert_many(words)
# print_words(root, "")
assert all(root.find(word) for word in words)
assert root.find("banana")
assert not root.find("bandanas")
assert not root.find("apps")
assert root.find("apple")
assert root.find("all")
root.delete("all")
assert not root.find("all")
root.delete("banana")
assert not root.find("banana")
assert root.find("bananas")
return True
def print_results(msg: str, passes: bool) -> None:
print(str(msg), "works!" if passes else "doesn't work :(")
def pytests() -> None:
assert test_trie()
def main() -> None:
"""
>>> pytests()
"""
print_results("Testing trie functionality", test_trie())
if __name__ == "__main__":
main()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| -1 |
||
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
Find the kth smallest element in linear time using divide and conquer.
Recall we can do this trivially in O(nlogn) time. Sort the list and
access kth element in constant time.
This is a divide and conquer algorithm that can find a solution in O(n) time.
For more information of this algorithm:
https://web.stanford.edu/class/archive/cs/cs161/cs161.1138/lectures/08/Small08.pdf
"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
"""
Choose a random pivot for the list.
We can use a more sophisticated algorithm here, such as the median-of-medians
algorithm.
"""
return choice(lst)
def kth_number(lst: list[int], k: int) -> int:
"""
Return the kth smallest number in lst.
>>> kth_number([2, 1, 3, 4, 5], 3)
3
>>> kth_number([2, 1, 3, 4, 5], 1)
1
>>> kth_number([2, 1, 3, 4, 5], 5)
5
>>> kth_number([3, 2, 5, 6, 7, 8], 2)
3
>>> kth_number([25, 21, 98, 100, 76, 22, 43, 60, 89, 87], 4)
43
"""
# pick a pivot and separate into list based on pivot.
pivot = random_pivot(lst)
# partition based on pivot
# linear time
small = [e for e in lst if e < pivot]
big = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(small) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(small) < k - 1:
return kth_number(big, k - len(small) - 1)
# pivot is in elements smaller than k
else:
return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
| """
Find the kth smallest element in linear time using divide and conquer.
Recall we can do this trivially in O(nlogn) time. Sort the list and
access kth element in constant time.
This is a divide and conquer algorithm that can find a solution in O(n) time.
For more information of this algorithm:
https://web.stanford.edu/class/archive/cs/cs161/cs161.1138/lectures/08/Small08.pdf
"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
"""
Choose a random pivot for the list.
We can use a more sophisticated algorithm here, such as the median-of-medians
algorithm.
"""
return choice(lst)
def kth_number(lst: list[int], k: int) -> int:
"""
Return the kth smallest number in lst.
>>> kth_number([2, 1, 3, 4, 5], 3)
3
>>> kth_number([2, 1, 3, 4, 5], 1)
1
>>> kth_number([2, 1, 3, 4, 5], 5)
5
>>> kth_number([3, 2, 5, 6, 7, 8], 2)
3
>>> kth_number([25, 21, 98, 100, 76, 22, 43, 60, 89, 87], 4)
43
"""
# pick a pivot and separate into list based on pivot.
pivot = random_pivot(lst)
# partition based on pivot
# linear time
small = [e for e in lst if e < pivot]
big = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(small) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(small) < k - 1:
return kth_number(big, k - len(small) - 1)
# pivot is in elements smaller than k
else:
return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| def reverse_long_words(sentence: str) -> str:
"""
Reverse all words that are longer than 4 characters in a sentence.
>>> reverse_long_words("Hey wollef sroirraw")
'Hey fellow warriors'
>>> reverse_long_words("nohtyP is nohtyP")
'Python is Python'
>>> reverse_long_words("1 12 123 1234 54321 654321")
'1 12 123 1234 12345 123456'
"""
return " ".join(
"".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
| def reverse_long_words(sentence: str) -> str:
"""
Reverse all words that are longer than 4 characters in a sentence.
>>> reverse_long_words("Hey wollef sroirraw")
'Hey fellow warriors'
>>> reverse_long_words("nohtyP is nohtyP")
'Python is Python'
>>> reverse_long_words("1 12 123 1234 54321 654321")
'1 12 123 1234 12345 123456'
"""
return " ".join(
"".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
this is code for forecasting
but i modified it and used it for safety checker of data
for ex: you have a online shop and for some reason some data are
missing (the amount of data that u expected are not supposed to be)
then we can use it
*ps : 1. ofc we can use normal statistic method but in this case
the data is quite absurd and only a little^^
2. ofc u can use this and modified it for forecasting purpose
for the next 3 months sales or something,
u can just adjust it for ur own purpose
"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
"""
First method: linear regression
input : training data (date, total_user, total_event) in list of float
output : list of total user prediction in float
>>> n = linear_regression_prediction([2,3,4,5], [5,3,4,6], [3,1,2,4], [2,1], [2,2])
>>> abs(n - 5.0) < 1e-6 # Checking precision because of floating point errors
True
"""
x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
y = np.array(train_usr)
beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])
def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
"""
second method: Sarimax
sarimax is a statistic method which using previous input
and learn its pattern to predict future data
input : training data (total_user, with exog data = total_event) in list of float
output : list of total user prediction in float
>>> sarimax_predictor([4,2,6,8], [3,1,2,4], [2])
6.6666671111109626
"""
order = (1, 2, 1)
seasonal_order = (1, 1, 0, 7)
model = SARIMAX(
train_user, exog=train_match, order=order, seasonal_order=seasonal_order
)
model_fit = model.fit(disp=False, maxiter=600, method="nm")
result = model_fit.predict(1, len(test_match), exog=[test_match])
return result[0]
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
"""
Third method: Support vector regressor
svr is quite the same with svm(support vector machine)
it uses the same principles as the SVM for classification,
with only a few minor differences and the only different is that
it suits better for regression purpose
input : training data (date, total_user, total_event) in list of float
where x = list of set (date and total event)
output : list of total user prediction in float
>>> support_vector_regressor([[5,2],[1,5],[6,2]], [[3,2]], [2,1,4])
1.634932078116079
"""
regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
regressor.fit(x_train, train_user)
y_pred = regressor.predict(x_test)
return y_pred[0]
def interquartile_range_checker(train_user: list) -> float:
"""
Optional method: interquatile range
input : list of total user in float
output : low limit of input in float
this method can be used to check whether some data is outlier or not
>>> interquartile_range_checker([1,2,3,4,5,6,7,8,9,10])
2.8
"""
train_user.sort()
q1 = np.percentile(train_user, 25)
q3 = np.percentile(train_user, 75)
iqr = q3 - q1
low_lim = q1 - (iqr * 0.1)
return low_lim
def data_safety_checker(list_vote: list, actual_result: float) -> None:
"""
Used to review all the votes (list result prediction)
and compare it to the actual result.
input : list of predictions
output : print whether it's safe or not
>>> data_safety_checker([2,3,4],5.0)
Today's data is not safe.
"""
safe = 0
not_safe = 0
for i in list_vote:
if i > actual_result:
safe = not_safe + 1
else:
if abs(abs(i) - abs(actual_result)) <= 0.1:
safe = safe + 1
else:
not_safe = not_safe + 1
print(f"Today's data is {'not ' if safe <= not_safe else ''}safe.")
# data_input_df = pd.read_csv("ex_data.csv", header=None)
data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
data_input_df = pd.DataFrame(data_input, columns=["total_user", "total_even", "days"])
"""
data column = total user in a day, how much online event held in one day,
what day is that(sunday-saturday)
"""
# start normalization
normalize_df = Normalizer().fit_transform(data_input_df.values)
# split data
total_date = normalize_df[:, 2].tolist()
total_user = normalize_df[:, 0].tolist()
total_match = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
x = normalize_df[:, [1, 2]].tolist()
x_train = x[: len(x) - 1]
x_test = x[len(x) - 1 :]
# for linear reression & sarimax
trn_date = total_date[: len(total_date) - 1]
trn_user = total_user[: len(total_user) - 1]
trn_match = total_match[: len(total_match) - 1]
tst_date = total_date[len(total_date) - 1 :]
tst_user = total_user[len(total_user) - 1 :]
tst_match = total_match[len(total_match) - 1 :]
# voting system with forecasting
res_vote = []
res_vote.append(
linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match)
)
res_vote.append(sarimax_predictor(trn_user, trn_match, tst_match))
res_vote.append(support_vector_regressor(x_train, x_test, trn_user))
# check the safety of todays'data^^
data_safety_checker(res_vote, tst_user)
| """
this is code for forecasting
but i modified it and used it for safety checker of data
for ex: you have a online shop and for some reason some data are
missing (the amount of data that u expected are not supposed to be)
then we can use it
*ps : 1. ofc we can use normal statistic method but in this case
the data is quite absurd and only a little^^
2. ofc u can use this and modified it for forecasting purpose
for the next 3 months sales or something,
u can just adjust it for ur own purpose
"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
"""
First method: linear regression
input : training data (date, total_user, total_event) in list of float
output : list of total user prediction in float
>>> n = linear_regression_prediction([2,3,4,5], [5,3,4,6], [3,1,2,4], [2,1], [2,2])
>>> abs(n - 5.0) < 1e-6 # Checking precision because of floating point errors
True
"""
x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
y = np.array(train_usr)
beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])
def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
"""
second method: Sarimax
sarimax is a statistic method which using previous input
and learn its pattern to predict future data
input : training data (total_user, with exog data = total_event) in list of float
output : list of total user prediction in float
>>> sarimax_predictor([4,2,6,8], [3,1,2,4], [2])
6.6666671111109626
"""
order = (1, 2, 1)
seasonal_order = (1, 1, 0, 7)
model = SARIMAX(
train_user, exog=train_match, order=order, seasonal_order=seasonal_order
)
model_fit = model.fit(disp=False, maxiter=600, method="nm")
result = model_fit.predict(1, len(test_match), exog=[test_match])
return result[0]
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
"""
Third method: Support vector regressor
svr is quite the same with svm(support vector machine)
it uses the same principles as the SVM for classification,
with only a few minor differences and the only different is that
it suits better for regression purpose
input : training data (date, total_user, total_event) in list of float
where x = list of set (date and total event)
output : list of total user prediction in float
>>> support_vector_regressor([[5,2],[1,5],[6,2]], [[3,2]], [2,1,4])
1.634932078116079
"""
regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
regressor.fit(x_train, train_user)
y_pred = regressor.predict(x_test)
return y_pred[0]
def interquartile_range_checker(train_user: list) -> float:
"""
Optional method: interquatile range
input : list of total user in float
output : low limit of input in float
this method can be used to check whether some data is outlier or not
>>> interquartile_range_checker([1,2,3,4,5,6,7,8,9,10])
2.8
"""
train_user.sort()
q1 = np.percentile(train_user, 25)
q3 = np.percentile(train_user, 75)
iqr = q3 - q1
low_lim = q1 - (iqr * 0.1)
return low_lim
def data_safety_checker(list_vote: list, actual_result: float) -> None:
"""
Used to review all the votes (list result prediction)
and compare it to the actual result.
input : list of predictions
output : print whether it's safe or not
>>> data_safety_checker([2,3,4],5.0)
Today's data is not safe.
"""
safe = 0
not_safe = 0
for i in list_vote:
if i > actual_result:
safe = not_safe + 1
else:
if abs(abs(i) - abs(actual_result)) <= 0.1:
safe = safe + 1
else:
not_safe = not_safe + 1
print(f"Today's data is {'not ' if safe <= not_safe else ''}safe.")
# data_input_df = pd.read_csv("ex_data.csv", header=None)
data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
data_input_df = pd.DataFrame(data_input, columns=["total_user", "total_even", "days"])
"""
data column = total user in a day, how much online event held in one day,
what day is that(sunday-saturday)
"""
# start normalization
normalize_df = Normalizer().fit_transform(data_input_df.values)
# split data
total_date = normalize_df[:, 2].tolist()
total_user = normalize_df[:, 0].tolist()
total_match = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
x = normalize_df[:, [1, 2]].tolist()
x_train = x[: len(x) - 1]
x_test = x[len(x) - 1 :]
# for linear reression & sarimax
trn_date = total_date[: len(total_date) - 1]
trn_user = total_user[: len(total_user) - 1]
trn_match = total_match[: len(total_match) - 1]
tst_date = total_date[len(total_date) - 1 :]
tst_user = total_user[len(total_user) - 1 :]
tst_match = total_match[len(total_match) - 1 :]
# voting system with forecasting
res_vote = []
res_vote.append(
linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match)
)
res_vote.append(sarimax_predictor(trn_user, trn_match, tst_match))
res_vote.append(support_vector_regressor(x_train, x_test, trn_user))
# check the safety of todays'data^^
data_safety_checker(res_vote, tst_user)
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| # Finding longest distance in Directed Acyclic Graph using KahnsAlgorithm
def longest_distance(graph):
indegree = [0] * len(graph)
queue = []
long_dist = [1] * len(graph)
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(indegree)):
if indegree[i] == 0:
queue.append(i)
while queue:
vertex = queue.pop(0)
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
long_dist[x] = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(x)
print(max(long_dist))
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| # Finding longest distance in Directed Acyclic Graph using KahnsAlgorithm
def longest_distance(graph):
indegree = [0] * len(graph)
queue = []
long_dist = [1] * len(graph)
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(indegree)):
if indegree[i] == 0:
queue.append(i)
while queue:
vertex = queue.pop(0)
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
long_dist[x] = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(x)
print(max(long_dist))
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| from __future__ import annotations
# Divide and Conquer algorithm
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
"""
find max value in list
:param nums: contains elements
:param left: index of first element
:param right: index of last element
:return: max in nums
>>> for nums in ([3, 2, 1], [-3, -2, -1], [3, -3, 0], [3.0, 3.1, 2.9]):
... find_max(nums, 0, len(nums) - 1) == max(nums)
True
True
True
True
>>> nums = [1, 3, 5, 7, 9, 2, 4, 6, 8, 10]
>>> find_max(nums, 0, len(nums) - 1) == max(nums)
True
>>> find_max([], 0, 0)
Traceback (most recent call last):
...
ValueError: find_max() arg is an empty sequence
>>> find_max(nums, 0, len(nums)) == max(nums)
Traceback (most recent call last):
...
IndexError: list index out of range
>>> find_max(nums, -len(nums), -1) == max(nums)
True
>>> find_max(nums, -len(nums) - 1, -1) == max(nums)
Traceback (most recent call last):
...
IndexError: list index out of range
"""
if len(nums) == 0:
raise ValueError("find_max() arg is an empty sequence")
if (
left >= len(nums)
or left < -len(nums)
or right >= len(nums)
or right < -len(nums)
):
raise IndexError("list index out of range")
if left == right:
return nums[left]
mid = (left + right) >> 1 # the middle
left_max = find_max(nums, left, mid) # find max in range[left, mid]
right_max = find_max(nums, mid + 1, right) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| from __future__ import annotations
# Divide and Conquer algorithm
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
"""
find max value in list
:param nums: contains elements
:param left: index of first element
:param right: index of last element
:return: max in nums
>>> for nums in ([3, 2, 1], [-3, -2, -1], [3, -3, 0], [3.0, 3.1, 2.9]):
... find_max(nums, 0, len(nums) - 1) == max(nums)
True
True
True
True
>>> nums = [1, 3, 5, 7, 9, 2, 4, 6, 8, 10]
>>> find_max(nums, 0, len(nums) - 1) == max(nums)
True
>>> find_max([], 0, 0)
Traceback (most recent call last):
...
ValueError: find_max() arg is an empty sequence
>>> find_max(nums, 0, len(nums)) == max(nums)
Traceback (most recent call last):
...
IndexError: list index out of range
>>> find_max(nums, -len(nums), -1) == max(nums)
True
>>> find_max(nums, -len(nums) - 1, -1) == max(nums)
Traceback (most recent call last):
...
IndexError: list index out of range
"""
if len(nums) == 0:
raise ValueError("find_max() arg is an empty sequence")
if (
left >= len(nums)
or left < -len(nums)
or right >= len(nums)
or right < -len(nums)
):
raise IndexError("list index out of range")
if left == right:
return nums[left]
mid = (left + right) >> 1 # the middle
left_max = find_max(nums, left, mid) # find max in range[left, mid]
right_max = find_max(nums, mid + 1, right) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
developed by: markmelnic
original repo: https://github.com/markmelnic/Scoring-Algorithm
Analyse data using a range based percentual proximity algorithm
and calculate the linear maximum likelihood estimation.
The basic principle is that all values supplied will be broken
down to a range from 0 to 1 and each column's score will be added
up to get the total score.
==========
Example for data of vehicles
price|mileage|registration_year
20k |60k |2012
22k |50k |2011
23k |90k |2015
16k |210k |2010
We want the vehicle with the lowest price,
lowest mileage but newest registration year.
Thus the weights for each column are as follows:
[0, 0, 1]
"""
def procentual_proximity(
source_data: list[list[float]], weights: list[int]
) -> list[list[float]]:
"""
weights - int list
possible values - 0 / 1
0 if lower values have higher weight in the data set
1 if higher values have higher weight in the data set
>>> procentual_proximity([[20, 60, 2012],[23, 90, 2015],[22, 50, 2011]], [0, 0, 1])
[[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]]
"""
# getting data
data_lists: list[list[float]] = []
for data in source_data:
for i, el in enumerate(data):
if len(data_lists) < i + 1:
data_lists.append([])
data_lists[i].append(float(el))
score_lists: list[list[float]] = []
# calculating each score
for dlist, weight in zip(data_lists, weights):
mind = min(dlist)
maxd = max(dlist)
score: list[float] = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)))
except ZeroDivisionError:
score.append(1)
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind))
except ZeroDivisionError:
score.append(0)
# weight not 0 or 1
else:
raise ValueError(f"Invalid weight of {weight:f} provided")
score_lists.append(score)
# initialize final scores
final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
# generate final scores
for slist in score_lists:
for j, ele in enumerate(slist):
final_scores[j] = final_scores[j] + ele
# append scores to source data
for i, ele in enumerate(final_scores):
source_data[i].append(ele)
return source_data
| """
developed by: markmelnic
original repo: https://github.com/markmelnic/Scoring-Algorithm
Analyse data using a range based percentual proximity algorithm
and calculate the linear maximum likelihood estimation.
The basic principle is that all values supplied will be broken
down to a range from 0 to 1 and each column's score will be added
up to get the total score.
==========
Example for data of vehicles
price|mileage|registration_year
20k |60k |2012
22k |50k |2011
23k |90k |2015
16k |210k |2010
We want the vehicle with the lowest price,
lowest mileage but newest registration year.
Thus the weights for each column are as follows:
[0, 0, 1]
"""
def procentual_proximity(
source_data: list[list[float]], weights: list[int]
) -> list[list[float]]:
"""
weights - int list
possible values - 0 / 1
0 if lower values have higher weight in the data set
1 if higher values have higher weight in the data set
>>> procentual_proximity([[20, 60, 2012],[23, 90, 2015],[22, 50, 2011]], [0, 0, 1])
[[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]]
"""
# getting data
data_lists: list[list[float]] = []
for data in source_data:
for i, el in enumerate(data):
if len(data_lists) < i + 1:
data_lists.append([])
data_lists[i].append(float(el))
score_lists: list[list[float]] = []
# calculating each score
for dlist, weight in zip(data_lists, weights):
mind = min(dlist)
maxd = max(dlist)
score: list[float] = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)))
except ZeroDivisionError:
score.append(1)
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind))
except ZeroDivisionError:
score.append(0)
# weight not 0 or 1
else:
raise ValueError(f"Invalid weight of {weight:f} provided")
score_lists.append(score)
# initialize final scores
final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
# generate final scores
for slist in score_lists:
for j, ele in enumerate(slist):
final_scores[j] = final_scores[j] + ele
# append scores to source data
for i, ele in enumerate(final_scores):
source_data[i].append(ele)
return source_data
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
Project Euler Problem 1: https://projecteuler.net/problem=1
Multiples of 3 and 5
If we list all the natural numbers below 10 that are multiples of 3 or 5,
we get 3, 5, 6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below 1000.
"""
def solution(n: int = 1000) -> int:
"""
Returns the sum of all the multiples of 3 or 5 below n.
>>> solution(3)
0
>>> solution(4)
3
>>> solution(10)
23
>>> solution(600)
83700
"""
result = 0
for i in range(n):
if i % 3 == 0:
result += i
elif i % 5 == 0:
result += i
return result
if __name__ == "__main__":
print(f"{solution() = }")
| """
Project Euler Problem 1: https://projecteuler.net/problem=1
Multiples of 3 and 5
If we list all the natural numbers below 10 that are multiples of 3 or 5,
we get 3, 5, 6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below 1000.
"""
def solution(n: int = 1000) -> int:
"""
Returns the sum of all the multiples of 3 or 5 below n.
>>> solution(3)
0
>>> solution(4)
3
>>> solution(10)
23
>>> solution(600)
83700
"""
result = 0
for i in range(n):
if i % 3 == 0:
result += i
elif i % 5 == 0:
result += i
return result
if __name__ == "__main__":
print(f"{solution() = }")
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| def bin_to_hexadecimal(binary_str: str) -> str:
"""
Converting a binary string into hexadecimal using Grouping Method
>>> bin_to_hexadecimal('101011111')
'0x15f'
>>> bin_to_hexadecimal(' 1010 ')
'0x0a'
>>> bin_to_hexadecimal('-11101')
'-0x1d'
>>> bin_to_hexadecimal('a')
Traceback (most recent call last):
...
ValueError: Non-binary value was passed to the function
>>> bin_to_hexadecimal('')
Traceback (most recent call last):
...
ValueError: Empty string was passed to the function
"""
BITS_TO_HEX = { # noqa: N806
"0000": "0",
"0001": "1",
"0010": "2",
"0011": "3",
"0100": "4",
"0101": "5",
"0110": "6",
"0111": "7",
"1000": "8",
"1001": "9",
"1010": "a",
"1011": "b",
"1100": "c",
"1101": "d",
"1110": "e",
"1111": "f",
}
# Sanitising parameter
binary_str = str(binary_str).strip()
# Exceptions
if not binary_str:
raise ValueError("Empty string was passed to the function")
is_negative = binary_str[0] == "-"
binary_str = binary_str[1:] if is_negative else binary_str
if not all(char in "01" for char in binary_str):
raise ValueError("Non-binary value was passed to the function")
binary_str = (
"0" * (4 * (divmod(len(binary_str), 4)[0] + 1) - len(binary_str)) + binary_str
)
hexadecimal = []
for x in range(0, len(binary_str), 4):
hexadecimal.append(BITS_TO_HEX[binary_str[x : x + 4]])
hexadecimal_str = "0x" + "".join(hexadecimal)
return "-" + hexadecimal_str if is_negative else hexadecimal_str
if __name__ == "__main__":
from doctest import testmod
testmod()
| def bin_to_hexadecimal(binary_str: str) -> str:
"""
Converting a binary string into hexadecimal using Grouping Method
>>> bin_to_hexadecimal('101011111')
'0x15f'
>>> bin_to_hexadecimal(' 1010 ')
'0x0a'
>>> bin_to_hexadecimal('-11101')
'-0x1d'
>>> bin_to_hexadecimal('a')
Traceback (most recent call last):
...
ValueError: Non-binary value was passed to the function
>>> bin_to_hexadecimal('')
Traceback (most recent call last):
...
ValueError: Empty string was passed to the function
"""
BITS_TO_HEX = { # noqa: N806
"0000": "0",
"0001": "1",
"0010": "2",
"0011": "3",
"0100": "4",
"0101": "5",
"0110": "6",
"0111": "7",
"1000": "8",
"1001": "9",
"1010": "a",
"1011": "b",
"1100": "c",
"1101": "d",
"1110": "e",
"1111": "f",
}
# Sanitising parameter
binary_str = str(binary_str).strip()
# Exceptions
if not binary_str:
raise ValueError("Empty string was passed to the function")
is_negative = binary_str[0] == "-"
binary_str = binary_str[1:] if is_negative else binary_str
if not all(char in "01" for char in binary_str):
raise ValueError("Non-binary value was passed to the function")
binary_str = (
"0" * (4 * (divmod(len(binary_str), 4)[0] + 1) - len(binary_str)) + binary_str
)
hexadecimal = []
for x in range(0, len(binary_str), 4):
hexadecimal.append(BITS_TO_HEX[binary_str[x : x + 4]])
hexadecimal_str = "0x" + "".join(hexadecimal)
return "-" + hexadecimal_str if is_negative else hexadecimal_str
if __name__ == "__main__":
from doctest import testmod
testmod()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| -1 |
||
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| # Knight Tour Intro: https://www.youtube.com/watch?v=ab_dY3dZFHM
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
"""
Find all the valid positions a knight can move to from the current position.
>>> get_valid_pos((1, 3), 4)
[(2, 1), (0, 1), (3, 2)]
"""
y, x = position
positions = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
permissible_positions = []
for position in positions:
y_test, x_test = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(position)
return permissible_positions
def is_complete(board: list[list[int]]) -> bool:
"""
Check if the board (matrix) has been completely filled with non-zero values.
>>> is_complete([[1]])
True
>>> is_complete([[1, 2], [3, 0]])
False
"""
return not any(elem == 0 for row in board for elem in row)
def open_knight_tour_helper(
board: list[list[int]], pos: tuple[int, int], curr: int
) -> bool:
"""
Helper function to solve knight tour problem.
"""
if is_complete(board):
return True
for position in get_valid_pos(pos, len(board)):
y, x = position
if board[y][x] == 0:
board[y][x] = curr + 1
if open_knight_tour_helper(board, position, curr + 1):
return True
board[y][x] = 0
return False
def open_knight_tour(n: int) -> list[list[int]]:
"""
Find the solution for the knight tour problem for a board of size n. Raises
ValueError if the tour cannot be performed for the given size.
>>> open_knight_tour(1)
[[1]]
>>> open_knight_tour(2)
Traceback (most recent call last):
...
ValueError: Open Kight Tour cannot be performed on a board of size 2
"""
board = [[0 for i in range(n)] for j in range(n)]
for i in range(n):
for j in range(n):
board[i][j] = 1
if open_knight_tour_helper(board, (i, j), 1):
return board
board[i][j] = 0
raise ValueError(f"Open Kight Tour cannot be performed on a board of size {n}")
if __name__ == "__main__":
import doctest
doctest.testmod()
| # Knight Tour Intro: https://www.youtube.com/watch?v=ab_dY3dZFHM
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
"""
Find all the valid positions a knight can move to from the current position.
>>> get_valid_pos((1, 3), 4)
[(2, 1), (0, 1), (3, 2)]
"""
y, x = position
positions = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
permissible_positions = []
for position in positions:
y_test, x_test = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(position)
return permissible_positions
def is_complete(board: list[list[int]]) -> bool:
"""
Check if the board (matrix) has been completely filled with non-zero values.
>>> is_complete([[1]])
True
>>> is_complete([[1, 2], [3, 0]])
False
"""
return not any(elem == 0 for row in board for elem in row)
def open_knight_tour_helper(
board: list[list[int]], pos: tuple[int, int], curr: int
) -> bool:
"""
Helper function to solve knight tour problem.
"""
if is_complete(board):
return True
for position in get_valid_pos(pos, len(board)):
y, x = position
if board[y][x] == 0:
board[y][x] = curr + 1
if open_knight_tour_helper(board, position, curr + 1):
return True
board[y][x] = 0
return False
def open_knight_tour(n: int) -> list[list[int]]:
"""
Find the solution for the knight tour problem for a board of size n. Raises
ValueError if the tour cannot be performed for the given size.
>>> open_knight_tour(1)
[[1]]
>>> open_knight_tour(2)
Traceback (most recent call last):
...
ValueError: Open Kight Tour cannot be performed on a board of size 2
"""
board = [[0 for i in range(n)] for j in range(n)]
for i in range(n):
for j in range(n):
board[i][j] = 1
if open_knight_tour_helper(board, (i, j), 1):
return board
board[i][j] = 0
raise ValueError(f"Open Kight Tour cannot be performed on a board of size {n}")
if __name__ == "__main__":
import doctest
doctest.testmod()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
Project Euler Problem 70: https://projecteuler.net/problem=70
Euler's Totient function, φ(n) [sometimes called the phi function], is used to
determine the number of positive numbers less than or equal to n which are
relatively prime to n. For example, as 1, 2, 4, 5, 7, and 8, are all less than
nine and relatively prime to nine, φ(9)=6.
The number 1 is considered to be relatively prime to every positive number, so
φ(1)=1.
Interestingly, φ(87109)=79180, and it can be seen that 87109 is a permutation
of 79180.
Find the value of n, 1 < n < 10^7, for which φ(n) is a permutation of n and
the ratio n/φ(n) produces a minimum.
-----
This is essentially brute force. Calculate all totients up to 10^7 and
find the minimum ratio of n/φ(n) that way. To minimize the ratio, we want
to minimize n and maximize φ(n) as much as possible, so we can store the
minimum fraction's numerator and denominator and calculate new fractions
with each totient to compare against. To avoid dividing by zero, I opt to
use cross multiplication.
References:
Finding totients
https://en.wikipedia.org/wiki/Euler's_totient_function#Euler's_product_formula
"""
from __future__ import annotations
def get_totients(max_one: int) -> list[int]:
"""
Calculates a list of totients from 0 to max_one exclusive, using the
definition of Euler's product formula.
>>> get_totients(5)
[0, 1, 1, 2, 2]
>>> get_totients(10)
[0, 1, 1, 2, 2, 4, 2, 6, 4, 6]
"""
totients = [0] * max_one
for i in range(0, max_one):
totients[i] = i
for i in range(2, max_one):
if totients[i] == i:
for j in range(i, max_one, i):
totients[j] -= totients[j] // i
return totients
def has_same_digits(num1: int, num2: int) -> bool:
"""
Return True if num1 and num2 have the same frequency of every digit, False
otherwise.
>>> has_same_digits(123456789, 987654321)
True
>>> has_same_digits(123, 23)
False
>>> has_same_digits(1234566, 123456)
False
"""
return sorted(str(num1)) == sorted(str(num2))
def solution(max_n: int = 10000000) -> int:
"""
Finds the value of n from 1 to max such that n/φ(n) produces a minimum.
>>> solution(100)
21
>>> solution(10000)
4435
"""
min_numerator = 1 # i
min_denominator = 0 # φ(i)
totients = get_totients(max_n + 1)
for i in range(2, max_n + 1):
t = totients[i]
if i * min_denominator < min_numerator * t and has_same_digits(i, t):
min_numerator = i
min_denominator = t
return min_numerator
if __name__ == "__main__":
print(f"{solution() = }")
| """
Project Euler Problem 70: https://projecteuler.net/problem=70
Euler's Totient function, φ(n) [sometimes called the phi function], is used to
determine the number of positive numbers less than or equal to n which are
relatively prime to n. For example, as 1, 2, 4, 5, 7, and 8, are all less than
nine and relatively prime to nine, φ(9)=6.
The number 1 is considered to be relatively prime to every positive number, so
φ(1)=1.
Interestingly, φ(87109)=79180, and it can be seen that 87109 is a permutation
of 79180.
Find the value of n, 1 < n < 10^7, for which φ(n) is a permutation of n and
the ratio n/φ(n) produces a minimum.
-----
This is essentially brute force. Calculate all totients up to 10^7 and
find the minimum ratio of n/φ(n) that way. To minimize the ratio, we want
to minimize n and maximize φ(n) as much as possible, so we can store the
minimum fraction's numerator and denominator and calculate new fractions
with each totient to compare against. To avoid dividing by zero, I opt to
use cross multiplication.
References:
Finding totients
https://en.wikipedia.org/wiki/Euler's_totient_function#Euler's_product_formula
"""
from __future__ import annotations
def get_totients(max_one: int) -> list[int]:
"""
Calculates a list of totients from 0 to max_one exclusive, using the
definition of Euler's product formula.
>>> get_totients(5)
[0, 1, 1, 2, 2]
>>> get_totients(10)
[0, 1, 1, 2, 2, 4, 2, 6, 4, 6]
"""
totients = [0] * max_one
for i in range(0, max_one):
totients[i] = i
for i in range(2, max_one):
if totients[i] == i:
for j in range(i, max_one, i):
totients[j] -= totients[j] // i
return totients
def has_same_digits(num1: int, num2: int) -> bool:
"""
Return True if num1 and num2 have the same frequency of every digit, False
otherwise.
>>> has_same_digits(123456789, 987654321)
True
>>> has_same_digits(123, 23)
False
>>> has_same_digits(1234566, 123456)
False
"""
return sorted(str(num1)) == sorted(str(num2))
def solution(max_n: int = 10000000) -> int:
"""
Finds the value of n from 1 to max such that n/φ(n) produces a minimum.
>>> solution(100)
21
>>> solution(10000)
4435
"""
min_numerator = 1 # i
min_denominator = 0 # φ(i)
totients = get_totients(max_n + 1)
for i in range(2, max_n + 1):
t = totients[i]
if i * min_denominator < min_numerator * t and has_same_digits(i, t):
min_numerator = i
min_denominator = t
return min_numerator
if __name__ == "__main__":
print(f"{solution() = }")
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
Implementing Secant method in Python
Author: dimgrichr
"""
from math import exp
def f(x: float) -> float:
"""
>>> f(5)
39.98652410600183
"""
return 8 * x - 2 * exp(-x)
def secant_method(lower_bound: float, upper_bound: float, repeats: int) -> float:
"""
>>> secant_method(1, 3, 2)
0.2139409276214589
"""
x0 = lower_bound
x1 = upper_bound
for _ in range(0, repeats):
x0, x1 = x1, x1 - (f(x1) * (x1 - x0)) / (f(x1) - f(x0))
return x1
if __name__ == "__main__":
print(f"Example: {secant_method(1, 3, 2)}")
| """
Implementing Secant method in Python
Author: dimgrichr
"""
from math import exp
def f(x: float) -> float:
"""
>>> f(5)
39.98652410600183
"""
return 8 * x - 2 * exp(-x)
def secant_method(lower_bound: float, upper_bound: float, repeats: int) -> float:
"""
>>> secant_method(1, 3, 2)
0.2139409276214589
"""
x0 = lower_bound
x1 = upper_bound
for _ in range(0, repeats):
x0, x1 = x1, x1 - (f(x1) * (x1 - x0)) / (f(x1) - f(x0))
return x1
if __name__ == "__main__":
print(f"Example: {secant_method(1, 3, 2)}")
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| -1 |
||
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
This file fetches quotes from the " ZenQuotes API ".
It does not require any API key as it uses free tier.
For more details and premium features visit:
https://zenquotes.io/
"""
import pprint
import requests
def quote_of_the_day() -> list:
API_ENDPOINT_URL = "https://zenquotes.io/api/today/" # noqa: N806
return requests.get(API_ENDPOINT_URL).json()
def random_quotes() -> list:
API_ENDPOINT_URL = "https://zenquotes.io/api/random/" # noqa: N806
return requests.get(API_ENDPOINT_URL).json()
if __name__ == "__main__":
"""
response object has all the info with the quote
To retrieve the actual quote access the response.json() object as below
response.json() is a list of json object
response.json()[0]['q'] = actual quote.
response.json()[0]['a'] = author name.
response.json()[0]['h'] = in html format.
"""
response = random_quotes()
pprint.pprint(response)
| """
This file fetches quotes from the " ZenQuotes API ".
It does not require any API key as it uses free tier.
For more details and premium features visit:
https://zenquotes.io/
"""
import pprint
import requests
def quote_of_the_day() -> list:
API_ENDPOINT_URL = "https://zenquotes.io/api/today/" # noqa: N806
return requests.get(API_ENDPOINT_URL).json()
def random_quotes() -> list:
API_ENDPOINT_URL = "https://zenquotes.io/api/random/" # noqa: N806
return requests.get(API_ENDPOINT_URL).json()
if __name__ == "__main__":
"""
response object has all the info with the quote
To retrieve the actual quote access the response.json() object as below
response.json() is a list of json object
response.json()[0]['q'] = actual quote.
response.json()[0]['a'] = author name.
response.json()[0]['h'] = in html format.
"""
response = random_quotes()
pprint.pprint(response)
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3
# I have written my code naively same as definition of primitive root
# however every time I run this program, memory exceeded...
# so I used 4.80 Algorithm in
# Handbook of Applied Cryptography(CRC Press, ISBN : 0-8493-8523-7, October 1996)
# and it seems to run nicely!
def primitive_root(p_val: int) -> int:
print("Generating primitive root of p")
while True:
g = random.randrange(3, p_val)
if pow(g, 2, p_val) == 1:
continue
if pow(g, p_val, p_val) == 1:
continue
return g
def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
print("Generating prime p...")
p = rabin_miller.generate_large_prime(key_size) # select large prime number.
e_1 = primitive_root(p) # one primitive root on modulo p.
d = random.randrange(3, p) # private_key -> have to be greater than 2 for safety.
e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)
public_key = (key_size, e_1, e_2, p)
private_key = (key_size, d)
return public_key, private_key
def make_key_files(name: str, key_size: int) -> None:
if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
print("\nWARNING:")
print(
'"%s_pubkey.txt" or "%s_privkey.txt" already exists. \n'
"Use a different name or delete these files and re-run this program."
% (name, name)
)
sys.exit()
public_key, private_key = generate_key(key_size)
print(f"\nWriting public key to file {name}_pubkey.txt...")
with open(f"{name}_pubkey.txt", "w") as fo:
fo.write(
"%d,%d,%d,%d" % (public_key[0], public_key[1], public_key[2], public_key[3])
)
print(f"Writing private key to file {name}_privkey.txt...")
with open(f"{name}_privkey.txt", "w") as fo:
fo.write("%d,%d" % (private_key[0], private_key[1]))
def main() -> None:
print("Making key files...")
make_key_files("elgamal", 2048)
print("Key files generation successful")
if __name__ == "__main__":
main()
| import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3
# I have written my code naively same as definition of primitive root
# however every time I run this program, memory exceeded...
# so I used 4.80 Algorithm in
# Handbook of Applied Cryptography(CRC Press, ISBN : 0-8493-8523-7, October 1996)
# and it seems to run nicely!
def primitive_root(p_val: int) -> int:
print("Generating primitive root of p")
while True:
g = random.randrange(3, p_val)
if pow(g, 2, p_val) == 1:
continue
if pow(g, p_val, p_val) == 1:
continue
return g
def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
print("Generating prime p...")
p = rabin_miller.generate_large_prime(key_size) # select large prime number.
e_1 = primitive_root(p) # one primitive root on modulo p.
d = random.randrange(3, p) # private_key -> have to be greater than 2 for safety.
e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)
public_key = (key_size, e_1, e_2, p)
private_key = (key_size, d)
return public_key, private_key
def make_key_files(name: str, key_size: int) -> None:
if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
print("\nWARNING:")
print(
'"%s_pubkey.txt" or "%s_privkey.txt" already exists. \n'
"Use a different name or delete these files and re-run this program."
% (name, name)
)
sys.exit()
public_key, private_key = generate_key(key_size)
print(f"\nWriting public key to file {name}_pubkey.txt...")
with open(f"{name}_pubkey.txt", "w") as fo:
fo.write(
"%d,%d,%d,%d" % (public_key[0], public_key[1], public_key[2], public_key[3])
)
print(f"Writing private key to file {name}_privkey.txt...")
with open(f"{name}_privkey.txt", "w") as fo:
fo.write("%d,%d" % (private_key[0], private_key[1]))
def main() -> None:
print("Making key files...")
make_key_files("elgamal", 2048)
print("Key files generation successful")
if __name__ == "__main__":
main()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
"""
The Knuth-Morris-Pratt Algorithm for finding a pattern within a piece of text
with complexity O(n + m)
1) Preprocess pattern to identify any suffixes that are identical to prefixes
This tells us where to continue from if we get a mismatch between a character
in our pattern and the text.
2) Step through the text one character at a time and compare it to a character in
the pattern updating our location within the pattern if necessary
"""
# 1) Construct the failure array
failure = get_failure_array(pattern)
# 2) Step through text searching for pattern
i, j = 0, 0 # index into text, pattern
while i < len(text):
if pattern[j] == text[i]:
if j == (len(pattern) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
j = failure[j - 1]
continue
i += 1
return False
def get_failure_array(pattern: str) -> list[int]:
"""
Calculates the new index we should go to if we fail a comparison
:param pattern:
:return:
"""
failure = [0]
i = 0
j = 1
while j < len(pattern):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
i = failure[i - 1]
continue
j += 1
failure.append(i)
return failure
if __name__ == "__main__":
# Test 1)
pattern = "abc1abc12"
text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
text2 = "alskfjaldsk23adsfabcabc"
assert kmp(pattern, text1) and not kmp(pattern, text2)
# Test 2)
pattern = "ABABX"
text = "ABABZABABYABABX"
assert kmp(pattern, text)
# Test 3)
pattern = "AAAB"
text = "ABAAAAAB"
assert kmp(pattern, text)
# Test 4)
pattern = "abcdabcy"
text = "abcxabcdabxabcdabcdabcy"
assert kmp(pattern, text)
# Test 5)
pattern = "aabaabaaa"
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
"""
The Knuth-Morris-Pratt Algorithm for finding a pattern within a piece of text
with complexity O(n + m)
1) Preprocess pattern to identify any suffixes that are identical to prefixes
This tells us where to continue from if we get a mismatch between a character
in our pattern and the text.
2) Step through the text one character at a time and compare it to a character in
the pattern updating our location within the pattern if necessary
"""
# 1) Construct the failure array
failure = get_failure_array(pattern)
# 2) Step through text searching for pattern
i, j = 0, 0 # index into text, pattern
while i < len(text):
if pattern[j] == text[i]:
if j == (len(pattern) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
j = failure[j - 1]
continue
i += 1
return False
def get_failure_array(pattern: str) -> list[int]:
"""
Calculates the new index we should go to if we fail a comparison
:param pattern:
:return:
"""
failure = [0]
i = 0
j = 1
while j < len(pattern):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
i = failure[i - 1]
continue
j += 1
failure.append(i)
return failure
if __name__ == "__main__":
# Test 1)
pattern = "abc1abc12"
text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
text2 = "alskfjaldsk23adsfabcabc"
assert kmp(pattern, text1) and not kmp(pattern, text2)
# Test 2)
pattern = "ABABX"
text = "ABABZABABYABABX"
assert kmp(pattern, text)
# Test 3)
pattern = "AAAB"
text = "ABAAAAAB"
assert kmp(pattern, text)
# Test 4)
pattern = "abcdabcy"
text = "abcxabcdabxabcdabcdabcy"
assert kmp(pattern, text)
# Test 5)
pattern = "aabaabaaa"
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| import unittest
from timeit import timeit
def least_common_multiple_slow(first_num: int, second_num: int) -> int:
"""
Find the least common multiple of two numbers.
Learn more: https://en.wikipedia.org/wiki/Least_common_multiple
>>> least_common_multiple_slow(5, 2)
10
>>> least_common_multiple_slow(12, 76)
228
"""
max_num = first_num if first_num >= second_num else second_num
common_mult = max_num
while (common_mult % first_num > 0) or (common_mult % second_num > 0):
common_mult += max_num
return common_mult
def greatest_common_divisor(a: int, b: int) -> int:
"""
Calculate Greatest Common Divisor (GCD).
see greatest_common_divisor.py
>>> greatest_common_divisor(24, 40)
8
>>> greatest_common_divisor(1, 1)
1
>>> greatest_common_divisor(1, 800)
1
>>> greatest_common_divisor(11, 37)
1
>>> greatest_common_divisor(3, 5)
1
>>> greatest_common_divisor(16, 4)
4
"""
return b if a == 0 else greatest_common_divisor(b % a, a)
def least_common_multiple_fast(first_num: int, second_num: int) -> int:
"""
Find the least common multiple of two numbers.
https://en.wikipedia.org/wiki/Least_common_multiple#Using_the_greatest_common_divisor
>>> least_common_multiple_fast(5,2)
10
>>> least_common_multiple_fast(12,76)
228
"""
return first_num // greatest_common_divisor(first_num, second_num) * second_num
def benchmark():
setup = (
"from __main__ import least_common_multiple_slow, least_common_multiple_fast"
)
print(
"least_common_multiple_slow():",
timeit("least_common_multiple_slow(1000, 999)", setup=setup),
)
print(
"least_common_multiple_fast():",
timeit("least_common_multiple_fast(1000, 999)", setup=setup),
)
class TestLeastCommonMultiple(unittest.TestCase):
test_inputs = [
(10, 20),
(13, 15),
(4, 31),
(10, 42),
(43, 34),
(5, 12),
(12, 25),
(10, 25),
(6, 9),
]
expected_results = [20, 195, 124, 210, 1462, 60, 300, 50, 18]
def test_lcm_function(self):
for i, (first_num, second_num) in enumerate(self.test_inputs):
slow_result = least_common_multiple_slow(first_num, second_num)
fast_result = least_common_multiple_fast(first_num, second_num)
with self.subTest(i=i):
self.assertEqual(slow_result, self.expected_results[i])
self.assertEqual(fast_result, self.expected_results[i])
if __name__ == "__main__":
benchmark()
unittest.main()
| import unittest
from timeit import timeit
def least_common_multiple_slow(first_num: int, second_num: int) -> int:
"""
Find the least common multiple of two numbers.
Learn more: https://en.wikipedia.org/wiki/Least_common_multiple
>>> least_common_multiple_slow(5, 2)
10
>>> least_common_multiple_slow(12, 76)
228
"""
max_num = first_num if first_num >= second_num else second_num
common_mult = max_num
while (common_mult % first_num > 0) or (common_mult % second_num > 0):
common_mult += max_num
return common_mult
def greatest_common_divisor(a: int, b: int) -> int:
"""
Calculate Greatest Common Divisor (GCD).
see greatest_common_divisor.py
>>> greatest_common_divisor(24, 40)
8
>>> greatest_common_divisor(1, 1)
1
>>> greatest_common_divisor(1, 800)
1
>>> greatest_common_divisor(11, 37)
1
>>> greatest_common_divisor(3, 5)
1
>>> greatest_common_divisor(16, 4)
4
"""
return b if a == 0 else greatest_common_divisor(b % a, a)
def least_common_multiple_fast(first_num: int, second_num: int) -> int:
"""
Find the least common multiple of two numbers.
https://en.wikipedia.org/wiki/Least_common_multiple#Using_the_greatest_common_divisor
>>> least_common_multiple_fast(5,2)
10
>>> least_common_multiple_fast(12,76)
228
"""
return first_num // greatest_common_divisor(first_num, second_num) * second_num
def benchmark():
setup = (
"from __main__ import least_common_multiple_slow, least_common_multiple_fast"
)
print(
"least_common_multiple_slow():",
timeit("least_common_multiple_slow(1000, 999)", setup=setup),
)
print(
"least_common_multiple_fast():",
timeit("least_common_multiple_fast(1000, 999)", setup=setup),
)
class TestLeastCommonMultiple(unittest.TestCase):
test_inputs = [
(10, 20),
(13, 15),
(4, 31),
(10, 42),
(43, 34),
(5, 12),
(12, 25),
(10, 25),
(6, 9),
]
expected_results = [20, 195, 124, 210, 1462, 60, 300, 50, 18]
def test_lcm_function(self):
for i, (first_num, second_num) in enumerate(self.test_inputs):
slow_result = least_common_multiple_slow(first_num, second_num)
fast_result = least_common_multiple_fast(first_num, second_num)
with self.subTest(i=i):
self.assertEqual(slow_result, self.expected_results[i])
self.assertEqual(fast_result, self.expected_results[i])
if __name__ == "__main__":
benchmark()
unittest.main()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
res = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(10000):
out_file.write(data)
soup = BeautifulSoup(res.text, "html.parser")
links = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f"http://google.com{link.get('href')}")
| import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
res = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(10000):
out_file.write(data)
soup = BeautifulSoup(res.text, "html.parser")
links = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f"http://google.com{link.get('href')}")
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| def send_file(filename: str = "mytext.txt", testing: bool = False) -> None:
import socket
port = 12312 # Reserve a port for your service.
sock = socket.socket() # Create a socket object
host = socket.gethostname() # Get local machine name
sock.bind((host, port)) # Bind to the port
sock.listen(5) # Now wait for client connection.
print("Server listening....")
while True:
conn, addr = sock.accept() # Establish connection with client.
print(f"Got connection from {addr}")
data = conn.recv(1024)
print(f"Server received: {data = }")
with open(filename, "rb") as in_file:
data = in_file.read(1024)
while data:
conn.send(data)
print(f"Sent {data!r}")
data = in_file.read(1024)
print("Done sending")
conn.close()
if testing: # Allow the test to complete
break
sock.shutdown(1)
sock.close()
if __name__ == "__main__":
send_file()
| def send_file(filename: str = "mytext.txt", testing: bool = False) -> None:
import socket
port = 12312 # Reserve a port for your service.
sock = socket.socket() # Create a socket object
host = socket.gethostname() # Get local machine name
sock.bind((host, port)) # Bind to the port
sock.listen(5) # Now wait for client connection.
print("Server listening....")
while True:
conn, addr = sock.accept() # Establish connection with client.
print(f"Got connection from {addr}")
data = conn.recv(1024)
print(f"Server received: {data = }")
with open(filename, "rb") as in_file:
data = in_file.read(1024)
while data:
conn.send(data)
print(f"Sent {data!r}")
data = in_file.read(1024)
print("Done sending")
conn.close()
if testing: # Allow the test to complete
break
sock.shutdown(1)
sock.close()
if __name__ == "__main__":
send_file()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
Python3 program to evaluate a prefix expression.
"""
calc = {
"+": lambda x, y: x + y,
"-": lambda x, y: x - y,
"*": lambda x, y: x * y,
"/": lambda x, y: x / y,
}
def is_operand(c):
"""
Return True if the given char c is an operand, e.g. it is a number
>>> is_operand("1")
True
>>> is_operand("+")
False
"""
return c.isdigit()
def evaluate(expression):
"""
Evaluate a given expression in prefix notation.
Asserts that the given expression is valid.
>>> evaluate("+ 9 * 2 6")
21
>>> evaluate("/ * 10 2 + 4 1 ")
4.0
"""
stack = []
# iterate over the string in reverse order
for c in expression.split()[::-1]:
# push operand to stack
if is_operand(c):
stack.append(int(c))
else:
# pop values from stack can calculate the result
# push the result onto the stack again
o1 = stack.pop()
o2 = stack.pop()
stack.append(calc[c](o1, o2))
return stack.pop()
# Driver code
if __name__ == "__main__":
test_expression = "+ 9 * 2 6"
print(evaluate(test_expression))
test_expression = "/ * 10 2 + 4 1 "
print(evaluate(test_expression))
| """
Python3 program to evaluate a prefix expression.
"""
calc = {
"+": lambda x, y: x + y,
"-": lambda x, y: x - y,
"*": lambda x, y: x * y,
"/": lambda x, y: x / y,
}
def is_operand(c):
"""
Return True if the given char c is an operand, e.g. it is a number
>>> is_operand("1")
True
>>> is_operand("+")
False
"""
return c.isdigit()
def evaluate(expression):
"""
Evaluate a given expression in prefix notation.
Asserts that the given expression is valid.
>>> evaluate("+ 9 * 2 6")
21
>>> evaluate("/ * 10 2 + 4 1 ")
4.0
"""
stack = []
# iterate over the string in reverse order
for c in expression.split()[::-1]:
# push operand to stack
if is_operand(c):
stack.append(int(c))
else:
# pop values from stack can calculate the result
# push the result onto the stack again
o1 = stack.pop()
o2 = stack.pop()
stack.append(calc[c](o1, o2))
return stack.pop()
# Driver code
if __name__ == "__main__":
test_expression = "+ 9 * 2 6"
print(evaluate(test_expression))
test_expression = "/ * 10 2 + 4 1 "
print(evaluate(test_expression))
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
Wikipedia: https://en.wikipedia.org/wiki/Enigma_machine
Video explanation: https://youtu.be/QwQVMqfoB2E
Also check out Numberphile's and Computerphile's videos on this topic
This module contains function 'enigma' which emulates
the famous Enigma machine from WWII.
Module includes:
- enigma function
- showcase of function usage
- 9 randomly generated rotors
- reflector (aka static rotor)
- original alphabet
Created by TrapinchO
"""
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
"A": "N",
"N": "A",
"B": "O",
"O": "B",
"C": "P",
"P": "C",
"D": "Q",
"Q": "D",
"E": "R",
"R": "E",
"F": "S",
"S": "F",
"G": "T",
"T": "G",
"H": "U",
"U": "H",
"I": "V",
"V": "I",
"J": "W",
"W": "J",
"K": "X",
"X": "K",
"L": "Y",
"Y": "L",
"M": "Z",
"Z": "M",
}
# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _validator(
rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
"""
Checks if the values can be used for the 'enigma' function
>>> _validator((1,1,1), (rotor1, rotor2, rotor3), 'POLAND')
((1, 1, 1), ('EGZWVONAHDCLFQMSIPJBYUKXTR', 'FOBHMDKEXQNRAULPGSJVTYICZW', \
'ZJXESIUQLHAVRMDOYGTNFWPBKC'), \
{'P': 'O', 'O': 'P', 'L': 'A', 'A': 'L', 'N': 'D', 'D': 'N'})
:param rotpos: rotor_positon
:param rotsel: rotor_selection
:param pb: plugb -> validated and transformed
:return: (rotpos, rotsel, pb)
"""
# Checks if there are 3 unique rotors
unique_rotsel = len(set(rotsel))
if unique_rotsel < 3:
raise Exception(f"Please use 3 unique rotors (not {unique_rotsel})")
# Checks if rotor positions are valid
rotorpos1, rotorpos2, rotorpos3 = rotpos
if not 0 < rotorpos1 <= len(abc):
raise ValueError(
"First rotor position is not within range of 1..26 (" f"{rotorpos1}"
)
if not 0 < rotorpos2 <= len(abc):
raise ValueError(
"Second rotor position is not within range of 1..26 (" f"{rotorpos2})"
)
if not 0 < rotorpos3 <= len(abc):
raise ValueError(
"Third rotor position is not within range of 1..26 (" f"{rotorpos3})"
)
# Validates string and returns dict
pbdict = _plugboard(pb)
return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
"""
https://en.wikipedia.org/wiki/Enigma_machine#Plugboard
>>> _plugboard('PICTURES')
{'P': 'I', 'I': 'P', 'C': 'T', 'T': 'C', 'U': 'R', 'R': 'U', 'E': 'S', 'S': 'E'}
>>> _plugboard('POLAND')
{'P': 'O', 'O': 'P', 'L': 'A', 'A': 'L', 'N': 'D', 'D': 'N'}
In the code, 'pb' stands for 'plugboard'
Pairs can be separated by spaces
:param pbstring: string containing plugboard setting for the Enigma machine
:return: dictionary containing converted pairs
"""
# tests the input string if it
# a) is type string
# b) has even length (so pairs can be made)
if not isinstance(pbstring, str):
raise TypeError(f"Plugboard setting isn't type string ({type(pbstring)})")
elif len(pbstring) % 2 != 0:
raise Exception(f"Odd number of symbols ({len(pbstring)})")
elif pbstring == "":
return {}
pbstring.replace(" ", "")
# Checks if all characters are unique
tmppbl = set()
for i in pbstring:
if i not in abc:
raise Exception(f"'{i}' not in list of symbols")
elif i in tmppbl:
raise Exception(f"Duplicate symbol ({i})")
else:
tmppbl.add(i)
del tmppbl
# Created the dictionary
pb = {}
for j in range(0, len(pbstring) - 1, 2):
pb[pbstring[j]] = pbstring[j + 1]
pb[pbstring[j + 1]] = pbstring[j]
return pb
def enigma(
text: str,
rotor_position: RotorPositionT,
rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
plugb: str = "",
) -> str:
"""
The only difference with real-world enigma is that I allowed string input.
All characters are converted to uppercase. (non-letter symbol are ignored)
How it works:
(for every letter in the message)
- Input letter goes into the plugboard.
If it is connected to another one, switch it.
- Letter goes through 3 rotors.
Each rotor can be represented as 2 sets of symbol, where one is shuffled.
Each symbol from the first set has corresponding symbol in
the second set and vice versa.
example:
| ABCDEFGHIJKLMNOPQRSTUVWXYZ | e.g. F=D and D=F
| VKLEPDBGRNWTFCJOHQAMUZYIXS |
- Symbol then goes through reflector (static rotor).
There it is switched with paired symbol
The reflector can be represented as2 sets, each with half of the alphanet.
There are usually 10 pairs of letters.
Example:
| ABCDEFGHIJKLM | e.g. E is paired to X
| ZYXWVUTSRQPON | so when E goes in X goes out and vice versa
- Letter then goes through the rotors again
- If the letter is connected to plugboard, it is switched.
- Return the letter
>>> enigma('Hello World!', (1, 2, 1), plugb='pictures')
'KORYH JUHHI!'
>>> enigma('KORYH, juhhi!', (1, 2, 1), plugb='pictures')
'HELLO, WORLD!'
>>> enigma('hello world!', (1, 1, 1), plugb='pictures')
'FPNCZ QWOBU!'
>>> enigma('FPNCZ QWOBU', (1, 1, 1), plugb='pictures')
'HELLO WORLD'
:param text: input message
:param rotor_position: tuple with 3 values in range 1..26
:param rotor_selection: tuple with 3 rotors ()
:param plugb: string containing plugboard configuration (default '')
:return: en/decrypted string
"""
text = text.upper()
rotor_position, rotor_selection, plugboard = _validator(
rotor_position, rotor_selection, plugb.upper()
)
rotorpos1, rotorpos2, rotorpos3 = rotor_position
rotor1, rotor2, rotor3 = rotor_selection
rotorpos1 -= 1
rotorpos2 -= 1
rotorpos3 -= 1
result = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
symbol = plugboard[symbol]
# rotor ra --------------------------
index = abc.index(symbol) + rotorpos1
symbol = rotor1[index % len(abc)]
# rotor rb --------------------------
index = abc.index(symbol) + rotorpos2
symbol = rotor2[index % len(abc)]
# rotor rc --------------------------
index = abc.index(symbol) + rotorpos3
symbol = rotor3[index % len(abc)]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
symbol = reflector[symbol]
# 2nd rotors
symbol = abc[rotor3.index(symbol) - rotorpos3]
symbol = abc[rotor2.index(symbol) - rotorpos2]
symbol = abc[rotor1.index(symbol) - rotorpos1]
# 2nd plugboard
if symbol in plugboard:
symbol = plugboard[symbol]
# moves/resets rotor positions
rotorpos1 += 1
if rotorpos1 >= len(abc):
rotorpos1 = 0
rotorpos2 += 1
if rotorpos2 >= len(abc):
rotorpos2 = 0
rotorpos3 += 1
if rotorpos3 >= len(abc):
rotorpos3 = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(symbol)
return "".join(result)
if __name__ == "__main__":
message = "This is my Python script that emulates the Enigma machine from WWII."
rotor_pos = (1, 1, 1)
pb = "pictures"
rotor_sel = (rotor2, rotor4, rotor8)
en = enigma(message, rotor_pos, rotor_sel, pb)
print("Encrypted message:", en)
print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| """
Wikipedia: https://en.wikipedia.org/wiki/Enigma_machine
Video explanation: https://youtu.be/QwQVMqfoB2E
Also check out Numberphile's and Computerphile's videos on this topic
This module contains function 'enigma' which emulates
the famous Enigma machine from WWII.
Module includes:
- enigma function
- showcase of function usage
- 9 randomly generated rotors
- reflector (aka static rotor)
- original alphabet
Created by TrapinchO
"""
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
"A": "N",
"N": "A",
"B": "O",
"O": "B",
"C": "P",
"P": "C",
"D": "Q",
"Q": "D",
"E": "R",
"R": "E",
"F": "S",
"S": "F",
"G": "T",
"T": "G",
"H": "U",
"U": "H",
"I": "V",
"V": "I",
"J": "W",
"W": "J",
"K": "X",
"X": "K",
"L": "Y",
"Y": "L",
"M": "Z",
"Z": "M",
}
# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _validator(
rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
"""
Checks if the values can be used for the 'enigma' function
>>> _validator((1,1,1), (rotor1, rotor2, rotor3), 'POLAND')
((1, 1, 1), ('EGZWVONAHDCLFQMSIPJBYUKXTR', 'FOBHMDKEXQNRAULPGSJVTYICZW', \
'ZJXESIUQLHAVRMDOYGTNFWPBKC'), \
{'P': 'O', 'O': 'P', 'L': 'A', 'A': 'L', 'N': 'D', 'D': 'N'})
:param rotpos: rotor_positon
:param rotsel: rotor_selection
:param pb: plugb -> validated and transformed
:return: (rotpos, rotsel, pb)
"""
# Checks if there are 3 unique rotors
unique_rotsel = len(set(rotsel))
if unique_rotsel < 3:
raise Exception(f"Please use 3 unique rotors (not {unique_rotsel})")
# Checks if rotor positions are valid
rotorpos1, rotorpos2, rotorpos3 = rotpos
if not 0 < rotorpos1 <= len(abc):
raise ValueError(
"First rotor position is not within range of 1..26 (" f"{rotorpos1}"
)
if not 0 < rotorpos2 <= len(abc):
raise ValueError(
"Second rotor position is not within range of 1..26 (" f"{rotorpos2})"
)
if not 0 < rotorpos3 <= len(abc):
raise ValueError(
"Third rotor position is not within range of 1..26 (" f"{rotorpos3})"
)
# Validates string and returns dict
pbdict = _plugboard(pb)
return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
"""
https://en.wikipedia.org/wiki/Enigma_machine#Plugboard
>>> _plugboard('PICTURES')
{'P': 'I', 'I': 'P', 'C': 'T', 'T': 'C', 'U': 'R', 'R': 'U', 'E': 'S', 'S': 'E'}
>>> _plugboard('POLAND')
{'P': 'O', 'O': 'P', 'L': 'A', 'A': 'L', 'N': 'D', 'D': 'N'}
In the code, 'pb' stands for 'plugboard'
Pairs can be separated by spaces
:param pbstring: string containing plugboard setting for the Enigma machine
:return: dictionary containing converted pairs
"""
# tests the input string if it
# a) is type string
# b) has even length (so pairs can be made)
if not isinstance(pbstring, str):
raise TypeError(f"Plugboard setting isn't type string ({type(pbstring)})")
elif len(pbstring) % 2 != 0:
raise Exception(f"Odd number of symbols ({len(pbstring)})")
elif pbstring == "":
return {}
pbstring.replace(" ", "")
# Checks if all characters are unique
tmppbl = set()
for i in pbstring:
if i not in abc:
raise Exception(f"'{i}' not in list of symbols")
elif i in tmppbl:
raise Exception(f"Duplicate symbol ({i})")
else:
tmppbl.add(i)
del tmppbl
# Created the dictionary
pb = {}
for j in range(0, len(pbstring) - 1, 2):
pb[pbstring[j]] = pbstring[j + 1]
pb[pbstring[j + 1]] = pbstring[j]
return pb
def enigma(
text: str,
rotor_position: RotorPositionT,
rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
plugb: str = "",
) -> str:
"""
The only difference with real-world enigma is that I allowed string input.
All characters are converted to uppercase. (non-letter symbol are ignored)
How it works:
(for every letter in the message)
- Input letter goes into the plugboard.
If it is connected to another one, switch it.
- Letter goes through 3 rotors.
Each rotor can be represented as 2 sets of symbol, where one is shuffled.
Each symbol from the first set has corresponding symbol in
the second set and vice versa.
example:
| ABCDEFGHIJKLMNOPQRSTUVWXYZ | e.g. F=D and D=F
| VKLEPDBGRNWTFCJOHQAMUZYIXS |
- Symbol then goes through reflector (static rotor).
There it is switched with paired symbol
The reflector can be represented as2 sets, each with half of the alphanet.
There are usually 10 pairs of letters.
Example:
| ABCDEFGHIJKLM | e.g. E is paired to X
| ZYXWVUTSRQPON | so when E goes in X goes out and vice versa
- Letter then goes through the rotors again
- If the letter is connected to plugboard, it is switched.
- Return the letter
>>> enigma('Hello World!', (1, 2, 1), plugb='pictures')
'KORYH JUHHI!'
>>> enigma('KORYH, juhhi!', (1, 2, 1), plugb='pictures')
'HELLO, WORLD!'
>>> enigma('hello world!', (1, 1, 1), plugb='pictures')
'FPNCZ QWOBU!'
>>> enigma('FPNCZ QWOBU', (1, 1, 1), plugb='pictures')
'HELLO WORLD'
:param text: input message
:param rotor_position: tuple with 3 values in range 1..26
:param rotor_selection: tuple with 3 rotors ()
:param plugb: string containing plugboard configuration (default '')
:return: en/decrypted string
"""
text = text.upper()
rotor_position, rotor_selection, plugboard = _validator(
rotor_position, rotor_selection, plugb.upper()
)
rotorpos1, rotorpos2, rotorpos3 = rotor_position
rotor1, rotor2, rotor3 = rotor_selection
rotorpos1 -= 1
rotorpos2 -= 1
rotorpos3 -= 1
result = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
symbol = plugboard[symbol]
# rotor ra --------------------------
index = abc.index(symbol) + rotorpos1
symbol = rotor1[index % len(abc)]
# rotor rb --------------------------
index = abc.index(symbol) + rotorpos2
symbol = rotor2[index % len(abc)]
# rotor rc --------------------------
index = abc.index(symbol) + rotorpos3
symbol = rotor3[index % len(abc)]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
symbol = reflector[symbol]
# 2nd rotors
symbol = abc[rotor3.index(symbol) - rotorpos3]
symbol = abc[rotor2.index(symbol) - rotorpos2]
symbol = abc[rotor1.index(symbol) - rotorpos1]
# 2nd plugboard
if symbol in plugboard:
symbol = plugboard[symbol]
# moves/resets rotor positions
rotorpos1 += 1
if rotorpos1 >= len(abc):
rotorpos1 = 0
rotorpos2 += 1
if rotorpos2 >= len(abc):
rotorpos2 = 0
rotorpos3 += 1
if rotorpos3 >= len(abc):
rotorpos3 = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(symbol)
return "".join(result)
if __name__ == "__main__":
message = "This is my Python script that emulates the Enigma machine from WWII."
rotor_pos = (1, 1, 1)
pb = "pictures"
rotor_sel = (rotor2, rotor4, rotor8)
en = enigma(message, rotor_pos, rotor_sel, pb)
print("Encrypted message:", en)
print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
Problem 39: https://projecteuler.net/problem=39
If p is the perimeter of a right angle triangle with integral length sides,
{a,b,c}, there are exactly three solutions for p = 120.
{20,48,52}, {24,45,51}, {30,40,50}
For which value of p ≤ 1000, is the number of solutions maximised?
"""
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
"""
Returns a dictionary with keys as the perimeter of a right angled triangle
and value as the number of corresponding triplets.
>>> pythagorean_triple(15)
Counter({12: 1})
>>> pythagorean_triple(40)
Counter({12: 1, 30: 1, 24: 1, 40: 1, 36: 1})
>>> pythagorean_triple(50)
Counter({12: 1, 30: 1, 24: 1, 40: 1, 36: 1, 48: 1})
"""
triplets: typing.Counter[int] = Counter()
for base in range(1, max_perimeter + 1):
for perpendicular in range(base, max_perimeter + 1):
hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(hypotenuse):
perimeter = int(base + perpendicular + hypotenuse)
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def solution(n: int = 1000) -> int:
"""
Returns perimeter with maximum solutions.
>>> solution(100)
90
>>> solution(200)
180
>>> solution(1000)
840
"""
triplets = pythagorean_triple(n)
return triplets.most_common(1)[0][0]
if __name__ == "__main__":
print(f"Perimeter {solution()} has maximum solutions")
| """
Problem 39: https://projecteuler.net/problem=39
If p is the perimeter of a right angle triangle with integral length sides,
{a,b,c}, there are exactly three solutions for p = 120.
{20,48,52}, {24,45,51}, {30,40,50}
For which value of p ≤ 1000, is the number of solutions maximised?
"""
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
"""
Returns a dictionary with keys as the perimeter of a right angled triangle
and value as the number of corresponding triplets.
>>> pythagorean_triple(15)
Counter({12: 1})
>>> pythagorean_triple(40)
Counter({12: 1, 30: 1, 24: 1, 40: 1, 36: 1})
>>> pythagorean_triple(50)
Counter({12: 1, 30: 1, 24: 1, 40: 1, 36: 1, 48: 1})
"""
triplets: typing.Counter[int] = Counter()
for base in range(1, max_perimeter + 1):
for perpendicular in range(base, max_perimeter + 1):
hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(hypotenuse):
perimeter = int(base + perpendicular + hypotenuse)
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def solution(n: int = 1000) -> int:
"""
Returns perimeter with maximum solutions.
>>> solution(100)
90
>>> solution(200)
180
>>> solution(1000)
840
"""
triplets = pythagorean_triple(n)
return triplets.most_common(1)[0][0]
if __name__ == "__main__":
print(f"Perimeter {solution()} has maximum solutions")
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| from collections import deque
class Process:
def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
self.process_name = process_name # process name
self.arrival_time = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
self.stop_time = arrival_time
self.burst_time = burst_time # remaining burst time
self.waiting_time = 0 # total time of the process wait in ready queue
self.turnaround_time = 0 # time from arrival time to completion time
class MLFQ:
"""
MLFQ(Multi Level Feedback Queue)
https://en.wikipedia.org/wiki/Multilevel_feedback_queue
MLFQ has a lot of queues that have different priority
In this MLFQ,
The first Queue(0) to last second Queue(N-2) of MLFQ have Round Robin Algorithm
The last Queue(N-1) has First Come, First Served Algorithm
"""
def __init__(
self,
number_of_queues: int,
time_slices: list[int],
queue: deque[Process],
current_time: int,
) -> None:
# total number of mlfq's queues
self.number_of_queues = number_of_queues
# time slice of queues that round robin algorithm applied
self.time_slices = time_slices
# unfinished process is in this ready_queue
self.ready_queue = queue
# current time
self.current_time = current_time
# finished process is in this sequence queue
self.finish_queue: deque[Process] = deque()
def calculate_sequence_of_finish_queue(self) -> list[str]:
"""
This method returns the sequence of finished processes
>>> P1 = Process("P1", 0, 53)
>>> P2 = Process("P2", 0, 17)
>>> P3 = Process("P3", 0, 68)
>>> P4 = Process("P4", 0, 24)
>>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
>>> _ = mlfq.multi_level_feedback_queue()
>>> mlfq.calculate_sequence_of_finish_queue()
['P2', 'P4', 'P1', 'P3']
"""
sequence = []
for i in range(len(self.finish_queue)):
sequence.append(self.finish_queue[i].process_name)
return sequence
def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
"""
This method calculates waiting time of processes
>>> P1 = Process("P1", 0, 53)
>>> P2 = Process("P2", 0, 17)
>>> P3 = Process("P3", 0, 68)
>>> P4 = Process("P4", 0, 24)
>>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
>>> _ = mlfq.multi_level_feedback_queue()
>>> mlfq.calculate_waiting_time([P1, P2, P3, P4])
[83, 17, 94, 101]
"""
waiting_times = []
for i in range(len(queue)):
waiting_times.append(queue[i].waiting_time)
return waiting_times
def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
"""
This method calculates turnaround time of processes
>>> P1 = Process("P1", 0, 53)
>>> P2 = Process("P2", 0, 17)
>>> P3 = Process("P3", 0, 68)
>>> P4 = Process("P4", 0, 24)
>>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
>>> _ = mlfq.multi_level_feedback_queue()
>>> mlfq.calculate_turnaround_time([P1, P2, P3, P4])
[136, 34, 162, 125]
"""
turnaround_times = []
for i in range(len(queue)):
turnaround_times.append(queue[i].turnaround_time)
return turnaround_times
def calculate_completion_time(self, queue: list[Process]) -> list[int]:
"""
This method calculates completion time of processes
>>> P1 = Process("P1", 0, 53)
>>> P2 = Process("P2", 0, 17)
>>> P3 = Process("P3", 0, 68)
>>> P4 = Process("P4", 0, 24)
>>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
>>> _ = mlfq.multi_level_feedback_queue()
>>> mlfq.calculate_turnaround_time([P1, P2, P3, P4])
[136, 34, 162, 125]
"""
completion_times = []
for i in range(len(queue)):
completion_times.append(queue[i].stop_time)
return completion_times
def calculate_remaining_burst_time_of_processes(
self, queue: deque[Process]
) -> list[int]:
"""
This method calculate remaining burst time of processes
>>> P1 = Process("P1", 0, 53)
>>> P2 = Process("P2", 0, 17)
>>> P3 = Process("P3", 0, 68)
>>> P4 = Process("P4", 0, 24)
>>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
>>> finish_queue, ready_queue = mlfq.round_robin(deque([P1, P2, P3, P4]), 17)
>>> mlfq.calculate_remaining_burst_time_of_processes(mlfq.finish_queue)
[0]
>>> mlfq.calculate_remaining_burst_time_of_processes(ready_queue)
[36, 51, 7]
>>> finish_queue, ready_queue = mlfq.round_robin(ready_queue, 25)
>>> mlfq.calculate_remaining_burst_time_of_processes(mlfq.finish_queue)
[0, 0]
>>> mlfq.calculate_remaining_burst_time_of_processes(ready_queue)
[11, 26]
"""
return [q.burst_time for q in queue]
def update_waiting_time(self, process: Process) -> int:
"""
This method updates waiting times of unfinished processes
>>> P1 = Process("P1", 0, 53)
>>> P2 = Process("P2", 0, 17)
>>> P3 = Process("P3", 0, 68)
>>> P4 = Process("P4", 0, 24)
>>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
>>> mlfq.current_time = 10
>>> P1.stop_time = 5
>>> mlfq.update_waiting_time(P1)
5
"""
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
"""
FCFS(First Come, First Served)
FCFS will be applied to MLFQ's last queue
A first came process will be finished at first
>>> P1 = Process("P1", 0, 53)
>>> P2 = Process("P2", 0, 17)
>>> P3 = Process("P3", 0, 68)
>>> P4 = Process("P4", 0, 24)
>>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
>>> _ = mlfq.first_come_first_served(mlfq.ready_queue)
>>> mlfq.calculate_sequence_of_finish_queue()
['P1', 'P2', 'P3', 'P4']
"""
finished: deque[Process] = deque() # sequence deque of finished process
while len(ready_queue) != 0:
cp = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(cp)
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
cp.burst_time = 0
# set the process's turnaround time because it is finished
cp.turnaround_time = self.current_time - cp.arrival_time
# set the completion time
cp.stop_time = self.current_time
# add the process to queue that has finished queue
finished.append(cp)
self.finish_queue.extend(finished) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def round_robin(
self, ready_queue: deque[Process], time_slice: int
) -> tuple[deque[Process], deque[Process]]:
"""
RR(Round Robin)
RR will be applied to MLFQ's all queues except last queue
All processes can't use CPU for time more than time_slice
If the process consume CPU up to time_slice, it will go back to ready queue
>>> P1 = Process("P1", 0, 53)
>>> P2 = Process("P2", 0, 17)
>>> P3 = Process("P3", 0, 68)
>>> P4 = Process("P4", 0, 24)
>>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
>>> finish_queue, ready_queue = mlfq.round_robin(mlfq.ready_queue, 17)
>>> mlfq.calculate_sequence_of_finish_queue()
['P2']
"""
finished: deque[Process] = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(ready_queue)):
cp = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(cp)
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
cp.stop_time = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(cp)
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
cp.burst_time = 0
# set the finish time
cp.stop_time = self.current_time
# update the process' turnaround time because it is finished
cp.turnaround_time = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(cp)
self.finish_queue.extend(finished) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def multi_level_feedback_queue(self) -> deque[Process]:
"""
MLFQ(Multi Level Feedback Queue)
>>> P1 = Process("P1", 0, 53)
>>> P2 = Process("P2", 0, 17)
>>> P3 = Process("P3", 0, 68)
>>> P4 = Process("P4", 0, 24)
>>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
>>> finish_queue = mlfq.multi_level_feedback_queue()
>>> mlfq.calculate_sequence_of_finish_queue()
['P2', 'P4', 'P1', 'P3']
"""
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1):
finished, self.ready_queue = self.round_robin(
self.ready_queue, self.time_slices[i]
)
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue)
return self.finish_queue
if __name__ == "__main__":
import doctest
P1 = Process("P1", 0, 53)
P2 = Process("P2", 0, 17)
P3 = Process("P3", 0, 68)
P4 = Process("P4", 0, 24)
number_of_queues = 3
time_slices = [17, 25]
queue = deque([P1, P2, P3, P4])
if len(time_slices) != number_of_queues - 1:
exit()
doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})
P1 = Process("P1", 0, 53)
P2 = Process("P2", 0, 17)
P3 = Process("P3", 0, 68)
P4 = Process("P4", 0, 24)
number_of_queues = 3
time_slices = [17, 25]
queue = deque([P1, P2, P3, P4])
mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
finish_queue = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"
)
# print sequence of finished processes
print(
f"sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"
)
| from collections import deque
class Process:
def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
self.process_name = process_name # process name
self.arrival_time = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
self.stop_time = arrival_time
self.burst_time = burst_time # remaining burst time
self.waiting_time = 0 # total time of the process wait in ready queue
self.turnaround_time = 0 # time from arrival time to completion time
class MLFQ:
"""
MLFQ(Multi Level Feedback Queue)
https://en.wikipedia.org/wiki/Multilevel_feedback_queue
MLFQ has a lot of queues that have different priority
In this MLFQ,
The first Queue(0) to last second Queue(N-2) of MLFQ have Round Robin Algorithm
The last Queue(N-1) has First Come, First Served Algorithm
"""
def __init__(
self,
number_of_queues: int,
time_slices: list[int],
queue: deque[Process],
current_time: int,
) -> None:
# total number of mlfq's queues
self.number_of_queues = number_of_queues
# time slice of queues that round robin algorithm applied
self.time_slices = time_slices
# unfinished process is in this ready_queue
self.ready_queue = queue
# current time
self.current_time = current_time
# finished process is in this sequence queue
self.finish_queue: deque[Process] = deque()
def calculate_sequence_of_finish_queue(self) -> list[str]:
"""
This method returns the sequence of finished processes
>>> P1 = Process("P1", 0, 53)
>>> P2 = Process("P2", 0, 17)
>>> P3 = Process("P3", 0, 68)
>>> P4 = Process("P4", 0, 24)
>>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
>>> _ = mlfq.multi_level_feedback_queue()
>>> mlfq.calculate_sequence_of_finish_queue()
['P2', 'P4', 'P1', 'P3']
"""
sequence = []
for i in range(len(self.finish_queue)):
sequence.append(self.finish_queue[i].process_name)
return sequence
def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
"""
This method calculates waiting time of processes
>>> P1 = Process("P1", 0, 53)
>>> P2 = Process("P2", 0, 17)
>>> P3 = Process("P3", 0, 68)
>>> P4 = Process("P4", 0, 24)
>>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
>>> _ = mlfq.multi_level_feedback_queue()
>>> mlfq.calculate_waiting_time([P1, P2, P3, P4])
[83, 17, 94, 101]
"""
waiting_times = []
for i in range(len(queue)):
waiting_times.append(queue[i].waiting_time)
return waiting_times
def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
"""
This method calculates turnaround time of processes
>>> P1 = Process("P1", 0, 53)
>>> P2 = Process("P2", 0, 17)
>>> P3 = Process("P3", 0, 68)
>>> P4 = Process("P4", 0, 24)
>>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
>>> _ = mlfq.multi_level_feedback_queue()
>>> mlfq.calculate_turnaround_time([P1, P2, P3, P4])
[136, 34, 162, 125]
"""
turnaround_times = []
for i in range(len(queue)):
turnaround_times.append(queue[i].turnaround_time)
return turnaround_times
def calculate_completion_time(self, queue: list[Process]) -> list[int]:
"""
This method calculates completion time of processes
>>> P1 = Process("P1", 0, 53)
>>> P2 = Process("P2", 0, 17)
>>> P3 = Process("P3", 0, 68)
>>> P4 = Process("P4", 0, 24)
>>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
>>> _ = mlfq.multi_level_feedback_queue()
>>> mlfq.calculate_turnaround_time([P1, P2, P3, P4])
[136, 34, 162, 125]
"""
completion_times = []
for i in range(len(queue)):
completion_times.append(queue[i].stop_time)
return completion_times
def calculate_remaining_burst_time_of_processes(
self, queue: deque[Process]
) -> list[int]:
"""
This method calculate remaining burst time of processes
>>> P1 = Process("P1", 0, 53)
>>> P2 = Process("P2", 0, 17)
>>> P3 = Process("P3", 0, 68)
>>> P4 = Process("P4", 0, 24)
>>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
>>> finish_queue, ready_queue = mlfq.round_robin(deque([P1, P2, P3, P4]), 17)
>>> mlfq.calculate_remaining_burst_time_of_processes(mlfq.finish_queue)
[0]
>>> mlfq.calculate_remaining_burst_time_of_processes(ready_queue)
[36, 51, 7]
>>> finish_queue, ready_queue = mlfq.round_robin(ready_queue, 25)
>>> mlfq.calculate_remaining_burst_time_of_processes(mlfq.finish_queue)
[0, 0]
>>> mlfq.calculate_remaining_burst_time_of_processes(ready_queue)
[11, 26]
"""
return [q.burst_time for q in queue]
def update_waiting_time(self, process: Process) -> int:
"""
This method updates waiting times of unfinished processes
>>> P1 = Process("P1", 0, 53)
>>> P2 = Process("P2", 0, 17)
>>> P3 = Process("P3", 0, 68)
>>> P4 = Process("P4", 0, 24)
>>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
>>> mlfq.current_time = 10
>>> P1.stop_time = 5
>>> mlfq.update_waiting_time(P1)
5
"""
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
"""
FCFS(First Come, First Served)
FCFS will be applied to MLFQ's last queue
A first came process will be finished at first
>>> P1 = Process("P1", 0, 53)
>>> P2 = Process("P2", 0, 17)
>>> P3 = Process("P3", 0, 68)
>>> P4 = Process("P4", 0, 24)
>>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
>>> _ = mlfq.first_come_first_served(mlfq.ready_queue)
>>> mlfq.calculate_sequence_of_finish_queue()
['P1', 'P2', 'P3', 'P4']
"""
finished: deque[Process] = deque() # sequence deque of finished process
while len(ready_queue) != 0:
cp = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(cp)
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
cp.burst_time = 0
# set the process's turnaround time because it is finished
cp.turnaround_time = self.current_time - cp.arrival_time
# set the completion time
cp.stop_time = self.current_time
# add the process to queue that has finished queue
finished.append(cp)
self.finish_queue.extend(finished) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def round_robin(
self, ready_queue: deque[Process], time_slice: int
) -> tuple[deque[Process], deque[Process]]:
"""
RR(Round Robin)
RR will be applied to MLFQ's all queues except last queue
All processes can't use CPU for time more than time_slice
If the process consume CPU up to time_slice, it will go back to ready queue
>>> P1 = Process("P1", 0, 53)
>>> P2 = Process("P2", 0, 17)
>>> P3 = Process("P3", 0, 68)
>>> P4 = Process("P4", 0, 24)
>>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
>>> finish_queue, ready_queue = mlfq.round_robin(mlfq.ready_queue, 17)
>>> mlfq.calculate_sequence_of_finish_queue()
['P2']
"""
finished: deque[Process] = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(ready_queue)):
cp = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(cp)
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
cp.stop_time = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(cp)
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
cp.burst_time = 0
# set the finish time
cp.stop_time = self.current_time
# update the process' turnaround time because it is finished
cp.turnaround_time = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(cp)
self.finish_queue.extend(finished) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def multi_level_feedback_queue(self) -> deque[Process]:
"""
MLFQ(Multi Level Feedback Queue)
>>> P1 = Process("P1", 0, 53)
>>> P2 = Process("P2", 0, 17)
>>> P3 = Process("P3", 0, 68)
>>> P4 = Process("P4", 0, 24)
>>> mlfq = MLFQ(3, [17, 25], deque([P1, P2, P3, P4]), 0)
>>> finish_queue = mlfq.multi_level_feedback_queue()
>>> mlfq.calculate_sequence_of_finish_queue()
['P2', 'P4', 'P1', 'P3']
"""
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1):
finished, self.ready_queue = self.round_robin(
self.ready_queue, self.time_slices[i]
)
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue)
return self.finish_queue
if __name__ == "__main__":
import doctest
P1 = Process("P1", 0, 53)
P2 = Process("P2", 0, 17)
P3 = Process("P3", 0, 68)
P4 = Process("P4", 0, 24)
number_of_queues = 3
time_slices = [17, 25]
queue = deque([P1, P2, P3, P4])
if len(time_slices) != number_of_queues - 1:
exit()
doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})
P1 = Process("P1", 0, 53)
P2 = Process("P2", 0, 17)
P3 = Process("P3", 0, 68)
P4 = Process("P4", 0, 24)
number_of_queues = 3
time_slices = [17, 25]
queue = deque([P1, P2, P3, P4])
mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
finish_queue = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"
)
# print sequence of finished processes
print(
f"sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"
)
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
Resources:
- https://en.wikipedia.org/wiki/Conjugate_gradient_method
- https://en.wikipedia.org/wiki/Definite_symmetric_matrix
"""
from typing import Any
import numpy as np
def _is_matrix_spd(matrix: np.ndarray) -> bool:
"""
Returns True if input matrix is symmetric positive definite.
Returns False otherwise.
For a matrix to be SPD, all eigenvalues must be positive.
>>> import numpy as np
>>> matrix = np.array([
... [4.12401784, -5.01453636, -0.63865857],
... [-5.01453636, 12.33347422, -3.40493586],
... [-0.63865857, -3.40493586, 5.78591885]])
>>> _is_matrix_spd(matrix)
True
>>> matrix = np.array([
... [0.34634879, 1.96165514, 2.18277744],
... [0.74074469, -1.19648894, -1.34223498],
... [-0.7687067 , 0.06018373, -1.16315631]])
>>> _is_matrix_spd(matrix)
False
"""
# Ensure matrix is square.
assert np.shape(matrix)[0] == np.shape(matrix)[1]
# If matrix not symmetric, exit right away.
if np.allclose(matrix, matrix.T) is False:
return False
# Get eigenvalues and eignevectors for a symmetric matrix.
eigen_values, _ = np.linalg.eigh(matrix)
# Check sign of all eigenvalues.
# np.all returns a value of type np.bool_
return bool(np.all(eigen_values > 0))
def _create_spd_matrix(dimension: int) -> Any:
"""
Returns a symmetric positive definite matrix given a dimension.
Input:
dimension gives the square matrix dimension.
Output:
spd_matrix is an diminesion x dimensions symmetric positive definite (SPD) matrix.
>>> import numpy as np
>>> dimension = 3
>>> spd_matrix = _create_spd_matrix(dimension)
>>> _is_matrix_spd(spd_matrix)
True
"""
random_matrix = np.random.randn(dimension, dimension)
spd_matrix = np.dot(random_matrix, random_matrix.T)
assert _is_matrix_spd(spd_matrix)
return spd_matrix
def conjugate_gradient(
spd_matrix: np.ndarray,
load_vector: np.ndarray,
max_iterations: int = 1000,
tol: float = 1e-8,
) -> Any:
"""
Returns solution to the linear system np.dot(spd_matrix, x) = b.
Input:
spd_matrix is an NxN Symmetric Positive Definite (SPD) matrix.
load_vector is an Nx1 vector.
Output:
x is an Nx1 vector that is the solution vector.
>>> import numpy as np
>>> spd_matrix = np.array([
... [8.73256573, -5.02034289, -2.68709226],
... [-5.02034289, 3.78188322, 0.91980451],
... [-2.68709226, 0.91980451, 1.94746467]])
>>> b = np.array([
... [-5.80872761],
... [ 3.23807431],
... [ 1.95381422]])
>>> conjugate_gradient(spd_matrix, b)
array([[-0.63114139],
[-0.01561498],
[ 0.13979294]])
"""
# Ensure proper dimensionality.
assert np.shape(spd_matrix)[0] == np.shape(spd_matrix)[1]
assert np.shape(load_vector)[0] == np.shape(spd_matrix)[0]
assert _is_matrix_spd(spd_matrix)
# Initialize solution guess, residual, search direction.
x0 = np.zeros((np.shape(load_vector)[0], 1))
r0 = np.copy(load_vector)
p0 = np.copy(r0)
# Set initial errors in solution guess and residual.
error_residual = 1e9
error_x_solution = 1e9
error = 1e9
# Set iteration counter to threshold number of iterations.
iterations = 0
while error > tol:
# Save this value so we only calculate the matrix-vector product once.
w = np.dot(spd_matrix, p0)
# The main algorithm.
# Update search direction magnitude.
alpha = np.dot(r0.T, r0) / np.dot(p0.T, w)
# Update solution guess.
x = x0 + alpha * p0
# Calculate new residual.
r = r0 - alpha * w
# Calculate new Krylov subspace scale.
beta = np.dot(r.T, r) / np.dot(r0.T, r0)
# Calculate new A conjuage search direction.
p = r + beta * p0
# Calculate errors.
error_residual = np.linalg.norm(r - r0)
error_x_solution = np.linalg.norm(x - x0)
error = np.maximum(error_residual, error_x_solution)
# Update variables.
x0 = np.copy(x)
r0 = np.copy(r)
p0 = np.copy(p)
# Update number of iterations.
iterations += 1
if iterations > max_iterations:
break
return x
def test_conjugate_gradient() -> None:
"""
>>> test_conjugate_gradient() # self running tests
"""
# Create linear system with SPD matrix and known solution x_true.
dimension = 3
spd_matrix = _create_spd_matrix(dimension)
x_true = np.random.randn(dimension, 1)
b = np.dot(spd_matrix, x_true)
# Numpy solution.
x_numpy = np.linalg.solve(spd_matrix, b)
# Our implementation.
x_conjugate_gradient = conjugate_gradient(spd_matrix, b)
# Ensure both solutions are close to x_true (and therefore one another).
assert np.linalg.norm(x_numpy - x_true) <= 1e-6
assert np.linalg.norm(x_conjugate_gradient - x_true) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_conjugate_gradient()
| """
Resources:
- https://en.wikipedia.org/wiki/Conjugate_gradient_method
- https://en.wikipedia.org/wiki/Definite_symmetric_matrix
"""
from typing import Any
import numpy as np
def _is_matrix_spd(matrix: np.ndarray) -> bool:
"""
Returns True if input matrix is symmetric positive definite.
Returns False otherwise.
For a matrix to be SPD, all eigenvalues must be positive.
>>> import numpy as np
>>> matrix = np.array([
... [4.12401784, -5.01453636, -0.63865857],
... [-5.01453636, 12.33347422, -3.40493586],
... [-0.63865857, -3.40493586, 5.78591885]])
>>> _is_matrix_spd(matrix)
True
>>> matrix = np.array([
... [0.34634879, 1.96165514, 2.18277744],
... [0.74074469, -1.19648894, -1.34223498],
... [-0.7687067 , 0.06018373, -1.16315631]])
>>> _is_matrix_spd(matrix)
False
"""
# Ensure matrix is square.
assert np.shape(matrix)[0] == np.shape(matrix)[1]
# If matrix not symmetric, exit right away.
if np.allclose(matrix, matrix.T) is False:
return False
# Get eigenvalues and eignevectors for a symmetric matrix.
eigen_values, _ = np.linalg.eigh(matrix)
# Check sign of all eigenvalues.
# np.all returns a value of type np.bool_
return bool(np.all(eigen_values > 0))
def _create_spd_matrix(dimension: int) -> Any:
"""
Returns a symmetric positive definite matrix given a dimension.
Input:
dimension gives the square matrix dimension.
Output:
spd_matrix is an diminesion x dimensions symmetric positive definite (SPD) matrix.
>>> import numpy as np
>>> dimension = 3
>>> spd_matrix = _create_spd_matrix(dimension)
>>> _is_matrix_spd(spd_matrix)
True
"""
random_matrix = np.random.randn(dimension, dimension)
spd_matrix = np.dot(random_matrix, random_matrix.T)
assert _is_matrix_spd(spd_matrix)
return spd_matrix
def conjugate_gradient(
spd_matrix: np.ndarray,
load_vector: np.ndarray,
max_iterations: int = 1000,
tol: float = 1e-8,
) -> Any:
"""
Returns solution to the linear system np.dot(spd_matrix, x) = b.
Input:
spd_matrix is an NxN Symmetric Positive Definite (SPD) matrix.
load_vector is an Nx1 vector.
Output:
x is an Nx1 vector that is the solution vector.
>>> import numpy as np
>>> spd_matrix = np.array([
... [8.73256573, -5.02034289, -2.68709226],
... [-5.02034289, 3.78188322, 0.91980451],
... [-2.68709226, 0.91980451, 1.94746467]])
>>> b = np.array([
... [-5.80872761],
... [ 3.23807431],
... [ 1.95381422]])
>>> conjugate_gradient(spd_matrix, b)
array([[-0.63114139],
[-0.01561498],
[ 0.13979294]])
"""
# Ensure proper dimensionality.
assert np.shape(spd_matrix)[0] == np.shape(spd_matrix)[1]
assert np.shape(load_vector)[0] == np.shape(spd_matrix)[0]
assert _is_matrix_spd(spd_matrix)
# Initialize solution guess, residual, search direction.
x0 = np.zeros((np.shape(load_vector)[0], 1))
r0 = np.copy(load_vector)
p0 = np.copy(r0)
# Set initial errors in solution guess and residual.
error_residual = 1e9
error_x_solution = 1e9
error = 1e9
# Set iteration counter to threshold number of iterations.
iterations = 0
while error > tol:
# Save this value so we only calculate the matrix-vector product once.
w = np.dot(spd_matrix, p0)
# The main algorithm.
# Update search direction magnitude.
alpha = np.dot(r0.T, r0) / np.dot(p0.T, w)
# Update solution guess.
x = x0 + alpha * p0
# Calculate new residual.
r = r0 - alpha * w
# Calculate new Krylov subspace scale.
beta = np.dot(r.T, r) / np.dot(r0.T, r0)
# Calculate new A conjuage search direction.
p = r + beta * p0
# Calculate errors.
error_residual = np.linalg.norm(r - r0)
error_x_solution = np.linalg.norm(x - x0)
error = np.maximum(error_residual, error_x_solution)
# Update variables.
x0 = np.copy(x)
r0 = np.copy(r)
p0 = np.copy(p)
# Update number of iterations.
iterations += 1
if iterations > max_iterations:
break
return x
def test_conjugate_gradient() -> None:
"""
>>> test_conjugate_gradient() # self running tests
"""
# Create linear system with SPD matrix and known solution x_true.
dimension = 3
spd_matrix = _create_spd_matrix(dimension)
x_true = np.random.randn(dimension, 1)
b = np.dot(spd_matrix, x_true)
# Numpy solution.
x_numpy = np.linalg.solve(spd_matrix, b)
# Our implementation.
x_conjugate_gradient = conjugate_gradient(spd_matrix, b)
# Ensure both solutions are close to x_true (and therefore one another).
assert np.linalg.norm(x_numpy - x_true) <= 1e-6
assert np.linalg.norm(x_conjugate_gradient - x_true) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_conjugate_gradient()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
Each character on a computer is assigned a unique code and the preferred standard is
ASCII (American Standard Code for Information Interchange).
For example, uppercase A = 65, asterisk (*) = 42, and lowercase k = 107.
A modern encryption method is to take a text file, convert the bytes to ASCII, then
XOR each byte with a given value, taken from a secret key. The advantage with the
XOR function is that using the same encryption key on the cipher text, restores
the plain text; for example, 65 XOR 42 = 107, then 107 XOR 42 = 65.
For unbreakable encryption, the key is the same length as the plain text message, and
the key is made up of random bytes. The user would keep the encrypted message and the
encryption key in different locations, and without both "halves", it is impossible to
decrypt the message.
Unfortunately, this method is impractical for most users, so the modified method is
to use a password as a key. If the password is shorter than the message, which is
likely, the key is repeated cyclically throughout the message. The balance for this
method is using a sufficiently long password key for security, but short enough to
be memorable.
Your task has been made easy, as the encryption key consists of three lower case
characters. Using p059_cipher.txt (right click and 'Save Link/Target As...'), a
file containing the encrypted ASCII codes, and the knowledge that the plain text
must contain common English words, decrypt the message and find the sum of the ASCII
values in the original text.
"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS: str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
"""
Given an encrypted message and a possible 3-character key, decrypt the message.
If the decrypted message contains a invalid character, i.e. not an ASCII letter,
a digit, punctuation or whitespace, then we know the key is incorrect, so return
None.
>>> try_key([0, 17, 20, 4, 27], (104, 116, 120))
'hello'
>>> try_key([68, 10, 300, 4, 27], (104, 116, 120)) is None
True
"""
decoded: str = ""
keychar: int
cipherchar: int
decodedchar: int
for keychar, cipherchar in zip(cycle(key), ciphertext):
decodedchar = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(decodedchar)
return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
"""
Given an encrypted message, test all 3-character strings to try and find the
key. Return a list of the possible decrypted messages.
>>> from itertools import cycle
>>> text = "The enemy's gate is down"
>>> key = "end"
>>> encoded = [ord(k) ^ ord(c) for k,c in zip(cycle(key), text)]
>>> text in filter_valid_chars(encoded)
True
"""
possibles: list[str] = []
for key in product(LOWERCASE_INTS, repeat=3):
encoded = try_key(ciphertext, key)
if encoded is not None:
possibles.append(encoded)
return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
"""
Given a list of possible decoded messages, narrow down the possibilities
for checking for the presence of a specified common word. Only decoded messages
containing common_word will be returned.
>>> filter_common_word(['asfla adf', 'I am here', ' !?! #a'], 'am')
['I am here']
>>> filter_common_word(['athla amf', 'I am here', ' !?! #a'], 'am')
['athla amf', 'I am here']
"""
return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
"""
Test the ciphertext against all possible 3-character keys, then narrow down the
possibilities by filtering using common words until there's only one possible
decoded message.
>>> solution("test_cipher.txt")
3000
"""
ciphertext: list[int]
possibles: list[str]
common_word: str
decoded_text: str
data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
ciphertext = [int(number) for number in data.strip().split(",")]
possibles = filter_valid_chars(ciphertext)
for common_word in COMMON_WORDS:
possibles = filter_common_word(possibles, common_word)
if len(possibles) == 1:
break
decoded_text = possibles[0]
return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(f"{solution() = }")
| """
Each character on a computer is assigned a unique code and the preferred standard is
ASCII (American Standard Code for Information Interchange).
For example, uppercase A = 65, asterisk (*) = 42, and lowercase k = 107.
A modern encryption method is to take a text file, convert the bytes to ASCII, then
XOR each byte with a given value, taken from a secret key. The advantage with the
XOR function is that using the same encryption key on the cipher text, restores
the plain text; for example, 65 XOR 42 = 107, then 107 XOR 42 = 65.
For unbreakable encryption, the key is the same length as the plain text message, and
the key is made up of random bytes. The user would keep the encrypted message and the
encryption key in different locations, and without both "halves", it is impossible to
decrypt the message.
Unfortunately, this method is impractical for most users, so the modified method is
to use a password as a key. If the password is shorter than the message, which is
likely, the key is repeated cyclically throughout the message. The balance for this
method is using a sufficiently long password key for security, but short enough to
be memorable.
Your task has been made easy, as the encryption key consists of three lower case
characters. Using p059_cipher.txt (right click and 'Save Link/Target As...'), a
file containing the encrypted ASCII codes, and the knowledge that the plain text
must contain common English words, decrypt the message and find the sum of the ASCII
values in the original text.
"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS: str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
"""
Given an encrypted message and a possible 3-character key, decrypt the message.
If the decrypted message contains a invalid character, i.e. not an ASCII letter,
a digit, punctuation or whitespace, then we know the key is incorrect, so return
None.
>>> try_key([0, 17, 20, 4, 27], (104, 116, 120))
'hello'
>>> try_key([68, 10, 300, 4, 27], (104, 116, 120)) is None
True
"""
decoded: str = ""
keychar: int
cipherchar: int
decodedchar: int
for keychar, cipherchar in zip(cycle(key), ciphertext):
decodedchar = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(decodedchar)
return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
"""
Given an encrypted message, test all 3-character strings to try and find the
key. Return a list of the possible decrypted messages.
>>> from itertools import cycle
>>> text = "The enemy's gate is down"
>>> key = "end"
>>> encoded = [ord(k) ^ ord(c) for k,c in zip(cycle(key), text)]
>>> text in filter_valid_chars(encoded)
True
"""
possibles: list[str] = []
for key in product(LOWERCASE_INTS, repeat=3):
encoded = try_key(ciphertext, key)
if encoded is not None:
possibles.append(encoded)
return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
"""
Given a list of possible decoded messages, narrow down the possibilities
for checking for the presence of a specified common word. Only decoded messages
containing common_word will be returned.
>>> filter_common_word(['asfla adf', 'I am here', ' !?! #a'], 'am')
['I am here']
>>> filter_common_word(['athla amf', 'I am here', ' !?! #a'], 'am')
['athla amf', 'I am here']
"""
return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
"""
Test the ciphertext against all possible 3-character keys, then narrow down the
possibilities by filtering using common words until there's only one possible
decoded message.
>>> solution("test_cipher.txt")
3000
"""
ciphertext: list[int]
possibles: list[str]
common_word: str
decoded_text: str
data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
ciphertext = [int(number) for number in data.strip().split(",")]
possibles = filter_valid_chars(ciphertext)
for common_word in COMMON_WORDS:
possibles = filter_common_word(possibles, common_word)
if len(possibles) == 1:
break
decoded_text = possibles[0]
return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(f"{solution() = }")
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| import numpy as np
from PIL import Image
def rgb2gray(rgb: np.array) -> np.array:
"""
Return gray image from rgb image
>>> rgb2gray(np.array([[[127, 255, 0]]]))
array([[187.6453]])
>>> rgb2gray(np.array([[[0, 0, 0]]]))
array([[0.]])
>>> rgb2gray(np.array([[[2, 4, 1]]]))
array([[3.0598]])
>>> rgb2gray(np.array([[[26, 255, 14], [5, 147, 20], [1, 200, 0]]]))
array([[159.0524, 90.0635, 117.6989]])
"""
r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.2989 * r + 0.5870 * g + 0.1140 * b
def gray2binary(gray: np.array) -> np.array:
"""
Return binary image from gray image
>>> gray2binary(np.array([[127, 255, 0]]))
array([[False, True, False]])
>>> gray2binary(np.array([[0]]))
array([[False]])
>>> gray2binary(np.array([[26.2409, 4.9315, 1.4729]]))
array([[False, False, False]])
>>> gray2binary(np.array([[26, 255, 14], [5, 147, 20], [1, 200, 0]]))
array([[False, True, False],
[False, True, False],
[False, True, False]])
"""
return (127 < gray) & (gray <= 255)
def erosion(image: np.array, kernel: np.array) -> np.array:
"""
Return eroded image
>>> erosion(np.array([[True, True, False]]), np.array([[0, 1, 0]]))
array([[False, False, False]])
>>> erosion(np.array([[True, False, False]]), np.array([[1, 1, 0]]))
array([[False, False, False]])
"""
output = np.zeros_like(image)
image_padded = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
)
# Copy image to padded image
image_padded[kernel.shape[0] - 2 : -1 :, kernel.shape[1] - 2 : -1 :] = image
# Iterate over image & apply kernel
for x in range(image.shape[1]):
for y in range(image.shape[0]):
summation = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
output[y, x] = int(summation == 5)
return output
# kernel to be applied
structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
if __name__ == "__main__":
# read original image
image = np.array(Image.open(r"..\image_data\lena.jpg"))
# Apply erosion operation to a binary image
output = erosion(gray2binary(rgb2gray(image)), structuring_element)
# Save the output image
pil_img = Image.fromarray(output).convert("RGB")
pil_img.save("result_erosion.png")
| import numpy as np
from PIL import Image
def rgb2gray(rgb: np.array) -> np.array:
"""
Return gray image from rgb image
>>> rgb2gray(np.array([[[127, 255, 0]]]))
array([[187.6453]])
>>> rgb2gray(np.array([[[0, 0, 0]]]))
array([[0.]])
>>> rgb2gray(np.array([[[2, 4, 1]]]))
array([[3.0598]])
>>> rgb2gray(np.array([[[26, 255, 14], [5, 147, 20], [1, 200, 0]]]))
array([[159.0524, 90.0635, 117.6989]])
"""
r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.2989 * r + 0.5870 * g + 0.1140 * b
def gray2binary(gray: np.array) -> np.array:
"""
Return binary image from gray image
>>> gray2binary(np.array([[127, 255, 0]]))
array([[False, True, False]])
>>> gray2binary(np.array([[0]]))
array([[False]])
>>> gray2binary(np.array([[26.2409, 4.9315, 1.4729]]))
array([[False, False, False]])
>>> gray2binary(np.array([[26, 255, 14], [5, 147, 20], [1, 200, 0]]))
array([[False, True, False],
[False, True, False],
[False, True, False]])
"""
return (127 < gray) & (gray <= 255)
def erosion(image: np.array, kernel: np.array) -> np.array:
"""
Return eroded image
>>> erosion(np.array([[True, True, False]]), np.array([[0, 1, 0]]))
array([[False, False, False]])
>>> erosion(np.array([[True, False, False]]), np.array([[1, 1, 0]]))
array([[False, False, False]])
"""
output = np.zeros_like(image)
image_padded = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
)
# Copy image to padded image
image_padded[kernel.shape[0] - 2 : -1 :, kernel.shape[1] - 2 : -1 :] = image
# Iterate over image & apply kernel
for x in range(image.shape[1]):
for y in range(image.shape[0]):
summation = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
output[y, x] = int(summation == 5)
return output
# kernel to be applied
structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
if __name__ == "__main__":
# read original image
image = np.array(Image.open(r"..\image_data\lena.jpg"))
# Apply erosion operation to a binary image
output = erosion(gray2binary(rgb2gray(image)), structuring_element)
# Save the output image
pil_img = Image.fromarray(output).convert("RGB")
pil_img.save("result_erosion.png")
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
Project Euler Problem 64: https://projecteuler.net/problem=64
All square roots are periodic when written as continued fractions.
For example, let us consider sqrt(23).
It can be seen that the sequence is repeating.
For conciseness, we use the notation sqrt(23)=[4;(1,3,1,8)],
to indicate that the block (1,3,1,8) repeats indefinitely.
Exactly four continued fractions, for N<=13, have an odd period.
How many continued fractions for N<=10000 have an odd period?
References:
- https://en.wikipedia.org/wiki/Continued_fraction
"""
from math import floor, sqrt
def continuous_fraction_period(n: int) -> int:
"""
Returns the continued fraction period of a number n.
>>> continuous_fraction_period(2)
1
>>> continuous_fraction_period(5)
1
>>> continuous_fraction_period(7)
4
>>> continuous_fraction_period(11)
2
>>> continuous_fraction_period(13)
5
"""
numerator = 0.0
denominator = 1.0
ROOT = int(sqrt(n)) # noqa: N806
integer_part = ROOT
period = 0
while integer_part != 2 * ROOT:
numerator = denominator * integer_part - numerator
denominator = (n - numerator**2) / denominator
integer_part = int((ROOT + numerator) / denominator)
period += 1
return period
def solution(n: int = 10000) -> int:
"""
Returns the count of numbers <= 10000 with odd periods.
This function calls continuous_fraction_period for numbers which are
not perfect squares.
This is checked in if sr - floor(sr) != 0 statement.
If an odd period is returned by continuous_fraction_period,
count_odd_periods is increased by 1.
>>> solution(2)
1
>>> solution(5)
2
>>> solution(7)
2
>>> solution(11)
3
>>> solution(13)
4
"""
count_odd_periods = 0
for i in range(2, n + 1):
sr = sqrt(i)
if sr - floor(sr) != 0:
if continuous_fraction_period(i) % 2 == 1:
count_odd_periods += 1
return count_odd_periods
if __name__ == "__main__":
print(f"{solution(int(input().strip()))}")
| """
Project Euler Problem 64: https://projecteuler.net/problem=64
All square roots are periodic when written as continued fractions.
For example, let us consider sqrt(23).
It can be seen that the sequence is repeating.
For conciseness, we use the notation sqrt(23)=[4;(1,3,1,8)],
to indicate that the block (1,3,1,8) repeats indefinitely.
Exactly four continued fractions, for N<=13, have an odd period.
How many continued fractions for N<=10000 have an odd period?
References:
- https://en.wikipedia.org/wiki/Continued_fraction
"""
from math import floor, sqrt
def continuous_fraction_period(n: int) -> int:
"""
Returns the continued fraction period of a number n.
>>> continuous_fraction_period(2)
1
>>> continuous_fraction_period(5)
1
>>> continuous_fraction_period(7)
4
>>> continuous_fraction_period(11)
2
>>> continuous_fraction_period(13)
5
"""
numerator = 0.0
denominator = 1.0
ROOT = int(sqrt(n)) # noqa: N806
integer_part = ROOT
period = 0
while integer_part != 2 * ROOT:
numerator = denominator * integer_part - numerator
denominator = (n - numerator**2) / denominator
integer_part = int((ROOT + numerator) / denominator)
period += 1
return period
def solution(n: int = 10000) -> int:
"""
Returns the count of numbers <= 10000 with odd periods.
This function calls continuous_fraction_period for numbers which are
not perfect squares.
This is checked in if sr - floor(sr) != 0 statement.
If an odd period is returned by continuous_fraction_period,
count_odd_periods is increased by 1.
>>> solution(2)
1
>>> solution(5)
2
>>> solution(7)
2
>>> solution(11)
3
>>> solution(13)
4
"""
count_odd_periods = 0
for i in range(2, n + 1):
sr = sqrt(i)
if sr - floor(sr) != 0:
if continuous_fraction_period(i) % 2 == 1:
count_odd_periods += 1
return count_odd_periods
if __name__ == "__main__":
print(f"{solution(int(input().strip()))}")
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """ A Queue using a linked list like structure """
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
def __init__(self, data: Any) -> None:
self.data: Any = data
self.next: Node | None = None
def __str__(self) -> str:
return f"{self.data}"
class LinkedQueue:
"""
>>> queue = LinkedQueue()
>>> queue.is_empty()
True
>>> queue.put(5)
>>> queue.put(9)
>>> queue.put('python')
>>> queue.is_empty()
False
>>> queue.get()
5
>>> queue.put('algorithms')
>>> queue.get()
9
>>> queue.get()
'python'
>>> queue.get()
'algorithms'
>>> queue.is_empty()
True
>>> queue.get()
Traceback (most recent call last):
...
IndexError: dequeue from empty queue
"""
def __init__(self) -> None:
self.front: Node | None = None
self.rear: Node | None = None
def __iter__(self) -> Iterator[Any]:
node = self.front
while node:
yield node.data
node = node.next
def __len__(self) -> int:
"""
>>> queue = LinkedQueue()
>>> for i in range(1, 6):
... queue.put(i)
>>> len(queue)
5
>>> for i in range(1, 6):
... assert len(queue) == 6 - i
... _ = queue.get()
>>> len(queue)
0
"""
return len(tuple(iter(self)))
def __str__(self) -> str:
"""
>>> queue = LinkedQueue()
>>> for i in range(1, 4):
... queue.put(i)
>>> queue.put("Python")
>>> queue.put(3.14)
>>> queue.put(True)
>>> str(queue)
'1 <- 2 <- 3 <- Python <- 3.14 <- True'
"""
return " <- ".join(str(item) for item in self)
def is_empty(self) -> bool:
"""
>>> queue = LinkedQueue()
>>> queue.is_empty()
True
>>> for i in range(1, 6):
... queue.put(i)
>>> queue.is_empty()
False
"""
return len(self) == 0
def put(self, item: Any) -> None:
"""
>>> queue = LinkedQueue()
>>> queue.get()
Traceback (most recent call last):
...
IndexError: dequeue from empty queue
>>> for i in range(1, 6):
... queue.put(i)
>>> str(queue)
'1 <- 2 <- 3 <- 4 <- 5'
"""
node = Node(item)
if self.is_empty():
self.front = self.rear = node
else:
assert isinstance(self.rear, Node)
self.rear.next = node
self.rear = node
def get(self) -> Any:
"""
>>> queue = LinkedQueue()
>>> queue.get()
Traceback (most recent call last):
...
IndexError: dequeue from empty queue
>>> queue = LinkedQueue()
>>> for i in range(1, 6):
... queue.put(i)
>>> for i in range(1, 6):
... assert queue.get() == i
>>> len(queue)
0
"""
if self.is_empty():
raise IndexError("dequeue from empty queue")
assert isinstance(self.front, Node)
node = self.front
self.front = self.front.next
if self.front is None:
self.rear = None
return node.data
def clear(self) -> None:
"""
>>> queue = LinkedQueue()
>>> for i in range(1, 6):
... queue.put(i)
>>> queue.clear()
>>> len(queue)
0
>>> str(queue)
''
"""
self.front = self.rear = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| """ A Queue using a linked list like structure """
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
def __init__(self, data: Any) -> None:
self.data: Any = data
self.next: Node | None = None
def __str__(self) -> str:
return f"{self.data}"
class LinkedQueue:
"""
>>> queue = LinkedQueue()
>>> queue.is_empty()
True
>>> queue.put(5)
>>> queue.put(9)
>>> queue.put('python')
>>> queue.is_empty()
False
>>> queue.get()
5
>>> queue.put('algorithms')
>>> queue.get()
9
>>> queue.get()
'python'
>>> queue.get()
'algorithms'
>>> queue.is_empty()
True
>>> queue.get()
Traceback (most recent call last):
...
IndexError: dequeue from empty queue
"""
def __init__(self) -> None:
self.front: Node | None = None
self.rear: Node | None = None
def __iter__(self) -> Iterator[Any]:
node = self.front
while node:
yield node.data
node = node.next
def __len__(self) -> int:
"""
>>> queue = LinkedQueue()
>>> for i in range(1, 6):
... queue.put(i)
>>> len(queue)
5
>>> for i in range(1, 6):
... assert len(queue) == 6 - i
... _ = queue.get()
>>> len(queue)
0
"""
return len(tuple(iter(self)))
def __str__(self) -> str:
"""
>>> queue = LinkedQueue()
>>> for i in range(1, 4):
... queue.put(i)
>>> queue.put("Python")
>>> queue.put(3.14)
>>> queue.put(True)
>>> str(queue)
'1 <- 2 <- 3 <- Python <- 3.14 <- True'
"""
return " <- ".join(str(item) for item in self)
def is_empty(self) -> bool:
"""
>>> queue = LinkedQueue()
>>> queue.is_empty()
True
>>> for i in range(1, 6):
... queue.put(i)
>>> queue.is_empty()
False
"""
return len(self) == 0
def put(self, item: Any) -> None:
"""
>>> queue = LinkedQueue()
>>> queue.get()
Traceback (most recent call last):
...
IndexError: dequeue from empty queue
>>> for i in range(1, 6):
... queue.put(i)
>>> str(queue)
'1 <- 2 <- 3 <- 4 <- 5'
"""
node = Node(item)
if self.is_empty():
self.front = self.rear = node
else:
assert isinstance(self.rear, Node)
self.rear.next = node
self.rear = node
def get(self) -> Any:
"""
>>> queue = LinkedQueue()
>>> queue.get()
Traceback (most recent call last):
...
IndexError: dequeue from empty queue
>>> queue = LinkedQueue()
>>> for i in range(1, 6):
... queue.put(i)
>>> for i in range(1, 6):
... assert queue.get() == i
>>> len(queue)
0
"""
if self.is_empty():
raise IndexError("dequeue from empty queue")
assert isinstance(self.front, Node)
node = self.front
self.front = self.front.next
if self.front is None:
self.rear = None
return node.data
def clear(self) -> None:
"""
>>> queue = LinkedQueue()
>>> for i in range(1, 6):
... queue.put(i)
>>> queue.clear()
>>> len(queue)
0
>>> str(queue)
''
"""
self.front = self.rear = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")
class StackOverflowError(BaseException):
pass
class StackUnderflowError(BaseException):
pass
class Stack(Generic[T]):
"""A stack is an abstract data type that serves as a collection of
elements with two principal operations: push() and pop(). push() adds an
element to the top of the stack, and pop() removes an element from the top
of a stack. The order in which elements come off of a stack are
Last In, First Out (LIFO).
https://en.wikipedia.org/wiki/Stack_(abstract_data_type)
"""
def __init__(self, limit: int = 10):
self.stack: list[T] = []
self.limit = limit
def __bool__(self) -> bool:
return bool(self.stack)
def __str__(self) -> str:
return str(self.stack)
def push(self, data: T) -> None:
"""Push an element to the top of the stack."""
if len(self.stack) >= self.limit:
raise StackOverflowError
self.stack.append(data)
def pop(self) -> T:
"""
Pop an element off of the top of the stack.
>>> Stack().pop()
Traceback (most recent call last):
...
data_structures.stacks.stack.StackUnderflowError
"""
if not self.stack:
raise StackUnderflowError
return self.stack.pop()
def peek(self) -> T:
"""
Peek at the top-most element of the stack.
>>> Stack().pop()
Traceback (most recent call last):
...
data_structures.stacks.stack.StackUnderflowError
"""
if not self.stack:
raise StackUnderflowError
return self.stack[-1]
def is_empty(self) -> bool:
"""Check if a stack is empty."""
return not bool(self.stack)
def is_full(self) -> bool:
return self.size() == self.limit
def size(self) -> int:
"""Return the size of the stack."""
return len(self.stack)
def __contains__(self, item: T) -> bool:
"""Check if item is in stack"""
return item in self.stack
def test_stack() -> None:
"""
>>> test_stack()
"""
stack: Stack[int] = Stack(10)
assert bool(stack) is False
assert stack.is_empty() is True
assert stack.is_full() is False
assert str(stack) == "[]"
try:
_ = stack.pop()
raise AssertionError() # This should not happen
except StackUnderflowError:
assert True # This should happen
try:
_ = stack.peek()
raise AssertionError() # This should not happen
except StackUnderflowError:
assert True # This should happen
for i in range(10):
assert stack.size() == i
stack.push(i)
assert bool(stack)
assert not stack.is_empty()
assert stack.is_full()
assert str(stack) == str(list(range(10)))
assert stack.pop() == 9
assert stack.peek() == 8
stack.push(100)
assert str(stack) == str([0, 1, 2, 3, 4, 5, 6, 7, 8, 100])
try:
stack.push(200)
raise AssertionError() # This should not happen
except StackOverflowError:
assert True # This should happen
assert not stack.is_empty()
assert stack.size() == 10
assert 5 in stack
assert 55 not in stack
if __name__ == "__main__":
test_stack()
| from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")
class StackOverflowError(BaseException):
pass
class StackUnderflowError(BaseException):
pass
class Stack(Generic[T]):
"""A stack is an abstract data type that serves as a collection of
elements with two principal operations: push() and pop(). push() adds an
element to the top of the stack, and pop() removes an element from the top
of a stack. The order in which elements come off of a stack are
Last In, First Out (LIFO).
https://en.wikipedia.org/wiki/Stack_(abstract_data_type)
"""
def __init__(self, limit: int = 10):
self.stack: list[T] = []
self.limit = limit
def __bool__(self) -> bool:
return bool(self.stack)
def __str__(self) -> str:
return str(self.stack)
def push(self, data: T) -> None:
"""Push an element to the top of the stack."""
if len(self.stack) >= self.limit:
raise StackOverflowError
self.stack.append(data)
def pop(self) -> T:
"""
Pop an element off of the top of the stack.
>>> Stack().pop()
Traceback (most recent call last):
...
data_structures.stacks.stack.StackUnderflowError
"""
if not self.stack:
raise StackUnderflowError
return self.stack.pop()
def peek(self) -> T:
"""
Peek at the top-most element of the stack.
>>> Stack().pop()
Traceback (most recent call last):
...
data_structures.stacks.stack.StackUnderflowError
"""
if not self.stack:
raise StackUnderflowError
return self.stack[-1]
def is_empty(self) -> bool:
"""Check if a stack is empty."""
return not bool(self.stack)
def is_full(self) -> bool:
return self.size() == self.limit
def size(self) -> int:
"""Return the size of the stack."""
return len(self.stack)
def __contains__(self, item: T) -> bool:
"""Check if item is in stack"""
return item in self.stack
def test_stack() -> None:
"""
>>> test_stack()
"""
stack: Stack[int] = Stack(10)
assert bool(stack) is False
assert stack.is_empty() is True
assert stack.is_full() is False
assert str(stack) == "[]"
try:
_ = stack.pop()
raise AssertionError() # This should not happen
except StackUnderflowError:
assert True # This should happen
try:
_ = stack.peek()
raise AssertionError() # This should not happen
except StackUnderflowError:
assert True # This should happen
for i in range(10):
assert stack.size() == i
stack.push(i)
assert bool(stack)
assert not stack.is_empty()
assert stack.is_full()
assert str(stack) == str(list(range(10)))
assert stack.pop() == 9
assert stack.peek() == 8
stack.push(100)
assert str(stack) == str([0, 1, 2, 3, 4, 5, 6, 7, 8, 100])
try:
stack.push(200)
raise AssertionError() # This should not happen
except StackOverflowError:
assert True # This should happen
assert not stack.is_empty()
assert stack.size() == 10
assert 5 in stack
assert 55 not in stack
if __name__ == "__main__":
test_stack()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
The stock span problem is a financial problem where we have a series of n daily
price quotes for a stock and we need to calculate span of stock's price for all n days.
The span Si of the stock's price on a given day i is defined as the maximum
number of consecutive days just before the given day, for which the price of the stock
on the current day is less than or equal to its price on the given day.
"""
def calculation_span(price, s):
n = len(price)
# Create a stack and push index of fist element to it
st = []
st.append(0)
# Span value of first element is always 1
s[0] = 1
# Calculate span values for rest of the elements
for i in range(1, n):
# Pop elements from stack while stack is not
# empty and top of stack is smaller than price[i]
while len(st) > 0 and price[st[0]] <= price[i]:
st.pop()
# If stack becomes empty, then price[i] is greater
# than all elements on left of it, i.e. price[0],
# price[1], ..price[i-1]. Else the price[i] is
# greater than elements after top of stack
s[i] = i + 1 if len(st) <= 0 else (i - st[0])
# Push this element to stack
st.append(i)
# A utility function to print elements of array
def print_array(arr, n):
for i in range(0, n):
print(arr[i], end=" ")
# Driver program to test above function
price = [10, 4, 5, 90, 120, 80]
S = [0 for i in range(len(price) + 1)]
# Fill the span values in array S[]
calculation_span(price, S)
# Print the calculated span values
print_array(S, len(price))
| """
The stock span problem is a financial problem where we have a series of n daily
price quotes for a stock and we need to calculate span of stock's price for all n days.
The span Si of the stock's price on a given day i is defined as the maximum
number of consecutive days just before the given day, for which the price of the stock
on the current day is less than or equal to its price on the given day.
"""
def calculation_span(price, s):
n = len(price)
# Create a stack and push index of fist element to it
st = []
st.append(0)
# Span value of first element is always 1
s[0] = 1
# Calculate span values for rest of the elements
for i in range(1, n):
# Pop elements from stack while stack is not
# empty and top of stack is smaller than price[i]
while len(st) > 0 and price[st[0]] <= price[i]:
st.pop()
# If stack becomes empty, then price[i] is greater
# than all elements on left of it, i.e. price[0],
# price[1], ..price[i-1]. Else the price[i] is
# greater than elements after top of stack
s[i] = i + 1 if len(st) <= 0 else (i - st[0])
# Push this element to stack
st.append(i)
# A utility function to print elements of array
def print_array(arr, n):
for i in range(0, n):
print(arr[i], end=" ")
# Driver program to test above function
price = [10, 4, 5, 90, 120, 80]
S = [0 for i in range(len(price) + 1)]
# Fill the span values in array S[]
calculation_span(price, S)
# Print the calculated span values
print_array(S, len(price))
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| -1 |
||
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| from __future__ import annotations
def search_in_a_sorted_matrix(
mat: list[list[int]], m: int, n: int, key: int | float
) -> None:
"""
>>> search_in_a_sorted_matrix(
... [[2, 5, 7], [4, 8, 13], [9, 11, 15], [12, 17, 20]], 3, 3, 5)
Key 5 found at row- 1 column- 2
>>> search_in_a_sorted_matrix(
... [[2, 5, 7], [4, 8, 13], [9, 11, 15], [12, 17, 20]], 3, 3, 21)
Key 21 not found
>>> search_in_a_sorted_matrix(
... [[2.1, 5, 7], [4, 8, 13], [9, 11, 15], [12, 17, 20]], 3, 3, 2.1)
Key 2.1 found at row- 1 column- 1
>>> search_in_a_sorted_matrix(
... [[2.1, 5, 7], [4, 8, 13], [9, 11, 15], [12, 17, 20]], 3, 3, 2.2)
Key 2.2 not found
"""
i, j = m - 1, 0
while i >= 0 and j < n:
if key == mat[i][j]:
print(f"Key {key} found at row- {i + 1} column- {j + 1}")
return
if key < mat[i][j]:
i -= 1
else:
j += 1
print(f"Key {key} not found")
def main() -> None:
mat = [[2, 5, 7], [4, 8, 13], [9, 11, 15], [12, 17, 20]]
x = int(input("Enter the element to be searched:"))
print(mat)
search_in_a_sorted_matrix(mat, len(mat), len(mat[0]), x)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| from __future__ import annotations
def search_in_a_sorted_matrix(
mat: list[list[int]], m: int, n: int, key: int | float
) -> None:
"""
>>> search_in_a_sorted_matrix(
... [[2, 5, 7], [4, 8, 13], [9, 11, 15], [12, 17, 20]], 3, 3, 5)
Key 5 found at row- 1 column- 2
>>> search_in_a_sorted_matrix(
... [[2, 5, 7], [4, 8, 13], [9, 11, 15], [12, 17, 20]], 3, 3, 21)
Key 21 not found
>>> search_in_a_sorted_matrix(
... [[2.1, 5, 7], [4, 8, 13], [9, 11, 15], [12, 17, 20]], 3, 3, 2.1)
Key 2.1 found at row- 1 column- 1
>>> search_in_a_sorted_matrix(
... [[2.1, 5, 7], [4, 8, 13], [9, 11, 15], [12, 17, 20]], 3, 3, 2.2)
Key 2.2 not found
"""
i, j = m - 1, 0
while i >= 0 and j < n:
if key == mat[i][j]:
print(f"Key {key} found at row- {i + 1} column- {j + 1}")
return
if key < mat[i][j]:
i -= 1
else:
j += 1
print(f"Key {key} not found")
def main() -> None:
mat = [[2, 5, 7], [4, 8, 13], [9, 11, 15], [12, 17, 20]]
x = int(input("Enter the element to be searched:"))
print(mat)
search_in_a_sorted_matrix(mat, len(mat), len(mat[0]), x)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| -1 |
||
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| import cv2
import numpy as np
def get_neighbors_pixel(
image: np.ndarray, x_coordinate: int, y_coordinate: int, center: int
) -> int:
"""
Comparing local neighborhood pixel value with threshold value of centre pixel.
Exception is required when neighborhood value of a center pixel value is null.
i.e. values present at boundaries.
:param image: The image we're working with
:param x_coordinate: x-coordinate of the pixel
:param y_coordinate: The y coordinate of the pixel
:param center: center pixel value
:return: The value of the pixel is being returned.
"""
try:
return int(image[x_coordinate][y_coordinate] >= center)
except (IndexError, TypeError):
return 0
def local_binary_value(image: np.ndarray, x_coordinate: int, y_coordinate: int) -> int:
"""
It takes an image, an x and y coordinate, and returns the
decimal value of the local binary patternof the pixel
at that coordinate
:param image: the image to be processed
:param x_coordinate: x coordinate of the pixel
:param y_coordinate: the y coordinate of the pixel
:return: The decimal value of the binary value of the pixels
around the center pixel.
"""
center = image[x_coordinate][y_coordinate]
powers = [1, 2, 4, 8, 16, 32, 64, 128]
# skip get_neighbors_pixel if center is null
if center is None:
return 0
# Starting from the top right, assigning value to pixels clockwise
binary_values = [
get_neighbors_pixel(image, x_coordinate - 1, y_coordinate + 1, center),
get_neighbors_pixel(image, x_coordinate, y_coordinate + 1, center),
get_neighbors_pixel(image, x_coordinate - 1, y_coordinate, center),
get_neighbors_pixel(image, x_coordinate + 1, y_coordinate + 1, center),
get_neighbors_pixel(image, x_coordinate + 1, y_coordinate, center),
get_neighbors_pixel(image, x_coordinate + 1, y_coordinate - 1, center),
get_neighbors_pixel(image, x_coordinate, y_coordinate - 1, center),
get_neighbors_pixel(image, x_coordinate - 1, y_coordinate - 1, center),
]
# Converting the binary value to decimal.
return sum(
binary_value * power for binary_value, power in zip(binary_values, powers)
)
if __name__ == "main":
# Reading the image and converting it to grayscale.
image = cv2.imread(
"digital_image_processing/image_data/lena.jpg", cv2.IMREAD_GRAYSCALE
)
# Create a numpy array as the same height and width of read image
lbp_image = np.zeros((image.shape[0], image.shape[1]))
# Iterating through the image and calculating the
# local binary pattern value for each pixel.
for i in range(0, image.shape[0]):
for j in range(0, image.shape[1]):
lbp_image[i][j] = local_binary_value(image, i, j)
cv2.imshow("local binary pattern", lbp_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
| import cv2
import numpy as np
def get_neighbors_pixel(
image: np.ndarray, x_coordinate: int, y_coordinate: int, center: int
) -> int:
"""
Comparing local neighborhood pixel value with threshold value of centre pixel.
Exception is required when neighborhood value of a center pixel value is null.
i.e. values present at boundaries.
:param image: The image we're working with
:param x_coordinate: x-coordinate of the pixel
:param y_coordinate: The y coordinate of the pixel
:param center: center pixel value
:return: The value of the pixel is being returned.
"""
try:
return int(image[x_coordinate][y_coordinate] >= center)
except (IndexError, TypeError):
return 0
def local_binary_value(image: np.ndarray, x_coordinate: int, y_coordinate: int) -> int:
"""
It takes an image, an x and y coordinate, and returns the
decimal value of the local binary patternof the pixel
at that coordinate
:param image: the image to be processed
:param x_coordinate: x coordinate of the pixel
:param y_coordinate: the y coordinate of the pixel
:return: The decimal value of the binary value of the pixels
around the center pixel.
"""
center = image[x_coordinate][y_coordinate]
powers = [1, 2, 4, 8, 16, 32, 64, 128]
# skip get_neighbors_pixel if center is null
if center is None:
return 0
# Starting from the top right, assigning value to pixels clockwise
binary_values = [
get_neighbors_pixel(image, x_coordinate - 1, y_coordinate + 1, center),
get_neighbors_pixel(image, x_coordinate, y_coordinate + 1, center),
get_neighbors_pixel(image, x_coordinate - 1, y_coordinate, center),
get_neighbors_pixel(image, x_coordinate + 1, y_coordinate + 1, center),
get_neighbors_pixel(image, x_coordinate + 1, y_coordinate, center),
get_neighbors_pixel(image, x_coordinate + 1, y_coordinate - 1, center),
get_neighbors_pixel(image, x_coordinate, y_coordinate - 1, center),
get_neighbors_pixel(image, x_coordinate - 1, y_coordinate - 1, center),
]
# Converting the binary value to decimal.
return sum(
binary_value * power for binary_value, power in zip(binary_values, powers)
)
if __name__ == "main":
# Reading the image and converting it to grayscale.
image = cv2.imread(
"digital_image_processing/image_data/lena.jpg", cv2.IMREAD_GRAYSCALE
)
# Create a numpy array as the same height and width of read image
lbp_image = np.zeros((image.shape[0], image.shape[1]))
# Iterating through the image and calculating the
# local binary pattern value for each pixel.
for i in range(0, image.shape[0]):
for j in range(0, image.shape[1]):
lbp_image[i][j] = local_binary_value(image, i, j)
cv2.imshow("local binary pattern", lbp_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| -1 |
||
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
An Armstrong number is equal to the sum of its own digits each raised to the
power of the number of digits.
For example, 370 is an Armstrong number because 3*3*3 + 7*7*7 + 0*0*0 = 370.
Armstrong numbers are also called Narcissistic numbers and Pluperfect numbers.
On-Line Encyclopedia of Integer Sequences entry: https://oeis.org/A005188
"""
PASSING = (1, 153, 370, 371, 1634, 24678051, 115132219018763992565095597973971522401)
FAILING: tuple = (-153, -1, 0, 1.2, 200, "A", [], {}, None)
def armstrong_number(n: int) -> bool:
"""
Return True if n is an Armstrong number or False if it is not.
>>> all(armstrong_number(n) for n in PASSING)
True
>>> any(armstrong_number(n) for n in FAILING)
False
"""
if not isinstance(n, int) or n < 1:
return False
# Initialization of sum and number of digits.
total = 0
number_of_digits = 0
temp = n
# Calculation of digits of the number
while temp > 0:
number_of_digits += 1
temp //= 10
# Dividing number into separate digits and find Armstrong number
temp = n
while temp > 0:
rem = temp % 10
total += rem**number_of_digits
temp //= 10
return n == total
def pluperfect_number(n: int) -> bool:
"""Return True if n is a pluperfect number or False if it is not
>>> all(armstrong_number(n) for n in PASSING)
True
>>> any(armstrong_number(n) for n in FAILING)
False
"""
if not isinstance(n, int) or n < 1:
return False
# Init a "histogram" of the digits
digit_histogram = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
digit_total = 0
total = 0
temp = n
while temp > 0:
temp, rem = divmod(temp, 10)
digit_histogram[rem] += 1
digit_total += 1
for (cnt, i) in zip(digit_histogram, range(len(digit_histogram))):
total += cnt * i**digit_total
return n == total
def narcissistic_number(n: int) -> bool:
"""Return True if n is a narcissistic number or False if it is not.
>>> all(armstrong_number(n) for n in PASSING)
True
>>> any(armstrong_number(n) for n in FAILING)
False
"""
if not isinstance(n, int) or n < 1:
return False
expo = len(str(n)) # the power that all digits will be raised to
# check if sum of each digit multiplied expo times is equal to number
return n == sum(int(i) ** expo for i in str(n))
def main():
"""
Request that user input an integer and tell them if it is Armstrong number.
"""
num = int(input("Enter an integer to see if it is an Armstrong number: ").strip())
print(f"{num} is {'' if armstrong_number(num) else 'not '}an Armstrong number.")
print(f"{num} is {'' if narcissistic_number(num) else 'not '}an Armstrong number.")
print(f"{num} is {'' if pluperfect_number(num) else 'not '}an Armstrong number.")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| """
An Armstrong number is equal to the sum of its own digits each raised to the
power of the number of digits.
For example, 370 is an Armstrong number because 3*3*3 + 7*7*7 + 0*0*0 = 370.
Armstrong numbers are also called Narcissistic numbers and Pluperfect numbers.
On-Line Encyclopedia of Integer Sequences entry: https://oeis.org/A005188
"""
PASSING = (1, 153, 370, 371, 1634, 24678051, 115132219018763992565095597973971522401)
FAILING: tuple = (-153, -1, 0, 1.2, 200, "A", [], {}, None)
def armstrong_number(n: int) -> bool:
"""
Return True if n is an Armstrong number or False if it is not.
>>> all(armstrong_number(n) for n in PASSING)
True
>>> any(armstrong_number(n) for n in FAILING)
False
"""
if not isinstance(n, int) or n < 1:
return False
# Initialization of sum and number of digits.
total = 0
number_of_digits = 0
temp = n
# Calculation of digits of the number
while temp > 0:
number_of_digits += 1
temp //= 10
# Dividing number into separate digits and find Armstrong number
temp = n
while temp > 0:
rem = temp % 10
total += rem**number_of_digits
temp //= 10
return n == total
def pluperfect_number(n: int) -> bool:
"""Return True if n is a pluperfect number or False if it is not
>>> all(armstrong_number(n) for n in PASSING)
True
>>> any(armstrong_number(n) for n in FAILING)
False
"""
if not isinstance(n, int) or n < 1:
return False
# Init a "histogram" of the digits
digit_histogram = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
digit_total = 0
total = 0
temp = n
while temp > 0:
temp, rem = divmod(temp, 10)
digit_histogram[rem] += 1
digit_total += 1
for (cnt, i) in zip(digit_histogram, range(len(digit_histogram))):
total += cnt * i**digit_total
return n == total
def narcissistic_number(n: int) -> bool:
"""Return True if n is a narcissistic number or False if it is not.
>>> all(armstrong_number(n) for n in PASSING)
True
>>> any(armstrong_number(n) for n in FAILING)
False
"""
if not isinstance(n, int) or n < 1:
return False
expo = len(str(n)) # the power that all digits will be raised to
# check if sum of each digit multiplied expo times is equal to number
return n == sum(int(i) ** expo for i in str(n))
def main():
"""
Request that user input an integer and tell them if it is Armstrong number.
"""
num = int(input("Enter an integer to see if it is an Armstrong number: ").strip())
print(f"{num} is {'' if armstrong_number(num) else 'not '}an Armstrong number.")
print(f"{num} is {'' if narcissistic_number(num) else 'not '}an Armstrong number.")
print(f"{num} is {'' if pluperfect_number(num) else 'not '}an Armstrong number.")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
Hill Cipher:
The 'HillCipher' class below implements the Hill Cipher algorithm which uses
modern linear algebra techniques to encode and decode text using an encryption
key matrix.
Algorithm:
Let the order of the encryption key be N (as it is a square matrix).
Your text is divided into batches of length N and converted to numerical vectors
by a simple mapping starting with A=0 and so on.
The key is then multiplied with the newly created batch vector to obtain the
encoded vector. After each multiplication modular 36 calculations are performed
on the vectors so as to bring the numbers between 0 and 36 and then mapped with
their corresponding alphanumerics.
While decrypting, the decrypting key is found which is the inverse of the
encrypting key modular 36. The same process is repeated for decrypting to get
the original message back.
Constraints:
The determinant of the encryption key matrix must be relatively prime w.r.t 36.
Note:
This implementation only considers alphanumerics in the text. If the length of
the text to be encrypted is not a multiple of the break key(the length of one
batch of letters), the last character of the text is added to the text until the
length of the text reaches a multiple of the break_key. So the text after
decrypting might be a little different than the original text.
References:
https://apprendre-en-ligne.net/crypto/hill/Hillciph.pdf
https://www.youtube.com/watch?v=kfmNeskzs2o
https://www.youtube.com/watch?v=4RhLNDqcjpA
"""
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
"""
>>> greatest_common_divisor(4, 8)
4
>>> greatest_common_divisor(8, 4)
4
>>> greatest_common_divisor(4, 7)
1
>>> greatest_common_divisor(0, 10)
10
"""
return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
key_string = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
modulus = numpy.vectorize(lambda x: x % 36)
to_int = numpy.vectorize(round)
def __init__(self, encrypt_key: numpy.ndarray) -> None:
"""
encrypt_key is an NxN numpy array
"""
self.encrypt_key = self.modulus(encrypt_key) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
self.break_key = encrypt_key.shape[0]
def replace_letters(self, letter: str) -> int:
"""
>>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
>>> hill_cipher.replace_letters('T')
19
>>> hill_cipher.replace_letters('0')
26
"""
return self.key_string.index(letter)
def replace_digits(self, num: int) -> str:
"""
>>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
>>> hill_cipher.replace_digits(19)
'T'
>>> hill_cipher.replace_digits(26)
'0'
"""
return self.key_string[round(num)]
def check_determinant(self) -> None:
"""
>>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
>>> hill_cipher.check_determinant()
"""
det = round(numpy.linalg.det(self.encrypt_key))
if det < 0:
det = det % len(self.key_string)
req_l = len(self.key_string)
if greatest_common_divisor(det, len(self.key_string)) != 1:
raise ValueError(
f"determinant modular {req_l} of encryption key({det}) is not co prime "
f"w.r.t {req_l}.\nTry another key."
)
def process_text(self, text: str) -> str:
"""
>>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
>>> hill_cipher.process_text('Testing Hill Cipher')
'TESTINGHILLCIPHERR'
>>> hill_cipher.process_text('hello')
'HELLOO'
"""
chars = [char for char in text.upper() if char in self.key_string]
last = chars[-1]
while len(chars) % self.break_key != 0:
chars.append(last)
return "".join(chars)
def encrypt(self, text: str) -> str:
"""
>>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
>>> hill_cipher.encrypt('testing hill cipher')
'WHXYJOLM9C6XT085LL'
>>> hill_cipher.encrypt('hello')
'85FF00'
"""
text = self.process_text(text.upper())
encrypted = ""
for i in range(0, len(text) - self.break_key + 1, self.break_key):
batch = text[i : i + self.break_key]
vec = [self.replace_letters(char) for char in batch]
batch_vec = numpy.array([vec]).T
batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
0
]
encrypted_batch = "".join(
self.replace_digits(num) for num in batch_encrypted
)
encrypted += encrypted_batch
return encrypted
def make_decrypt_key(self) -> numpy.ndarray:
"""
>>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
>>> hill_cipher.make_decrypt_key()
array([[ 6, 25],
[ 5, 26]])
"""
det = round(numpy.linalg.det(self.encrypt_key))
if det < 0:
det = det % len(self.key_string)
det_inv = None
for i in range(len(self.key_string)):
if (det * i) % len(self.key_string) == 1:
det_inv = i
break
inv_key = (
det_inv
* numpy.linalg.det(self.encrypt_key)
* numpy.linalg.inv(self.encrypt_key)
)
return self.to_int(self.modulus(inv_key))
def decrypt(self, text: str) -> str:
"""
>>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
>>> hill_cipher.decrypt('WHXYJOLM9C6XT085LL')
'TESTINGHILLCIPHERR'
>>> hill_cipher.decrypt('85FF00')
'HELLOO'
"""
decrypt_key = self.make_decrypt_key()
text = self.process_text(text.upper())
decrypted = ""
for i in range(0, len(text) - self.break_key + 1, self.break_key):
batch = text[i : i + self.break_key]
vec = [self.replace_letters(char) for char in batch]
batch_vec = numpy.array([vec]).T
batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
decrypted_batch = "".join(
self.replace_digits(num) for num in batch_decrypted
)
decrypted += decrypted_batch
return decrypted
def main() -> None:
n = int(input("Enter the order of the encryption key: "))
hill_matrix = []
print("Enter each row of the encryption key with space separated integers")
for _ in range(n):
row = [int(x) for x in input().split()]
hill_matrix.append(row)
hc = HillCipher(numpy.array(hill_matrix))
print("Would you like to encrypt or decrypt some text? (1 or 2)")
option = input("\n1. Encrypt\n2. Decrypt\n")
if option == "1":
text_e = input("What text would you like to encrypt?: ")
print("Your encrypted text is:")
print(hc.encrypt(text_e))
elif option == "2":
text_d = input("What text would you like to decrypt?: ")
print("Your decrypted text is:")
print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| """
Hill Cipher:
The 'HillCipher' class below implements the Hill Cipher algorithm which uses
modern linear algebra techniques to encode and decode text using an encryption
key matrix.
Algorithm:
Let the order of the encryption key be N (as it is a square matrix).
Your text is divided into batches of length N and converted to numerical vectors
by a simple mapping starting with A=0 and so on.
The key is then multiplied with the newly created batch vector to obtain the
encoded vector. After each multiplication modular 36 calculations are performed
on the vectors so as to bring the numbers between 0 and 36 and then mapped with
their corresponding alphanumerics.
While decrypting, the decrypting key is found which is the inverse of the
encrypting key modular 36. The same process is repeated for decrypting to get
the original message back.
Constraints:
The determinant of the encryption key matrix must be relatively prime w.r.t 36.
Note:
This implementation only considers alphanumerics in the text. If the length of
the text to be encrypted is not a multiple of the break key(the length of one
batch of letters), the last character of the text is added to the text until the
length of the text reaches a multiple of the break_key. So the text after
decrypting might be a little different than the original text.
References:
https://apprendre-en-ligne.net/crypto/hill/Hillciph.pdf
https://www.youtube.com/watch?v=kfmNeskzs2o
https://www.youtube.com/watch?v=4RhLNDqcjpA
"""
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
"""
>>> greatest_common_divisor(4, 8)
4
>>> greatest_common_divisor(8, 4)
4
>>> greatest_common_divisor(4, 7)
1
>>> greatest_common_divisor(0, 10)
10
"""
return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
key_string = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
modulus = numpy.vectorize(lambda x: x % 36)
to_int = numpy.vectorize(round)
def __init__(self, encrypt_key: numpy.ndarray) -> None:
"""
encrypt_key is an NxN numpy array
"""
self.encrypt_key = self.modulus(encrypt_key) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
self.break_key = encrypt_key.shape[0]
def replace_letters(self, letter: str) -> int:
"""
>>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
>>> hill_cipher.replace_letters('T')
19
>>> hill_cipher.replace_letters('0')
26
"""
return self.key_string.index(letter)
def replace_digits(self, num: int) -> str:
"""
>>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
>>> hill_cipher.replace_digits(19)
'T'
>>> hill_cipher.replace_digits(26)
'0'
"""
return self.key_string[round(num)]
def check_determinant(self) -> None:
"""
>>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
>>> hill_cipher.check_determinant()
"""
det = round(numpy.linalg.det(self.encrypt_key))
if det < 0:
det = det % len(self.key_string)
req_l = len(self.key_string)
if greatest_common_divisor(det, len(self.key_string)) != 1:
raise ValueError(
f"determinant modular {req_l} of encryption key({det}) is not co prime "
f"w.r.t {req_l}.\nTry another key."
)
def process_text(self, text: str) -> str:
"""
>>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
>>> hill_cipher.process_text('Testing Hill Cipher')
'TESTINGHILLCIPHERR'
>>> hill_cipher.process_text('hello')
'HELLOO'
"""
chars = [char for char in text.upper() if char in self.key_string]
last = chars[-1]
while len(chars) % self.break_key != 0:
chars.append(last)
return "".join(chars)
def encrypt(self, text: str) -> str:
"""
>>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
>>> hill_cipher.encrypt('testing hill cipher')
'WHXYJOLM9C6XT085LL'
>>> hill_cipher.encrypt('hello')
'85FF00'
"""
text = self.process_text(text.upper())
encrypted = ""
for i in range(0, len(text) - self.break_key + 1, self.break_key):
batch = text[i : i + self.break_key]
vec = [self.replace_letters(char) for char in batch]
batch_vec = numpy.array([vec]).T
batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
0
]
encrypted_batch = "".join(
self.replace_digits(num) for num in batch_encrypted
)
encrypted += encrypted_batch
return encrypted
def make_decrypt_key(self) -> numpy.ndarray:
"""
>>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
>>> hill_cipher.make_decrypt_key()
array([[ 6, 25],
[ 5, 26]])
"""
det = round(numpy.linalg.det(self.encrypt_key))
if det < 0:
det = det % len(self.key_string)
det_inv = None
for i in range(len(self.key_string)):
if (det * i) % len(self.key_string) == 1:
det_inv = i
break
inv_key = (
det_inv
* numpy.linalg.det(self.encrypt_key)
* numpy.linalg.inv(self.encrypt_key)
)
return self.to_int(self.modulus(inv_key))
def decrypt(self, text: str) -> str:
"""
>>> hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
>>> hill_cipher.decrypt('WHXYJOLM9C6XT085LL')
'TESTINGHILLCIPHERR'
>>> hill_cipher.decrypt('85FF00')
'HELLOO'
"""
decrypt_key = self.make_decrypt_key()
text = self.process_text(text.upper())
decrypted = ""
for i in range(0, len(text) - self.break_key + 1, self.break_key):
batch = text[i : i + self.break_key]
vec = [self.replace_letters(char) for char in batch]
batch_vec = numpy.array([vec]).T
batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
decrypted_batch = "".join(
self.replace_digits(num) for num in batch_decrypted
)
decrypted += decrypted_batch
return decrypted
def main() -> None:
n = int(input("Enter the order of the encryption key: "))
hill_matrix = []
print("Enter each row of the encryption key with space separated integers")
for _ in range(n):
row = [int(x) for x in input().split()]
hill_matrix.append(row)
hc = HillCipher(numpy.array(hill_matrix))
print("Would you like to encrypt or decrypt some text? (1 or 2)")
option = input("\n1. Encrypt\n2. Decrypt\n")
if option == "1":
text_e = input("What text would you like to encrypt?: ")
print("Your encrypted text is:")
print(hc.encrypt(text_e))
elif option == "2":
text_d = input("What text would you like to decrypt?: ")
print("Your decrypted text is:")
print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
This function implements the shell sort algorithm
which is slightly faster than its pure implementation.
This shell sort is implemented using a gap, which
shrinks by a certain factor each iteration. In this
implementation, the gap is initially set to the
length of the collection. The gap is then reduced by
a certain factor (1.3) each iteration.
For each iteration, the algorithm compares elements
that are a certain number of positions apart
(determined by the gap). If the element at the higher
position is greater than the element at the lower
position, the two elements are swapped. The process
is repeated until the gap is equal to 1.
The reason this is more efficient is that it reduces
the number of comparisons that need to be made. By
using a smaller gap, the list is sorted more quickly.
"""
def shell_sort(collection: list) -> list:
"""Implementation of shell sort algorithm in Python
:param collection: Some mutable ordered collection with heterogeneous
comparable items inside
:return: the same collection ordered by ascending
>>> shell_sort([3, 2, 1])
[1, 2, 3]
>>> shell_sort([])
[]
>>> shell_sort([1])
[1]
"""
# Choose an initial gap value
gap = len(collection)
# Set the gap value to be decreased by a factor of 1.3
# after each iteration
shrink = 1.3
# Continue sorting until the gap is 1
while gap > 1:
# Decrease the gap value
gap = int(gap / shrink)
# Sort the elements using insertion sort
for i in range(gap, len(collection)):
temp = collection[i]
j = i
while j >= gap and collection[j - gap] > temp:
collection[j] = collection[j - gap]
j -= gap
collection[j] = temp
return collection
if __name__ == "__main__":
import doctest
doctest.testmod()
| """
This function implements the shell sort algorithm
which is slightly faster than its pure implementation.
This shell sort is implemented using a gap, which
shrinks by a certain factor each iteration. In this
implementation, the gap is initially set to the
length of the collection. The gap is then reduced by
a certain factor (1.3) each iteration.
For each iteration, the algorithm compares elements
that are a certain number of positions apart
(determined by the gap). If the element at the higher
position is greater than the element at the lower
position, the two elements are swapped. The process
is repeated until the gap is equal to 1.
The reason this is more efficient is that it reduces
the number of comparisons that need to be made. By
using a smaller gap, the list is sorted more quickly.
"""
def shell_sort(collection: list) -> list:
"""Implementation of shell sort algorithm in Python
:param collection: Some mutable ordered collection with heterogeneous
comparable items inside
:return: the same collection ordered by ascending
>>> shell_sort([3, 2, 1])
[1, 2, 3]
>>> shell_sort([])
[]
>>> shell_sort([1])
[1]
"""
# Choose an initial gap value
gap = len(collection)
# Set the gap value to be decreased by a factor of 1.3
# after each iteration
shrink = 1.3
# Continue sorting until the gap is 1
while gap > 1:
# Decrease the gap value
gap = int(gap / shrink)
# Sort the elements using insertion sort
for i in range(gap, len(collection)):
temp = collection[i]
j = i
while j >= gap and collection[j - gap] > temp:
collection[j] = collection[j - gap]
j -= gap
collection[j] = temp
return collection
if __name__ == "__main__":
import doctest
doctest.testmod()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
Gamma function is a very useful tool in math and physics.
It helps calculating complex integral in a convenient way.
for more info: https://en.wikipedia.org/wiki/Gamma_function
Python's Standard Library math.gamma() function overflows around gamma(171.624).
"""
from math import pi, sqrt
def gamma(num: float) -> float:
"""
Calculates the value of Gamma function of num
where num is either an integer (1, 2, 3..) or a half-integer (0.5, 1.5, 2.5 ...).
Implemented using recursion
Examples:
>>> from math import isclose, gamma as math_gamma
>>> gamma(0.5)
1.7724538509055159
>>> gamma(2)
1.0
>>> gamma(3.5)
3.3233509704478426
>>> gamma(171.5)
9.483367566824795e+307
>>> all(isclose(gamma(num), math_gamma(num)) for num in (0.5, 2, 3.5, 171.5))
True
>>> gamma(0)
Traceback (most recent call last):
...
ValueError: math domain error
>>> gamma(-1.1)
Traceback (most recent call last):
...
ValueError: math domain error
>>> gamma(-4)
Traceback (most recent call last):
...
ValueError: math domain error
>>> gamma(172)
Traceback (most recent call last):
...
OverflowError: math range error
>>> gamma(1.1)
Traceback (most recent call last):
...
NotImplementedError: num must be an integer or a half-integer
"""
if num <= 0:
raise ValueError("math domain error")
if num > 171.5:
raise OverflowError("math range error")
elif num - int(num) not in (0, 0.5):
raise NotImplementedError("num must be an integer or a half-integer")
elif num == 0.5:
return sqrt(pi)
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
def test_gamma() -> None:
"""
>>> test_gamma()
"""
assert gamma(0.5) == sqrt(pi)
assert gamma(1) == 1.0
assert gamma(2) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
num = 1.0
while num:
num = float(input("Gamma of: "))
print(f"gamma({num}) = {gamma(num)}")
print("\nEnter 0 to exit...")
| """
Gamma function is a very useful tool in math and physics.
It helps calculating complex integral in a convenient way.
for more info: https://en.wikipedia.org/wiki/Gamma_function
Python's Standard Library math.gamma() function overflows around gamma(171.624).
"""
from math import pi, sqrt
def gamma(num: float) -> float:
"""
Calculates the value of Gamma function of num
where num is either an integer (1, 2, 3..) or a half-integer (0.5, 1.5, 2.5 ...).
Implemented using recursion
Examples:
>>> from math import isclose, gamma as math_gamma
>>> gamma(0.5)
1.7724538509055159
>>> gamma(2)
1.0
>>> gamma(3.5)
3.3233509704478426
>>> gamma(171.5)
9.483367566824795e+307
>>> all(isclose(gamma(num), math_gamma(num)) for num in (0.5, 2, 3.5, 171.5))
True
>>> gamma(0)
Traceback (most recent call last):
...
ValueError: math domain error
>>> gamma(-1.1)
Traceback (most recent call last):
...
ValueError: math domain error
>>> gamma(-4)
Traceback (most recent call last):
...
ValueError: math domain error
>>> gamma(172)
Traceback (most recent call last):
...
OverflowError: math range error
>>> gamma(1.1)
Traceback (most recent call last):
...
NotImplementedError: num must be an integer or a half-integer
"""
if num <= 0:
raise ValueError("math domain error")
if num > 171.5:
raise OverflowError("math range error")
elif num - int(num) not in (0, 0.5):
raise NotImplementedError("num must be an integer or a half-integer")
elif num == 0.5:
return sqrt(pi)
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
def test_gamma() -> None:
"""
>>> test_gamma()
"""
assert gamma(0.5) == sqrt(pi)
assert gamma(1) == 1.0
assert gamma(2) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
num = 1.0
while num:
num = float(input("Gamma of: "))
print(f"gamma({num}) = {gamma(num)}")
print("\nEnter 0 to exit...")
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
Three distinct points are plotted at random on a Cartesian plane,
for which -1000 ≤ x, y ≤ 1000, such that a triangle is formed.
Consider the following two triangles:
A(-340,495), B(-153,-910), C(835,-947)
X(-175,41), Y(-421,-714), Z(574,-645)
It can be verified that triangle ABC contains the origin, whereas
triangle XYZ does not.
Using triangles.txt (right click and 'Save Link/Target As...'), a 27K text
file containing the coordinates of one thousand "random" triangles, find
the number of triangles for which the interior contains the origin.
NOTE: The first two examples in the file represent the triangles in the
example given above.
"""
from __future__ import annotations
from pathlib import Path
def vector_product(point1: tuple[int, int], point2: tuple[int, int]) -> int:
"""
Return the 2-d vector product of two vectors.
>>> vector_product((1, 2), (-5, 0))
10
>>> vector_product((3, 1), (6, 10))
24
"""
return point1[0] * point2[1] - point1[1] * point2[0]
def contains_origin(x1: int, y1: int, x2: int, y2: int, x3: int, y3: int) -> bool:
"""
Check if the triangle given by the points A(x1, y1), B(x2, y2), C(x3, y3)
contains the origin.
>>> contains_origin(-340, 495, -153, -910, 835, -947)
True
>>> contains_origin(-175, 41, -421, -714, 574, -645)
False
"""
point_a: tuple[int, int] = (x1, y1)
point_a_to_b: tuple[int, int] = (x2 - x1, y2 - y1)
point_a_to_c: tuple[int, int] = (x3 - x1, y3 - y1)
a: float = -vector_product(point_a, point_a_to_b) / vector_product(
point_a_to_c, point_a_to_b
)
b: float = +vector_product(point_a, point_a_to_c) / vector_product(
point_a_to_c, point_a_to_b
)
return a > 0 and b > 0 and a + b < 1
def solution(filename: str = "p102_triangles.txt") -> int:
"""
Find the number of triangles whose interior contains the origin.
>>> solution("test_triangles.txt")
1
"""
data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
triangles: list[list[int]] = []
for line in data.strip().split("\n"):
triangles.append([int(number) for number in line.split(",")])
ret: int = 0
triangle: list[int]
for triangle in triangles:
ret += contains_origin(*triangle)
return ret
if __name__ == "__main__":
print(f"{solution() = }")
| """
Three distinct points are plotted at random on a Cartesian plane,
for which -1000 ≤ x, y ≤ 1000, such that a triangle is formed.
Consider the following two triangles:
A(-340,495), B(-153,-910), C(835,-947)
X(-175,41), Y(-421,-714), Z(574,-645)
It can be verified that triangle ABC contains the origin, whereas
triangle XYZ does not.
Using triangles.txt (right click and 'Save Link/Target As...'), a 27K text
file containing the coordinates of one thousand "random" triangles, find
the number of triangles for which the interior contains the origin.
NOTE: The first two examples in the file represent the triangles in the
example given above.
"""
from __future__ import annotations
from pathlib import Path
def vector_product(point1: tuple[int, int], point2: tuple[int, int]) -> int:
"""
Return the 2-d vector product of two vectors.
>>> vector_product((1, 2), (-5, 0))
10
>>> vector_product((3, 1), (6, 10))
24
"""
return point1[0] * point2[1] - point1[1] * point2[0]
def contains_origin(x1: int, y1: int, x2: int, y2: int, x3: int, y3: int) -> bool:
"""
Check if the triangle given by the points A(x1, y1), B(x2, y2), C(x3, y3)
contains the origin.
>>> contains_origin(-340, 495, -153, -910, 835, -947)
True
>>> contains_origin(-175, 41, -421, -714, 574, -645)
False
"""
point_a: tuple[int, int] = (x1, y1)
point_a_to_b: tuple[int, int] = (x2 - x1, y2 - y1)
point_a_to_c: tuple[int, int] = (x3 - x1, y3 - y1)
a: float = -vector_product(point_a, point_a_to_b) / vector_product(
point_a_to_c, point_a_to_b
)
b: float = +vector_product(point_a, point_a_to_c) / vector_product(
point_a_to_c, point_a_to_b
)
return a > 0 and b > 0 and a + b < 1
def solution(filename: str = "p102_triangles.txt") -> int:
"""
Find the number of triangles whose interior contains the origin.
>>> solution("test_triangles.txt")
1
"""
data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
triangles: list[list[int]] = []
for line in data.strip().split("\n"):
triangles.append([int(number) for number in line.split(",")])
ret: int = 0
triangle: list[int]
for triangle in triangles:
ret += contains_origin(*triangle)
return ret
if __name__ == "__main__":
print(f"{solution() = }")
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
Conway's Game of Life implemented in Python.
https://en.wikipedia.org/wiki/Conway%27s_Game_of_Life
"""
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
"""
Generates the next generation for a given state of Conway's Game of Life.
>>> new_generation(BLINKER)
[[0, 0, 0], [1, 1, 1], [0, 0, 0]]
"""
next_generation = []
for i in range(len(cells)):
next_generation_row = []
for j in range(len(cells[i])):
# Get the number of live neighbours
neighbour_count = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i]) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i]) - 1:
neighbour_count += cells[i][j + 1]
if i < len(cells) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(cells) - 1:
neighbour_count += cells[i + 1][j]
if i < len(cells) - 1 and j < len(cells[i]) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
alive = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1)
else:
next_generation_row.append(0)
next_generation.append(next_generation_row)
return next_generation
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
"""
Generates a list of images of subsequent Game of Life states.
"""
images = []
for _ in range(frames):
# Create output image
img = Image.new("RGB", (len(cells[0]), len(cells)))
pixels = img.load()
# Save cells to image
for x in range(len(cells)):
for y in range(len(cells[0])):
colour = 255 - cells[y][x] * 255
pixels[x, y] = (colour, colour, colour)
# Save image
images.append(img)
cells = new_generation(cells)
return images
if __name__ == "__main__":
images = generate_images(GLIDER, 16)
images[0].save("out.gif", save_all=True, append_images=images[1:])
| """
Conway's Game of Life implemented in Python.
https://en.wikipedia.org/wiki/Conway%27s_Game_of_Life
"""
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
"""
Generates the next generation for a given state of Conway's Game of Life.
>>> new_generation(BLINKER)
[[0, 0, 0], [1, 1, 1], [0, 0, 0]]
"""
next_generation = []
for i in range(len(cells)):
next_generation_row = []
for j in range(len(cells[i])):
# Get the number of live neighbours
neighbour_count = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i]) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i]) - 1:
neighbour_count += cells[i][j + 1]
if i < len(cells) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(cells) - 1:
neighbour_count += cells[i + 1][j]
if i < len(cells) - 1 and j < len(cells[i]) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
alive = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1)
else:
next_generation_row.append(0)
next_generation.append(next_generation_row)
return next_generation
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
"""
Generates a list of images of subsequent Game of Life states.
"""
images = []
for _ in range(frames):
# Create output image
img = Image.new("RGB", (len(cells[0]), len(cells)))
pixels = img.load()
# Save cells to image
for x in range(len(cells)):
for y in range(len(cells[0])):
colour = 255 - cells[y][x] * 255
pixels[x, y] = (colour, colour, colour)
# Save image
images.append(img)
cells = new_generation(cells)
return images
if __name__ == "__main__":
images = generate_images(GLIDER, 16)
images[0].save("out.gif", save_all=True, append_images=images[1:])
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """Segmented Sieve."""
import math
def sieve(n: int) -> list[int]:
"""Segmented Sieve."""
in_prime = []
start = 2
end = int(math.sqrt(n)) # Size of every segment
temp = [True] * (end + 1)
prime = []
while start <= end:
if temp[start] is True:
in_prime.append(start)
for i in range(start * start, end + 1, start):
temp[i] = False
start += 1
prime += in_prime
low = end + 1
high = min(2 * end, n)
while low <= n:
temp = [True] * (high - low + 1)
for each in in_prime:
t = math.floor(low / each) * each
if t < low:
t += each
for j in range(t, high + 1, each):
temp[j - low] = False
for j in range(len(temp)):
if temp[j] is True:
prime.append(j + low)
low = high + 1
high = min(high + end, n)
return prime
print(sieve(10**6))
| """Segmented Sieve."""
import math
def sieve(n: int) -> list[int]:
"""Segmented Sieve."""
in_prime = []
start = 2
end = int(math.sqrt(n)) # Size of every segment
temp = [True] * (end + 1)
prime = []
while start <= end:
if temp[start] is True:
in_prime.append(start)
for i in range(start * start, end + 1, start):
temp[i] = False
start += 1
prime += in_prime
low = end + 1
high = min(2 * end, n)
while low <= n:
temp = [True] * (high - low + 1)
for each in in_prime:
t = math.floor(low / each) * each
if t < low:
t += each
for j in range(t, high + 1, each):
temp[j - low] = False
for j in range(len(temp)):
if temp[j] is True:
prime.append(j + low)
low = high + 1
high = min(high + end, n)
return prime
print(sieve(10**6))
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
Problem 16: https://projecteuler.net/problem=16
2^15 = 32768 and the sum of its digits is 3 + 2 + 7 + 6 + 8 = 26.
What is the sum of the digits of the number 2^1000?
"""
def solution(power: int = 1000) -> int:
"""Returns the sum of the digits of the number 2^power.
>>> solution(1000)
1366
>>> solution(50)
76
>>> solution(20)
31
>>> solution(15)
26
"""
n = 2**power
r = 0
while n:
r, n = r + n % 10, n // 10
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| """
Problem 16: https://projecteuler.net/problem=16
2^15 = 32768 and the sum of its digits is 3 + 2 + 7 + 6 + 8 = 26.
What is the sum of the digits of the number 2^1000?
"""
def solution(power: int = 1000) -> int:
"""Returns the sum of the digits of the number 2^power.
>>> solution(1000)
1366
>>> solution(50)
76
>>> solution(20)
31
>>> solution(15)
26
"""
n = 2**power
r = 0
while n:
r, n = r + n % 10, n // 10
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
The Fibonacci sequence is defined by the recurrence relation:
Fn = Fn−1 + Fn−2, where F1 = 1 and F2 = 1.
Hence the first 12 terms will be:
F1 = 1
F2 = 1
F3 = 2
F4 = 3
F5 = 5
F6 = 8
F7 = 13
F8 = 21
F9 = 34
F10 = 55
F11 = 89
F12 = 144
The 12th term, F12, is the first term to contain three digits.
What is the index of the first term in the Fibonacci sequence to contain 1000
digits?
"""
def solution(n: int = 1000) -> int:
"""Returns the index of the first term in the Fibonacci sequence to contain
n digits.
>>> solution(1000)
4782
>>> solution(100)
476
>>> solution(50)
237
>>> solution(3)
12
"""
f1, f2 = 1, 1
index = 2
while True:
i = 0
f = f1 + f2
f1, f2 = f2, f
index += 1
for _ in str(f):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| """
The Fibonacci sequence is defined by the recurrence relation:
Fn = Fn−1 + Fn−2, where F1 = 1 and F2 = 1.
Hence the first 12 terms will be:
F1 = 1
F2 = 1
F3 = 2
F4 = 3
F5 = 5
F6 = 8
F7 = 13
F8 = 21
F9 = 34
F10 = 55
F11 = 89
F12 = 144
The 12th term, F12, is the first term to contain three digits.
What is the index of the first term in the Fibonacci sequence to contain 1000
digits?
"""
def solution(n: int = 1000) -> int:
"""Returns the index of the first term in the Fibonacci sequence to contain
n digits.
>>> solution(1000)
4782
>>> solution(100)
476
>>> solution(50)
237
>>> solution(3)
12
"""
f1, f2 = 1, 1
index = 2
while True:
i = 0
f = f1 + f2
f1, f2 = f2, f
index += 1
for _ in str(f):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
Project Euler Problem 9: https://projecteuler.net/problem=9
Special Pythagorean triplet
A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,
a^2 + b^2 = c^2
For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.
There exists exactly one Pythagorean triplet for which a + b + c = 1000.
Find the product a*b*c.
References:
- https://en.wikipedia.org/wiki/Pythagorean_triple
"""
def solution() -> int:
"""
Returns the product of a,b,c which are Pythagorean Triplet that satisfies
the following:
1. a**2 + b**2 = c**2
2. a + b + c = 1000
>>> solution()
31875000
"""
return [
a * b * (1000 - a - b)
for a in range(1, 999)
for b in range(a, 999)
if (a * a + b * b == (1000 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(f"{solution() = }")
| """
Project Euler Problem 9: https://projecteuler.net/problem=9
Special Pythagorean triplet
A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,
a^2 + b^2 = c^2
For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.
There exists exactly one Pythagorean triplet for which a + b + c = 1000.
Find the product a*b*c.
References:
- https://en.wikipedia.org/wiki/Pythagorean_triple
"""
def solution() -> int:
"""
Returns the product of a,b,c which are Pythagorean Triplet that satisfies
the following:
1. a**2 + b**2 = c**2
2. a + b + c = 1000
>>> solution()
31875000
"""
return [
a * b * (1000 - a - b)
for a in range(1, 999)
for b in range(a, 999)
if (a * a + b * b == (1000 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(f"{solution() = }")
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| -1 |
||
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
Project Euler Problem 115: https://projecteuler.net/problem=115
NOTE: This is a more difficult version of Problem 114
(https://projecteuler.net/problem=114).
A row measuring n units in length has red blocks
with a minimum length of m units placed on it, such that any two red blocks
(which are allowed to be different lengths) are separated by at least one black square.
Let the fill-count function, F(m, n),
represent the number of ways that a row can be filled.
For example, F(3, 29) = 673135 and F(3, 30) = 1089155.
That is, for m = 3, it can be seen that n = 30 is the smallest value
for which the fill-count function first exceeds one million.
In the same way, for m = 10, it can be verified that
F(10, 56) = 880711 and F(10, 57) = 1148904, so n = 57 is the least value
for which the fill-count function first exceeds one million.
For m = 50, find the least value of n
for which the fill-count function first exceeds one million.
"""
from itertools import count
def solution(min_block_length: int = 50) -> int:
"""
Returns for given minimum block length the least value of n
for which the fill-count function first exceeds one million
>>> solution(3)
30
>>> solution(10)
57
"""
fill_count_functions = [1] * min_block_length
for n in count(min_block_length):
fill_count_functions.append(1)
for block_length in range(min_block_length, n + 1):
for block_start in range(n - block_length):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1_000_000:
break
return n
if __name__ == "__main__":
print(f"{solution() = }")
| """
Project Euler Problem 115: https://projecteuler.net/problem=115
NOTE: This is a more difficult version of Problem 114
(https://projecteuler.net/problem=114).
A row measuring n units in length has red blocks
with a minimum length of m units placed on it, such that any two red blocks
(which are allowed to be different lengths) are separated by at least one black square.
Let the fill-count function, F(m, n),
represent the number of ways that a row can be filled.
For example, F(3, 29) = 673135 and F(3, 30) = 1089155.
That is, for m = 3, it can be seen that n = 30 is the smallest value
for which the fill-count function first exceeds one million.
In the same way, for m = 10, it can be verified that
F(10, 56) = 880711 and F(10, 57) = 1148904, so n = 57 is the least value
for which the fill-count function first exceeds one million.
For m = 50, find the least value of n
for which the fill-count function first exceeds one million.
"""
from itertools import count
def solution(min_block_length: int = 50) -> int:
"""
Returns for given minimum block length the least value of n
for which the fill-count function first exceeds one million
>>> solution(3)
30
>>> solution(10)
57
"""
fill_count_functions = [1] * min_block_length
for n in count(min_block_length):
fill_count_functions.append(1)
for block_length in range(min_block_length, n + 1):
for block_start in range(n - block_length):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1_000_000:
break
return n
if __name__ == "__main__":
print(f"{solution() = }")
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
Illustrate how to implement inorder traversal in binary search tree.
Author: Gurneet Singh
https://www.geeksforgeeks.org/tree-traversals-inorder-preorder-and-postorder/
"""
class BinaryTreeNode:
"""Defining the structure of BinaryTreeNode"""
def __init__(self, data: int) -> None:
self.data = data
self.left_child: BinaryTreeNode | None = None
self.right_child: BinaryTreeNode | None = None
def insert(node: BinaryTreeNode | None, new_value: int) -> BinaryTreeNode | None:
"""
If the binary search tree is empty, make a new node and declare it as root.
>>> node_a = BinaryTreeNode(12345)
>>> node_b = insert(node_a, 67890)
>>> node_a.left_child == node_b.left_child
True
>>> node_a.right_child == node_b.right_child
True
>>> node_a.data == node_b.data
True
"""
if node is None:
node = BinaryTreeNode(new_value)
return node
# binary search tree is not empty,
# so we will insert it into the tree
# if new_value is less than value of data in node,
# add it to left subtree and proceed recursively
if new_value < node.data:
node.left_child = insert(node.left_child, new_value)
else:
# if new_value is greater than value of data in node,
# add it to right subtree and proceed recursively
node.right_child = insert(node.right_child, new_value)
return node
def inorder(node: None | BinaryTreeNode) -> list[int]: # if node is None,return
"""
>>> inorder(make_tree())
[6, 10, 14, 15, 20, 25, 60]
"""
if node:
inorder_array = inorder(node.left_child)
inorder_array = inorder_array + [node.data]
inorder_array = inorder_array + inorder(node.right_child)
else:
inorder_array = []
return inorder_array
def make_tree() -> BinaryTreeNode | None:
root = insert(None, 15)
insert(root, 10)
insert(root, 25)
insert(root, 6)
insert(root, 14)
insert(root, 20)
insert(root, 60)
return root
def main() -> None:
# main function
root = make_tree()
print("Printing values of binary search tree in Inorder Traversal.")
inorder(root)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| """
Illustrate how to implement inorder traversal in binary search tree.
Author: Gurneet Singh
https://www.geeksforgeeks.org/tree-traversals-inorder-preorder-and-postorder/
"""
class BinaryTreeNode:
"""Defining the structure of BinaryTreeNode"""
def __init__(self, data: int) -> None:
self.data = data
self.left_child: BinaryTreeNode | None = None
self.right_child: BinaryTreeNode | None = None
def insert(node: BinaryTreeNode | None, new_value: int) -> BinaryTreeNode | None:
"""
If the binary search tree is empty, make a new node and declare it as root.
>>> node_a = BinaryTreeNode(12345)
>>> node_b = insert(node_a, 67890)
>>> node_a.left_child == node_b.left_child
True
>>> node_a.right_child == node_b.right_child
True
>>> node_a.data == node_b.data
True
"""
if node is None:
node = BinaryTreeNode(new_value)
return node
# binary search tree is not empty,
# so we will insert it into the tree
# if new_value is less than value of data in node,
# add it to left subtree and proceed recursively
if new_value < node.data:
node.left_child = insert(node.left_child, new_value)
else:
# if new_value is greater than value of data in node,
# add it to right subtree and proceed recursively
node.right_child = insert(node.right_child, new_value)
return node
def inorder(node: None | BinaryTreeNode) -> list[int]: # if node is None,return
"""
>>> inorder(make_tree())
[6, 10, 14, 15, 20, 25, 60]
"""
if node:
inorder_array = inorder(node.left_child)
inorder_array = inorder_array + [node.data]
inorder_array = inorder_array + inorder(node.right_child)
else:
inorder_array = []
return inorder_array
def make_tree() -> BinaryTreeNode | None:
root = insert(None, 15)
insert(root, 10)
insert(root, 25)
insert(root, 6)
insert(root, 14)
insert(root, 20)
insert(root, 60)
return root
def main() -> None:
# main function
root = make_tree()
print("Printing values of binary search tree in Inorder Traversal.")
inorder(root)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| # https://en.wikipedia.org/wiki/Hill_climbing
import math
class SearchProblem:
"""
An interface to define search problems.
The interface will be illustrated using the example of mathematical function.
"""
def __init__(self, x: int, y: int, step_size: int, function_to_optimize):
"""
The constructor of the search problem.
x: the x coordinate of the current search state.
y: the y coordinate of the current search state.
step_size: size of the step to take when looking for neighbors.
function_to_optimize: a function to optimize having the signature f(x, y).
"""
self.x = x
self.y = y
self.step_size = step_size
self.function = function_to_optimize
def score(self) -> int:
"""
Returns the output of the function called with current x and y coordinates.
>>> def test_function(x, y):
... return x + y
>>> SearchProblem(0, 0, 1, test_function).score() # 0 + 0 = 0
0
>>> SearchProblem(5, 7, 1, test_function).score() # 5 + 7 = 12
12
"""
return self.function(self.x, self.y)
def get_neighbors(self):
"""
Returns a list of coordinates of neighbors adjacent to the current coordinates.
Neighbors:
| 0 | 1 | 2 |
| 3 | _ | 4 |
| 5 | 6 | 7 |
"""
step_size = self.step_size
return [
SearchProblem(x, y, step_size, self.function)
for x, y in (
(self.x - step_size, self.y - step_size),
(self.x - step_size, self.y),
(self.x - step_size, self.y + step_size),
(self.x, self.y - step_size),
(self.x, self.y + step_size),
(self.x + step_size, self.y - step_size),
(self.x + step_size, self.y),
(self.x + step_size, self.y + step_size),
)
]
def __hash__(self):
"""
hash the string representation of the current search state.
"""
return hash(str(self))
def __eq__(self, obj):
"""
Check if the 2 objects are equal.
"""
if isinstance(obj, SearchProblem):
return hash(str(self)) == hash(str(obj))
return False
def __str__(self):
"""
string representation of the current search state.
>>> str(SearchProblem(0, 0, 1, None))
'x: 0 y: 0'
>>> str(SearchProblem(2, 5, 1, None))
'x: 2 y: 5'
"""
return f"x: {self.x} y: {self.y}"
def hill_climbing(
search_prob,
find_max: bool = True,
max_x: float = math.inf,
min_x: float = -math.inf,
max_y: float = math.inf,
min_y: float = -math.inf,
visualization: bool = False,
max_iter: int = 10000,
) -> SearchProblem:
"""
Implementation of the hill climbling algorithm.
We start with a given state, find all its neighbors,
move towards the neighbor which provides the maximum (or minimum) change.
We keep doing this until we are at a state where we do not have any
neighbors which can improve the solution.
Args:
search_prob: The search state at the start.
find_max: If True, the algorithm should find the maximum else the minimum.
max_x, min_x, max_y, min_y: the maximum and minimum bounds of x and y.
visualization: If True, a matplotlib graph is displayed.
max_iter: number of times to run the iteration.
Returns a search state having the maximum (or minimum) score.
"""
current_state = search_prob
scores = [] # list to store the current score at each iteration
iterations = 0
solution_found = False
visited = set()
while not solution_found and iterations < max_iter:
visited.add(current_state)
iterations += 1
current_score = current_state.score()
scores.append(current_score)
neighbors = current_state.get_neighbors()
max_change = -math.inf
min_change = math.inf
next_state = None # to hold the next best neighbor
for neighbor in neighbors:
if neighbor in visited:
continue # do not want to visit the same state again
if (
neighbor.x > max_x
or neighbor.x < min_x
or neighbor.y > max_y
or neighbor.y < min_y
):
continue # neighbor outside our bounds
change = neighbor.score() - current_score
if find_max: # finding max
# going to direction with greatest ascent
if change > max_change and change > 0:
max_change = change
next_state = neighbor
else: # finding min
# to direction with greatest descent
if change < min_change and change < 0:
min_change = change
next_state = neighbor
if next_state is not None:
# we found at least one neighbor which improved the current state
current_state = next_state
else:
# since we have no neighbor that improves the solution we stop the search
solution_found = True
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(iterations), scores)
plt.xlabel("Iterations")
plt.ylabel("Function values")
plt.show()
return current_state
if __name__ == "__main__":
import doctest
doctest.testmod()
def test_f1(x, y):
return (x**2) + (y**2)
# starting the problem with initial coordinates (3, 4)
prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f1)
local_min = hill_climbing(prob, find_max=False)
print(
"The minimum score for f(x, y) = x^2 + y^2 found via hill climbing: "
f"{local_min.score()}"
)
# starting the problem with initial coordinates (12, 47)
prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
local_min = hill_climbing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
)
def test_f2(x, y):
return (3 * x**2) - (6 * y)
prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f1)
local_min = hill_climbing(prob, find_max=True)
print(
"The maximum score for f(x, y) = x^2 + y^2 found via hill climbing: "
f"{local_min.score()}"
)
| # https://en.wikipedia.org/wiki/Hill_climbing
import math
class SearchProblem:
"""
An interface to define search problems.
The interface will be illustrated using the example of mathematical function.
"""
def __init__(self, x: int, y: int, step_size: int, function_to_optimize):
"""
The constructor of the search problem.
x: the x coordinate of the current search state.
y: the y coordinate of the current search state.
step_size: size of the step to take when looking for neighbors.
function_to_optimize: a function to optimize having the signature f(x, y).
"""
self.x = x
self.y = y
self.step_size = step_size
self.function = function_to_optimize
def score(self) -> int:
"""
Returns the output of the function called with current x and y coordinates.
>>> def test_function(x, y):
... return x + y
>>> SearchProblem(0, 0, 1, test_function).score() # 0 + 0 = 0
0
>>> SearchProblem(5, 7, 1, test_function).score() # 5 + 7 = 12
12
"""
return self.function(self.x, self.y)
def get_neighbors(self):
"""
Returns a list of coordinates of neighbors adjacent to the current coordinates.
Neighbors:
| 0 | 1 | 2 |
| 3 | _ | 4 |
| 5 | 6 | 7 |
"""
step_size = self.step_size
return [
SearchProblem(x, y, step_size, self.function)
for x, y in (
(self.x - step_size, self.y - step_size),
(self.x - step_size, self.y),
(self.x - step_size, self.y + step_size),
(self.x, self.y - step_size),
(self.x, self.y + step_size),
(self.x + step_size, self.y - step_size),
(self.x + step_size, self.y),
(self.x + step_size, self.y + step_size),
)
]
def __hash__(self):
"""
hash the string representation of the current search state.
"""
return hash(str(self))
def __eq__(self, obj):
"""
Check if the 2 objects are equal.
"""
if isinstance(obj, SearchProblem):
return hash(str(self)) == hash(str(obj))
return False
def __str__(self):
"""
string representation of the current search state.
>>> str(SearchProblem(0, 0, 1, None))
'x: 0 y: 0'
>>> str(SearchProblem(2, 5, 1, None))
'x: 2 y: 5'
"""
return f"x: {self.x} y: {self.y}"
def hill_climbing(
search_prob,
find_max: bool = True,
max_x: float = math.inf,
min_x: float = -math.inf,
max_y: float = math.inf,
min_y: float = -math.inf,
visualization: bool = False,
max_iter: int = 10000,
) -> SearchProblem:
"""
Implementation of the hill climbling algorithm.
We start with a given state, find all its neighbors,
move towards the neighbor which provides the maximum (or minimum) change.
We keep doing this until we are at a state where we do not have any
neighbors which can improve the solution.
Args:
search_prob: The search state at the start.
find_max: If True, the algorithm should find the maximum else the minimum.
max_x, min_x, max_y, min_y: the maximum and minimum bounds of x and y.
visualization: If True, a matplotlib graph is displayed.
max_iter: number of times to run the iteration.
Returns a search state having the maximum (or minimum) score.
"""
current_state = search_prob
scores = [] # list to store the current score at each iteration
iterations = 0
solution_found = False
visited = set()
while not solution_found and iterations < max_iter:
visited.add(current_state)
iterations += 1
current_score = current_state.score()
scores.append(current_score)
neighbors = current_state.get_neighbors()
max_change = -math.inf
min_change = math.inf
next_state = None # to hold the next best neighbor
for neighbor in neighbors:
if neighbor in visited:
continue # do not want to visit the same state again
if (
neighbor.x > max_x
or neighbor.x < min_x
or neighbor.y > max_y
or neighbor.y < min_y
):
continue # neighbor outside our bounds
change = neighbor.score() - current_score
if find_max: # finding max
# going to direction with greatest ascent
if change > max_change and change > 0:
max_change = change
next_state = neighbor
else: # finding min
# to direction with greatest descent
if change < min_change and change < 0:
min_change = change
next_state = neighbor
if next_state is not None:
# we found at least one neighbor which improved the current state
current_state = next_state
else:
# since we have no neighbor that improves the solution we stop the search
solution_found = True
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(iterations), scores)
plt.xlabel("Iterations")
plt.ylabel("Function values")
plt.show()
return current_state
if __name__ == "__main__":
import doctest
doctest.testmod()
def test_f1(x, y):
return (x**2) + (y**2)
# starting the problem with initial coordinates (3, 4)
prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f1)
local_min = hill_climbing(prob, find_max=False)
print(
"The minimum score for f(x, y) = x^2 + y^2 found via hill climbing: "
f"{local_min.score()}"
)
# starting the problem with initial coordinates (12, 47)
prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
local_min = hill_climbing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
)
def test_f2(x, y):
return (3 * x**2) - (6 * y)
prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f1)
local_min = hill_climbing(prob, find_max=True)
print(
"The maximum score for f(x, y) = x^2 + y^2 found via hill climbing: "
f"{local_min.score()}"
)
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
Finding the peak of a unimodal list using divide and conquer.
A unimodal array is defined as follows: array is increasing up to index p,
then decreasing afterwards. (for p >= 1)
An obvious solution can be performed in O(n),
to find the maximum of the array.
(From Kleinberg and Tardos. Algorithm Design.
Addison Wesley 2006: Chapter 5 Solved Exercise 1)
"""
from __future__ import annotations
def peak(lst: list[int]) -> int:
"""
Return the peak value of `lst`.
>>> peak([1, 2, 3, 4, 5, 4, 3, 2, 1])
5
>>> peak([1, 10, 9, 8, 7, 6, 5, 4])
10
>>> peak([1, 9, 8, 7])
9
>>> peak([1, 2, 3, 4, 5, 6, 7, 0])
7
>>> peak([1, 2, 3, 4, 3, 2, 1, 0, -1, -2])
4
"""
# middle index
m = len(lst) // 2
# choose the middle 3 elements
three = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m]) == 2:
m -= 1
return peak(lst[m:])
# decreasing
else:
if len(lst[:m]) == 2:
m += 1
return peak(lst[:m])
if __name__ == "__main__":
import doctest
doctest.testmod()
| """
Finding the peak of a unimodal list using divide and conquer.
A unimodal array is defined as follows: array is increasing up to index p,
then decreasing afterwards. (for p >= 1)
An obvious solution can be performed in O(n),
to find the maximum of the array.
(From Kleinberg and Tardos. Algorithm Design.
Addison Wesley 2006: Chapter 5 Solved Exercise 1)
"""
from __future__ import annotations
def peak(lst: list[int]) -> int:
"""
Return the peak value of `lst`.
>>> peak([1, 2, 3, 4, 5, 4, 3, 2, 1])
5
>>> peak([1, 10, 9, 8, 7, 6, 5, 4])
10
>>> peak([1, 9, 8, 7])
9
>>> peak([1, 2, 3, 4, 5, 6, 7, 0])
7
>>> peak([1, 2, 3, 4, 3, 2, 1, 0, -1, -2])
4
"""
# middle index
m = len(lst) // 2
# choose the middle 3 elements
three = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m]) == 2:
m -= 1
return peak(lst[m:])
# decreasing
else:
if len(lst[:m]) == 2:
m += 1
return peak(lst[:m])
if __name__ == "__main__":
import doctest
doctest.testmod()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| from collections.abc import Callable
import numpy as np
def euler_modified(
ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.array:
"""
Calculate solution at each step to an ODE using Euler's Modified Method
The Euler Method is straightforward to implement, but can't give accurate solutions.
So, some changes were proposed to improve accuracy.
https://en.wikipedia.org/wiki/Euler_method
Arguments:
ode_func -- The ode as a function of x and y
y0 -- the initial value for y
x0 -- the initial value for x
stepsize -- the increment value for x
x_end -- the end value for x
>>> # the exact solution is math.exp(x)
>>> def f1(x, y):
... return -2*x*(y**2)
>>> y = euler_modified(f1, 1.0, 0.0, 0.2, 1.0)
>>> y[-1]
0.503338255442106
>>> import math
>>> def f2(x, y):
... return -2*y + (x**3)*math.exp(-2*x)
>>> y = euler_modified(f2, 1.0, 0.0, 0.1, 0.3)
>>> y[-1]
0.5525976431951775
"""
n = int(np.ceil((x_end - x0) / step_size))
y = np.zeros((n + 1,))
y[0] = y0
x = x0
for k in range(n):
y_get = y[k] + step_size * ode_func(x, y[k])
y[k + 1] = y[k] + (
(step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_get))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| from collections.abc import Callable
import numpy as np
def euler_modified(
ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.array:
"""
Calculate solution at each step to an ODE using Euler's Modified Method
The Euler Method is straightforward to implement, but can't give accurate solutions.
So, some changes were proposed to improve accuracy.
https://en.wikipedia.org/wiki/Euler_method
Arguments:
ode_func -- The ode as a function of x and y
y0 -- the initial value for y
x0 -- the initial value for x
stepsize -- the increment value for x
x_end -- the end value for x
>>> # the exact solution is math.exp(x)
>>> def f1(x, y):
... return -2*x*(y**2)
>>> y = euler_modified(f1, 1.0, 0.0, 0.2, 1.0)
>>> y[-1]
0.503338255442106
>>> import math
>>> def f2(x, y):
... return -2*y + (x**3)*math.exp(-2*x)
>>> y = euler_modified(f2, 1.0, 0.0, 0.1, 0.3)
>>> y[-1]
0.5525976431951775
"""
n = int(np.ceil((x_end - x0) / step_size))
y = np.zeros((n + 1,))
y[0] = y0
x = x0
for k in range(n):
y_get = y[k] + step_size * ode_func(x, y[k])
y[k + 1] = y[k] + (
(step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_get))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
Linear Discriminant Analysis
Assumptions About Data :
1. The input variables has a gaussian distribution.
2. The variance calculated for each input variables by class grouping is the
same.
3. The mix of classes in your training set is representative of the problem.
Learning The Model :
The LDA model requires the estimation of statistics from the training data :
1. Mean of each input value for each class.
2. Probability of an instance belong to each class.
3. Covariance for the input data for each class
Calculate the class means :
mean(x) = 1/n ( for i = 1 to i = n --> sum(xi))
Calculate the class probabilities :
P(y = 0) = count(y = 0) / (count(y = 0) + count(y = 1))
P(y = 1) = count(y = 1) / (count(y = 0) + count(y = 1))
Calculate the variance :
We can calculate the variance for dataset in two steps :
1. Calculate the squared difference for each input variable from the
group mean.
2. Calculate the mean of the squared difference.
------------------------------------------------
Squared_Difference = (x - mean(k)) ** 2
Variance = (1 / (count(x) - count(classes))) *
(for i = 1 to i = n --> sum(Squared_Difference(xi)))
Making Predictions :
discriminant(x) = x * (mean / variance) -
((mean ** 2) / (2 * variance)) + Ln(probability)
---------------------------------------------------------------------------
After calculating the discriminant value for each class, the class with the
largest discriminant value is taken as the prediction.
Author: @EverLookNeverSee
"""
from collections.abc import Callable
from math import log
from os import name, system
from random import gauss, seed
from typing import TypeVar
# Make a training dataset drawn from a gaussian distribution
def gaussian_distribution(mean: float, std_dev: float, instance_count: int) -> list:
"""
Generate gaussian distribution instances based-on given mean and standard deviation
:param mean: mean value of class
:param std_dev: value of standard deviation entered by usr or default value of it
:param instance_count: instance number of class
:return: a list containing generated values based-on given mean, std_dev and
instance_count
>>> gaussian_distribution(5.0, 1.0, 20) # doctest: +NORMALIZE_WHITESPACE
[6.288184753155463, 6.4494456086997705, 5.066335808938262, 4.235456349028368,
3.9078267848958586, 5.031334516831717, 3.977896829989127, 3.56317055489747,
5.199311976483754, 5.133374604658605, 5.546468300338232, 4.086029056264687,
5.005005283626573, 4.935258239627312, 3.494170998739258, 5.537997178661033,
5.320711100998849, 7.3891120432406865, 5.202969177309964, 4.855297691835079]
"""
seed(1)
return [gauss(mean, std_dev) for _ in range(instance_count)]
# Make corresponding Y flags to detecting classes
def y_generator(class_count: int, instance_count: list) -> list:
"""
Generate y values for corresponding classes
:param class_count: Number of classes(data groupings) in dataset
:param instance_count: number of instances in class
:return: corresponding values for data groupings in dataset
>>> y_generator(1, [10])
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
>>> y_generator(2, [5, 10])
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
>>> y_generator(4, [10, 5, 15, 20]) # doctest: +NORMALIZE_WHITESPACE
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]
"""
return [k for k in range(class_count) for _ in range(instance_count[k])]
# Calculate the class means
def calculate_mean(instance_count: int, items: list) -> float:
"""
Calculate given class mean
:param instance_count: Number of instances in class
:param items: items that related to specific class(data grouping)
:return: calculated actual mean of considered class
>>> items = gaussian_distribution(5.0, 1.0, 20)
>>> calculate_mean(len(items), items)
5.011267842911003
"""
# the sum of all items divided by number of instances
return sum(items) / instance_count
# Calculate the class probabilities
def calculate_probabilities(instance_count: int, total_count: int) -> float:
"""
Calculate the probability that a given instance will belong to which class
:param instance_count: number of instances in class
:param total_count: the number of all instances
:return: value of probability for considered class
>>> calculate_probabilities(20, 60)
0.3333333333333333
>>> calculate_probabilities(30, 100)
0.3
"""
# number of instances in specific class divided by number of all instances
return instance_count / total_count
# Calculate the variance
def calculate_variance(items: list, means: list, total_count: int) -> float:
"""
Calculate the variance
:param items: a list containing all items(gaussian distribution of all classes)
:param means: a list containing real mean values of each class
:param total_count: the number of all instances
:return: calculated variance for considered dataset
>>> items = gaussian_distribution(5.0, 1.0, 20)
>>> means = [5.011267842911003]
>>> total_count = 20
>>> calculate_variance([items], means, total_count)
0.9618530973487491
"""
squared_diff = [] # An empty list to store all squared differences
# iterate over number of elements in items
for i in range(len(items)):
# for loop iterates over number of elements in inner layer of items
for j in range(len(items[i])):
# appending squared differences to 'squared_diff' list
squared_diff.append((items[i][j] - means[i]) ** 2)
# one divided by (the number of all instances - number of classes) multiplied by
# sum of all squared differences
n_classes = len(means) # Number of classes in dataset
return 1 / (total_count - n_classes) * sum(squared_diff)
# Making predictions
def predict_y_values(
x_items: list, means: list, variance: float, probabilities: list
) -> list:
"""This function predicts new indexes(groups for our data)
:param x_items: a list containing all items(gaussian distribution of all classes)
:param means: a list containing real mean values of each class
:param variance: calculated value of variance by calculate_variance function
:param probabilities: a list containing all probabilities of classes
:return: a list containing predicted Y values
>>> x_items = [[6.288184753155463, 6.4494456086997705, 5.066335808938262,
... 4.235456349028368, 3.9078267848958586, 5.031334516831717,
... 3.977896829989127, 3.56317055489747, 5.199311976483754,
... 5.133374604658605, 5.546468300338232, 4.086029056264687,
... 5.005005283626573, 4.935258239627312, 3.494170998739258,
... 5.537997178661033, 5.320711100998849, 7.3891120432406865,
... 5.202969177309964, 4.855297691835079], [11.288184753155463,
... 11.44944560869977, 10.066335808938263, 9.235456349028368,
... 8.907826784895859, 10.031334516831716, 8.977896829989128,
... 8.56317055489747, 10.199311976483754, 10.133374604658606,
... 10.546468300338232, 9.086029056264687, 10.005005283626572,
... 9.935258239627313, 8.494170998739259, 10.537997178661033,
... 10.320711100998848, 12.389112043240686, 10.202969177309964,
... 9.85529769183508], [16.288184753155463, 16.449445608699772,
... 15.066335808938263, 14.235456349028368, 13.907826784895859,
... 15.031334516831716, 13.977896829989128, 13.56317055489747,
... 15.199311976483754, 15.133374604658606, 15.546468300338232,
... 14.086029056264687, 15.005005283626572, 14.935258239627313,
... 13.494170998739259, 15.537997178661033, 15.320711100998848,
... 17.389112043240686, 15.202969177309964, 14.85529769183508]]
>>> means = [5.011267842911003, 10.011267842911003, 15.011267842911002]
>>> variance = 0.9618530973487494
>>> probabilities = [0.3333333333333333, 0.3333333333333333, 0.3333333333333333]
>>> predict_y_values(x_items, means, variance,
... probabilities) # doctest: +NORMALIZE_WHITESPACE
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2]
"""
# An empty list to store generated discriminant values of all items in dataset for
# each class
results = []
# for loop iterates over number of elements in list
for i in range(len(x_items)):
# for loop iterates over number of inner items of each element
for j in range(len(x_items[i])):
temp = [] # to store all discriminant values of each item as a list
# for loop iterates over number of classes we have in our dataset
for k in range(len(x_items)):
# appending values of discriminants for each class to 'temp' list
temp.append(
x_items[i][j] * (means[k] / variance)
- (means[k] ** 2 / (2 * variance))
+ log(probabilities[k])
)
# appending discriminant values of each item to 'results' list
results.append(temp)
return [result.index(max(result)) for result in results]
# Calculating Accuracy
def accuracy(actual_y: list, predicted_y: list) -> float:
"""
Calculate the value of accuracy based-on predictions
:param actual_y:a list containing initial Y values generated by 'y_generator'
function
:param predicted_y: a list containing predicted Y values generated by
'predict_y_values' function
:return: percentage of accuracy
>>> actual_y = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
... 1, 1 ,1 ,1 ,1 ,1 ,1]
>>> predicted_y = [0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0,
... 0, 0, 1, 1, 1, 0, 1, 1, 1]
>>> accuracy(actual_y, predicted_y)
50.0
>>> actual_y = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
... 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
>>> predicted_y = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
... 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
>>> accuracy(actual_y, predicted_y)
100.0
"""
# iterate over one element of each list at a time (zip mode)
# prediction is correct if actual Y value equals to predicted Y value
correct = sum(1 for i, j in zip(actual_y, predicted_y) if i == j)
# percentage of accuracy equals to number of correct predictions divided by number
# of all data and multiplied by 100
return (correct / len(actual_y)) * 100
num = TypeVar("num")
def valid_input(
input_type: Callable[[object], num], # Usually float or int
input_msg: str,
err_msg: str,
condition: Callable[[num], bool] = lambda x: True,
default: str = None,
) -> num:
"""
Ask for user value and validate that it fulfill a condition.
:input_type: user input expected type of value
:input_msg: message to show user in the screen
:err_msg: message to show in the screen in case of error
:condition: function that represents the condition that user input is valid.
:default: Default value in case the user does not type anything
:return: user's input
"""
while True:
try:
user_input = input_type(input(input_msg).strip() or default)
if condition(user_input):
return user_input
else:
print(f"{user_input}: {err_msg}")
continue
except ValueError:
print(
f"{user_input}: Incorrect input type, expected {input_type.__name__!r}"
)
# Main Function
def main():
"""This function starts execution phase"""
while True:
print(" Linear Discriminant Analysis ".center(50, "*"))
print("*" * 50, "\n")
print("First of all we should specify the number of classes that")
print("we want to generate as training dataset")
# Trying to get number of classes
n_classes = valid_input(
input_type=int,
condition=lambda x: x > 0,
input_msg="Enter the number of classes (Data Groupings): ",
err_msg="Number of classes should be positive!",
)
print("-" * 100)
# Trying to get the value of standard deviation
std_dev = valid_input(
input_type=float,
condition=lambda x: x >= 0,
input_msg=(
"Enter the value of standard deviation"
"(Default value is 1.0 for all classes): "
),
err_msg="Standard deviation should not be negative!",
default="1.0",
)
print("-" * 100)
# Trying to get number of instances in classes and theirs means to generate
# dataset
counts = [] # An empty list to store instance counts of classes in dataset
for i in range(n_classes):
user_count = valid_input(
input_type=int,
condition=lambda x: x > 0,
input_msg=(f"Enter The number of instances for class_{i+1}: "),
err_msg="Number of instances should be positive!",
)
counts.append(user_count)
print("-" * 100)
# An empty list to store values of user-entered means of classes
user_means = []
for a in range(n_classes):
user_mean = valid_input(
input_type=float,
input_msg=(f"Enter the value of mean for class_{a+1}: "),
err_msg="This is an invalid value.",
)
user_means.append(user_mean)
print("-" * 100)
print("Standard deviation: ", std_dev)
# print out the number of instances in classes in separated line
for i, count in enumerate(counts, 1):
print(f"Number of instances in class_{i} is: {count}")
print("-" * 100)
# print out mean values of classes separated line
for i, user_mean in enumerate(user_means, 1):
print(f"Mean of class_{i} is: {user_mean}")
print("-" * 100)
# Generating training dataset drawn from gaussian distribution
x = [
gaussian_distribution(user_means[j], std_dev, counts[j])
for j in range(n_classes)
]
print("Generated Normal Distribution: \n", x)
print("-" * 100)
# Generating Ys to detecting corresponding classes
y = y_generator(n_classes, counts)
print("Generated Corresponding Ys: \n", y)
print("-" * 100)
# Calculating the value of actual mean for each class
actual_means = [calculate_mean(counts[k], x[k]) for k in range(n_classes)]
# for loop iterates over number of elements in 'actual_means' list and print
# out them in separated line
for i, actual_mean in enumerate(actual_means, 1):
print(f"Actual(Real) mean of class_{i} is: {actual_mean}")
print("-" * 100)
# Calculating the value of probabilities for each class
probabilities = [
calculate_probabilities(counts[i], sum(counts)) for i in range(n_classes)
]
# for loop iterates over number of elements in 'probabilities' list and print
# out them in separated line
for i, probability in enumerate(probabilities, 1):
print(f"Probability of class_{i} is: {probability}")
print("-" * 100)
# Calculating the values of variance for each class
variance = calculate_variance(x, actual_means, sum(counts))
print("Variance: ", variance)
print("-" * 100)
# Predicting Y values
# storing predicted Y values in 'pre_indexes' variable
pre_indexes = predict_y_values(x, actual_means, variance, probabilities)
print("-" * 100)
# Calculating Accuracy of the model
print(f"Accuracy: {accuracy(y, pre_indexes)}")
print("-" * 100)
print(" DONE ".center(100, "+"))
if input("Press any key to restart or 'q' for quit: ").strip().lower() == "q":
print("\n" + "GoodBye!".center(100, "-") + "\n")
break
system("cls" if name == "nt" else "clear")
if __name__ == "__main__":
main()
| """
Linear Discriminant Analysis
Assumptions About Data :
1. The input variables has a gaussian distribution.
2. The variance calculated for each input variables by class grouping is the
same.
3. The mix of classes in your training set is representative of the problem.
Learning The Model :
The LDA model requires the estimation of statistics from the training data :
1. Mean of each input value for each class.
2. Probability of an instance belong to each class.
3. Covariance for the input data for each class
Calculate the class means :
mean(x) = 1/n ( for i = 1 to i = n --> sum(xi))
Calculate the class probabilities :
P(y = 0) = count(y = 0) / (count(y = 0) + count(y = 1))
P(y = 1) = count(y = 1) / (count(y = 0) + count(y = 1))
Calculate the variance :
We can calculate the variance for dataset in two steps :
1. Calculate the squared difference for each input variable from the
group mean.
2. Calculate the mean of the squared difference.
------------------------------------------------
Squared_Difference = (x - mean(k)) ** 2
Variance = (1 / (count(x) - count(classes))) *
(for i = 1 to i = n --> sum(Squared_Difference(xi)))
Making Predictions :
discriminant(x) = x * (mean / variance) -
((mean ** 2) / (2 * variance)) + Ln(probability)
---------------------------------------------------------------------------
After calculating the discriminant value for each class, the class with the
largest discriminant value is taken as the prediction.
Author: @EverLookNeverSee
"""
from collections.abc import Callable
from math import log
from os import name, system
from random import gauss, seed
from typing import TypeVar
# Make a training dataset drawn from a gaussian distribution
def gaussian_distribution(mean: float, std_dev: float, instance_count: int) -> list:
"""
Generate gaussian distribution instances based-on given mean and standard deviation
:param mean: mean value of class
:param std_dev: value of standard deviation entered by usr or default value of it
:param instance_count: instance number of class
:return: a list containing generated values based-on given mean, std_dev and
instance_count
>>> gaussian_distribution(5.0, 1.0, 20) # doctest: +NORMALIZE_WHITESPACE
[6.288184753155463, 6.4494456086997705, 5.066335808938262, 4.235456349028368,
3.9078267848958586, 5.031334516831717, 3.977896829989127, 3.56317055489747,
5.199311976483754, 5.133374604658605, 5.546468300338232, 4.086029056264687,
5.005005283626573, 4.935258239627312, 3.494170998739258, 5.537997178661033,
5.320711100998849, 7.3891120432406865, 5.202969177309964, 4.855297691835079]
"""
seed(1)
return [gauss(mean, std_dev) for _ in range(instance_count)]
# Make corresponding Y flags to detecting classes
def y_generator(class_count: int, instance_count: list) -> list:
"""
Generate y values for corresponding classes
:param class_count: Number of classes(data groupings) in dataset
:param instance_count: number of instances in class
:return: corresponding values for data groupings in dataset
>>> y_generator(1, [10])
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
>>> y_generator(2, [5, 10])
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
>>> y_generator(4, [10, 5, 15, 20]) # doctest: +NORMALIZE_WHITESPACE
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]
"""
return [k for k in range(class_count) for _ in range(instance_count[k])]
# Calculate the class means
def calculate_mean(instance_count: int, items: list) -> float:
"""
Calculate given class mean
:param instance_count: Number of instances in class
:param items: items that related to specific class(data grouping)
:return: calculated actual mean of considered class
>>> items = gaussian_distribution(5.0, 1.0, 20)
>>> calculate_mean(len(items), items)
5.011267842911003
"""
# the sum of all items divided by number of instances
return sum(items) / instance_count
# Calculate the class probabilities
def calculate_probabilities(instance_count: int, total_count: int) -> float:
"""
Calculate the probability that a given instance will belong to which class
:param instance_count: number of instances in class
:param total_count: the number of all instances
:return: value of probability for considered class
>>> calculate_probabilities(20, 60)
0.3333333333333333
>>> calculate_probabilities(30, 100)
0.3
"""
# number of instances in specific class divided by number of all instances
return instance_count / total_count
# Calculate the variance
def calculate_variance(items: list, means: list, total_count: int) -> float:
"""
Calculate the variance
:param items: a list containing all items(gaussian distribution of all classes)
:param means: a list containing real mean values of each class
:param total_count: the number of all instances
:return: calculated variance for considered dataset
>>> items = gaussian_distribution(5.0, 1.0, 20)
>>> means = [5.011267842911003]
>>> total_count = 20
>>> calculate_variance([items], means, total_count)
0.9618530973487491
"""
squared_diff = [] # An empty list to store all squared differences
# iterate over number of elements in items
for i in range(len(items)):
# for loop iterates over number of elements in inner layer of items
for j in range(len(items[i])):
# appending squared differences to 'squared_diff' list
squared_diff.append((items[i][j] - means[i]) ** 2)
# one divided by (the number of all instances - number of classes) multiplied by
# sum of all squared differences
n_classes = len(means) # Number of classes in dataset
return 1 / (total_count - n_classes) * sum(squared_diff)
# Making predictions
def predict_y_values(
x_items: list, means: list, variance: float, probabilities: list
) -> list:
"""This function predicts new indexes(groups for our data)
:param x_items: a list containing all items(gaussian distribution of all classes)
:param means: a list containing real mean values of each class
:param variance: calculated value of variance by calculate_variance function
:param probabilities: a list containing all probabilities of classes
:return: a list containing predicted Y values
>>> x_items = [[6.288184753155463, 6.4494456086997705, 5.066335808938262,
... 4.235456349028368, 3.9078267848958586, 5.031334516831717,
... 3.977896829989127, 3.56317055489747, 5.199311976483754,
... 5.133374604658605, 5.546468300338232, 4.086029056264687,
... 5.005005283626573, 4.935258239627312, 3.494170998739258,
... 5.537997178661033, 5.320711100998849, 7.3891120432406865,
... 5.202969177309964, 4.855297691835079], [11.288184753155463,
... 11.44944560869977, 10.066335808938263, 9.235456349028368,
... 8.907826784895859, 10.031334516831716, 8.977896829989128,
... 8.56317055489747, 10.199311976483754, 10.133374604658606,
... 10.546468300338232, 9.086029056264687, 10.005005283626572,
... 9.935258239627313, 8.494170998739259, 10.537997178661033,
... 10.320711100998848, 12.389112043240686, 10.202969177309964,
... 9.85529769183508], [16.288184753155463, 16.449445608699772,
... 15.066335808938263, 14.235456349028368, 13.907826784895859,
... 15.031334516831716, 13.977896829989128, 13.56317055489747,
... 15.199311976483754, 15.133374604658606, 15.546468300338232,
... 14.086029056264687, 15.005005283626572, 14.935258239627313,
... 13.494170998739259, 15.537997178661033, 15.320711100998848,
... 17.389112043240686, 15.202969177309964, 14.85529769183508]]
>>> means = [5.011267842911003, 10.011267842911003, 15.011267842911002]
>>> variance = 0.9618530973487494
>>> probabilities = [0.3333333333333333, 0.3333333333333333, 0.3333333333333333]
>>> predict_y_values(x_items, means, variance,
... probabilities) # doctest: +NORMALIZE_WHITESPACE
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2]
"""
# An empty list to store generated discriminant values of all items in dataset for
# each class
results = []
# for loop iterates over number of elements in list
for i in range(len(x_items)):
# for loop iterates over number of inner items of each element
for j in range(len(x_items[i])):
temp = [] # to store all discriminant values of each item as a list
# for loop iterates over number of classes we have in our dataset
for k in range(len(x_items)):
# appending values of discriminants for each class to 'temp' list
temp.append(
x_items[i][j] * (means[k] / variance)
- (means[k] ** 2 / (2 * variance))
+ log(probabilities[k])
)
# appending discriminant values of each item to 'results' list
results.append(temp)
return [result.index(max(result)) for result in results]
# Calculating Accuracy
def accuracy(actual_y: list, predicted_y: list) -> float:
"""
Calculate the value of accuracy based-on predictions
:param actual_y:a list containing initial Y values generated by 'y_generator'
function
:param predicted_y: a list containing predicted Y values generated by
'predict_y_values' function
:return: percentage of accuracy
>>> actual_y = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
... 1, 1 ,1 ,1 ,1 ,1 ,1]
>>> predicted_y = [0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0,
... 0, 0, 1, 1, 1, 0, 1, 1, 1]
>>> accuracy(actual_y, predicted_y)
50.0
>>> actual_y = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
... 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
>>> predicted_y = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
... 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
>>> accuracy(actual_y, predicted_y)
100.0
"""
# iterate over one element of each list at a time (zip mode)
# prediction is correct if actual Y value equals to predicted Y value
correct = sum(1 for i, j in zip(actual_y, predicted_y) if i == j)
# percentage of accuracy equals to number of correct predictions divided by number
# of all data and multiplied by 100
return (correct / len(actual_y)) * 100
num = TypeVar("num")
def valid_input(
input_type: Callable[[object], num], # Usually float or int
input_msg: str,
err_msg: str,
condition: Callable[[num], bool] = lambda x: True,
default: str = None,
) -> num:
"""
Ask for user value and validate that it fulfill a condition.
:input_type: user input expected type of value
:input_msg: message to show user in the screen
:err_msg: message to show in the screen in case of error
:condition: function that represents the condition that user input is valid.
:default: Default value in case the user does not type anything
:return: user's input
"""
while True:
try:
user_input = input_type(input(input_msg).strip() or default)
if condition(user_input):
return user_input
else:
print(f"{user_input}: {err_msg}")
continue
except ValueError:
print(
f"{user_input}: Incorrect input type, expected {input_type.__name__!r}"
)
# Main Function
def main():
"""This function starts execution phase"""
while True:
print(" Linear Discriminant Analysis ".center(50, "*"))
print("*" * 50, "\n")
print("First of all we should specify the number of classes that")
print("we want to generate as training dataset")
# Trying to get number of classes
n_classes = valid_input(
input_type=int,
condition=lambda x: x > 0,
input_msg="Enter the number of classes (Data Groupings): ",
err_msg="Number of classes should be positive!",
)
print("-" * 100)
# Trying to get the value of standard deviation
std_dev = valid_input(
input_type=float,
condition=lambda x: x >= 0,
input_msg=(
"Enter the value of standard deviation"
"(Default value is 1.0 for all classes): "
),
err_msg="Standard deviation should not be negative!",
default="1.0",
)
print("-" * 100)
# Trying to get number of instances in classes and theirs means to generate
# dataset
counts = [] # An empty list to store instance counts of classes in dataset
for i in range(n_classes):
user_count = valid_input(
input_type=int,
condition=lambda x: x > 0,
input_msg=(f"Enter The number of instances for class_{i+1}: "),
err_msg="Number of instances should be positive!",
)
counts.append(user_count)
print("-" * 100)
# An empty list to store values of user-entered means of classes
user_means = []
for a in range(n_classes):
user_mean = valid_input(
input_type=float,
input_msg=(f"Enter the value of mean for class_{a+1}: "),
err_msg="This is an invalid value.",
)
user_means.append(user_mean)
print("-" * 100)
print("Standard deviation: ", std_dev)
# print out the number of instances in classes in separated line
for i, count in enumerate(counts, 1):
print(f"Number of instances in class_{i} is: {count}")
print("-" * 100)
# print out mean values of classes separated line
for i, user_mean in enumerate(user_means, 1):
print(f"Mean of class_{i} is: {user_mean}")
print("-" * 100)
# Generating training dataset drawn from gaussian distribution
x = [
gaussian_distribution(user_means[j], std_dev, counts[j])
for j in range(n_classes)
]
print("Generated Normal Distribution: \n", x)
print("-" * 100)
# Generating Ys to detecting corresponding classes
y = y_generator(n_classes, counts)
print("Generated Corresponding Ys: \n", y)
print("-" * 100)
# Calculating the value of actual mean for each class
actual_means = [calculate_mean(counts[k], x[k]) for k in range(n_classes)]
# for loop iterates over number of elements in 'actual_means' list and print
# out them in separated line
for i, actual_mean in enumerate(actual_means, 1):
print(f"Actual(Real) mean of class_{i} is: {actual_mean}")
print("-" * 100)
# Calculating the value of probabilities for each class
probabilities = [
calculate_probabilities(counts[i], sum(counts)) for i in range(n_classes)
]
# for loop iterates over number of elements in 'probabilities' list and print
# out them in separated line
for i, probability in enumerate(probabilities, 1):
print(f"Probability of class_{i} is: {probability}")
print("-" * 100)
# Calculating the values of variance for each class
variance = calculate_variance(x, actual_means, sum(counts))
print("Variance: ", variance)
print("-" * 100)
# Predicting Y values
# storing predicted Y values in 'pre_indexes' variable
pre_indexes = predict_y_values(x, actual_means, variance, probabilities)
print("-" * 100)
# Calculating Accuracy of the model
print(f"Accuracy: {accuracy(y, pre_indexes)}")
print("-" * 100)
print(" DONE ".center(100, "+"))
if input("Press any key to restart or 'q' for quit: ").strip().lower() == "q":
print("\n" + "GoodBye!".center(100, "-") + "\n")
break
system("cls" if name == "nt" else "clear")
if __name__ == "__main__":
main()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| import operator
def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
"""
Strand sort implementation
source: https://en.wikipedia.org/wiki/Strand_sort
:param arr: Unordered input list
:param reverse: Descent ordering flag
:param solution: Ordered items container
Examples:
>>> strand_sort([4, 2, 5, 3, 0, 1])
[0, 1, 2, 3, 4, 5]
>>> strand_sort([4, 2, 5, 3, 0, 1], reverse=True)
[5, 4, 3, 2, 1, 0]
"""
_operator = operator.lt if reverse else operator.gt
solution = solution or []
if not arr:
return solution
sublist = [arr.pop(0)]
for i, item in enumerate(arr):
if _operator(item, sublist[-1]):
sublist.append(item)
arr.pop(i)
# merging sublist into solution list
if not solution:
solution.extend(sublist)
else:
while sublist:
item = sublist.pop(0)
for i, xx in enumerate(solution):
if not _operator(item, xx):
solution.insert(i, item)
break
else:
solution.append(item)
strand_sort(arr, reverse, solution)
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| import operator
def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
"""
Strand sort implementation
source: https://en.wikipedia.org/wiki/Strand_sort
:param arr: Unordered input list
:param reverse: Descent ordering flag
:param solution: Ordered items container
Examples:
>>> strand_sort([4, 2, 5, 3, 0, 1])
[0, 1, 2, 3, 4, 5]
>>> strand_sort([4, 2, 5, 3, 0, 1], reverse=True)
[5, 4, 3, 2, 1, 0]
"""
_operator = operator.lt if reverse else operator.gt
solution = solution or []
if not arr:
return solution
sublist = [arr.pop(0)]
for i, item in enumerate(arr):
if _operator(item, sublist[-1]):
sublist.append(item)
arr.pop(i)
# merging sublist into solution list
if not solution:
solution.extend(sublist)
else:
while sublist:
item = sublist.pop(0)
for i, xx in enumerate(solution):
if not _operator(item, xx):
solution.insert(i, item)
break
else:
solution.append(item)
strand_sort(arr, reverse, solution)
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
Coin sums
Problem 31: https://projecteuler.net/problem=31
In England the currency is made up of pound, £, and pence, p, and there are
eight coins in general circulation:
1p, 2p, 5p, 10p, 20p, 50p, £1 (100p) and £2 (200p).
It is possible to make £2 in the following way:
1×£1 + 1×50p + 2×20p + 1×5p + 1×2p + 3×1p
How many different ways can £2 be made using any number of coins?
"""
def one_pence() -> int:
return 1
def two_pence(x: int) -> int:
return 0 if x < 0 else two_pence(x - 2) + one_pence()
def five_pence(x: int) -> int:
return 0 if x < 0 else five_pence(x - 5) + two_pence(x)
def ten_pence(x: int) -> int:
return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)
def twenty_pence(x: int) -> int:
return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)
def fifty_pence(x: int) -> int:
return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)
def one_pound(x: int) -> int:
return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)
def two_pound(x: int) -> int:
return 0 if x < 0 else two_pound(x - 200) + one_pound(x)
def solution(n: int = 200) -> int:
"""Returns the number of different ways can n pence be made using any number of
coins?
>>> solution(500)
6295434
>>> solution(200)
73682
>>> solution(50)
451
>>> solution(10)
11
"""
return two_pound(n)
if __name__ == "__main__":
print(solution(int(input().strip())))
| """
Coin sums
Problem 31: https://projecteuler.net/problem=31
In England the currency is made up of pound, £, and pence, p, and there are
eight coins in general circulation:
1p, 2p, 5p, 10p, 20p, 50p, £1 (100p) and £2 (200p).
It is possible to make £2 in the following way:
1×£1 + 1×50p + 2×20p + 1×5p + 1×2p + 3×1p
How many different ways can £2 be made using any number of coins?
"""
def one_pence() -> int:
return 1
def two_pence(x: int) -> int:
return 0 if x < 0 else two_pence(x - 2) + one_pence()
def five_pence(x: int) -> int:
return 0 if x < 0 else five_pence(x - 5) + two_pence(x)
def ten_pence(x: int) -> int:
return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)
def twenty_pence(x: int) -> int:
return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)
def fifty_pence(x: int) -> int:
return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)
def one_pound(x: int) -> int:
return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)
def two_pound(x: int) -> int:
return 0 if x < 0 else two_pound(x - 200) + one_pound(x)
def solution(n: int = 200) -> int:
"""Returns the number of different ways can n pence be made using any number of
coins?
>>> solution(500)
6295434
>>> solution(200)
73682
>>> solution(50)
451
>>> solution(10)
11
"""
return two_pound(n)
if __name__ == "__main__":
print(solution(int(input().strip())))
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| -1 |
||
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
@author: MatteoRaso
"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
"""
An implementation of the Monte Carlo method used to find pi.
1. Draw a 2x2 square centred at (0,0).
2. Inscribe a circle within the square.
3. For each iteration, place a dot anywhere in the square.
a. Record the number of dots within the circle.
4. After all the dots are placed, divide the dots in the circle by the total.
5. Multiply this value by 4 to get your estimate of pi.
6. Print the estimated and numpy value of pi
"""
# A local function to see if a dot lands in the circle.
def is_in_circle(x: float, y: float) -> bool:
distance_from_centre = sqrt((x**2) + (y**2))
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
proportion = mean(
int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
for _ in range(iterations)
)
# The ratio of the area for circle to square is pi/4.
pi_estimate = proportion * 4
print(f"The estimated value of pi is {pi_estimate}")
print(f"The numpy value of pi is {pi}")
print(f"The total error is {abs(pi - pi_estimate)}")
def area_under_curve_estimator(
iterations: int,
function_to_integrate: Callable[[float], float],
min_value: float = 0.0,
max_value: float = 1.0,
) -> float:
"""
An implementation of the Monte Carlo method to find area under
a single variable non-negative real-valued continuous function,
say f(x), where x lies within a continuous bounded interval,
say [min_value, max_value], where min_value and max_value are
finite numbers
1. Let x be a uniformly distributed random variable between min_value to
max_value
2. Expected value of f(x) =
(integrate f(x) from min_value to max_value)/(max_value - min_value)
3. Finding expected value of f(x):
a. Repeatedly draw x from uniform distribution
b. Evaluate f(x) at each of the drawn x values
c. Expected value = average of the function evaluations
4. Estimated value of integral = Expected value * (max_value - min_value)
5. Returns estimated value
"""
return mean(
function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
) * (max_value - min_value)
def area_under_line_estimator_check(
iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
"""
Checks estimation error for area_under_curve_estimator function
for f(x) = x where x lies within min_value to max_value
1. Calls "area_under_curve_estimator" function
2. Compares with the expected value
3. Prints estimated, expected and error value
"""
def identity_function(x: float) -> float:
"""
Represents identity function
>>> [function_to_integrate(x) for x in [-2.0, -1.0, 0.0, 1.0, 2.0]]
[-2.0, -1.0, 0.0, 1.0, 2.0]
"""
return x
estimated_value = area_under_curve_estimator(
iterations, identity_function, min_value, max_value
)
expected_value = (max_value * max_value - min_value * min_value) / 2
print("******************")
print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
print(f"Estimated value is {estimated_value}")
print(f"Expected value is {expected_value}")
print(f"Total error is {abs(estimated_value - expected_value)}")
print("******************")
def pi_estimator_using_area_under_curve(iterations: int) -> None:
"""
Area under curve y = sqrt(4 - x^2) where x lies in 0 to 2 is equal to pi
"""
def function_to_integrate(x: float) -> float:
"""
Represents semi-circle with radius 2
>>> [function_to_integrate(x) for x in [-2.0, 0.0, 2.0]]
[0.0, 2.0, 0.0]
"""
return sqrt(4.0 - x * x)
estimated_value = area_under_curve_estimator(
iterations, function_to_integrate, 0.0, 2.0
)
print("******************")
print("Estimating pi using area_under_curve_estimator")
print(f"Estimated value is {estimated_value}")
print(f"Expected value is {pi}")
print(f"Total error is {abs(estimated_value - pi)}")
print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
| """
@author: MatteoRaso
"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
"""
An implementation of the Monte Carlo method used to find pi.
1. Draw a 2x2 square centred at (0,0).
2. Inscribe a circle within the square.
3. For each iteration, place a dot anywhere in the square.
a. Record the number of dots within the circle.
4. After all the dots are placed, divide the dots in the circle by the total.
5. Multiply this value by 4 to get your estimate of pi.
6. Print the estimated and numpy value of pi
"""
# A local function to see if a dot lands in the circle.
def is_in_circle(x: float, y: float) -> bool:
distance_from_centre = sqrt((x**2) + (y**2))
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
proportion = mean(
int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
for _ in range(iterations)
)
# The ratio of the area for circle to square is pi/4.
pi_estimate = proportion * 4
print(f"The estimated value of pi is {pi_estimate}")
print(f"The numpy value of pi is {pi}")
print(f"The total error is {abs(pi - pi_estimate)}")
def area_under_curve_estimator(
iterations: int,
function_to_integrate: Callable[[float], float],
min_value: float = 0.0,
max_value: float = 1.0,
) -> float:
"""
An implementation of the Monte Carlo method to find area under
a single variable non-negative real-valued continuous function,
say f(x), where x lies within a continuous bounded interval,
say [min_value, max_value], where min_value and max_value are
finite numbers
1. Let x be a uniformly distributed random variable between min_value to
max_value
2. Expected value of f(x) =
(integrate f(x) from min_value to max_value)/(max_value - min_value)
3. Finding expected value of f(x):
a. Repeatedly draw x from uniform distribution
b. Evaluate f(x) at each of the drawn x values
c. Expected value = average of the function evaluations
4. Estimated value of integral = Expected value * (max_value - min_value)
5. Returns estimated value
"""
return mean(
function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
) * (max_value - min_value)
def area_under_line_estimator_check(
iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
"""
Checks estimation error for area_under_curve_estimator function
for f(x) = x where x lies within min_value to max_value
1. Calls "area_under_curve_estimator" function
2. Compares with the expected value
3. Prints estimated, expected and error value
"""
def identity_function(x: float) -> float:
"""
Represents identity function
>>> [function_to_integrate(x) for x in [-2.0, -1.0, 0.0, 1.0, 2.0]]
[-2.0, -1.0, 0.0, 1.0, 2.0]
"""
return x
estimated_value = area_under_curve_estimator(
iterations, identity_function, min_value, max_value
)
expected_value = (max_value * max_value - min_value * min_value) / 2
print("******************")
print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
print(f"Estimated value is {estimated_value}")
print(f"Expected value is {expected_value}")
print(f"Total error is {abs(estimated_value - expected_value)}")
print("******************")
def pi_estimator_using_area_under_curve(iterations: int) -> None:
"""
Area under curve y = sqrt(4 - x^2) where x lies in 0 to 2 is equal to pi
"""
def function_to_integrate(x: float) -> float:
"""
Represents semi-circle with radius 2
>>> [function_to_integrate(x) for x in [-2.0, 0.0, 2.0]]
[0.0, 2.0, 0.0]
"""
return sqrt(4.0 - x * x)
estimated_value = area_under_curve_estimator(
iterations, function_to_integrate, 0.0, 2.0
)
print("******************")
print("Estimating pi using area_under_curve_estimator")
print(f"Estimated value is {estimated_value}")
print(f"Expected value is {pi}")
print(f"Total error is {abs(estimated_value - pi)}")
print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| from __future__ import annotations
from decimal import Decimal
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
"""
A matrix multiplied with its inverse gives the identity matrix.
This function finds the inverse of a 2x2 matrix.
If the determinant of a matrix is 0, its inverse does not exist.
Sources for fixing inaccurate float arithmetic:
https://stackoverflow.com/questions/6563058/how-do-i-use-accurate-float-arithmetic-in-python
https://docs.python.org/3/library/decimal.html
>>> inverse_of_matrix([[2, 5], [2, 0]])
[[0.0, 0.5], [0.2, -0.2]]
>>> inverse_of_matrix([[2.5, 5], [1, 2]])
Traceback (most recent call last):
...
ValueError: This matrix has no inverse.
>>> inverse_of_matrix([[12, -16], [-9, 0]])
[[0.0, -0.1111111111111111], [-0.0625, -0.08333333333333333]]
>>> inverse_of_matrix([[12, 3], [16, 8]])
[[0.16666666666666666, -0.0625], [-0.3333333333333333, 0.25]]
>>> inverse_of_matrix([[10, 5], [3, 2.5]])
[[0.25, -0.5], [-0.3, 1.0]]
"""
d = Decimal # An abbreviation for conciseness
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(matrix) != 2 or len(matrix[0]) != 2 or len(matrix[1]) != 2:
raise ValueError("Please provide a matrix of size 2x2.")
# Calculate the determinant of the matrix
determinant = d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
if determinant == 0:
raise ValueError("This matrix has no inverse.")
# Creates a copy of the matrix with swapped positions of the elements
swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [[float(d(n) / determinant) or 0.0 for n in row] for row in swapped_matrix]
| from __future__ import annotations
from decimal import Decimal
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
"""
A matrix multiplied with its inverse gives the identity matrix.
This function finds the inverse of a 2x2 matrix.
If the determinant of a matrix is 0, its inverse does not exist.
Sources for fixing inaccurate float arithmetic:
https://stackoverflow.com/questions/6563058/how-do-i-use-accurate-float-arithmetic-in-python
https://docs.python.org/3/library/decimal.html
>>> inverse_of_matrix([[2, 5], [2, 0]])
[[0.0, 0.5], [0.2, -0.2]]
>>> inverse_of_matrix([[2.5, 5], [1, 2]])
Traceback (most recent call last):
...
ValueError: This matrix has no inverse.
>>> inverse_of_matrix([[12, -16], [-9, 0]])
[[0.0, -0.1111111111111111], [-0.0625, -0.08333333333333333]]
>>> inverse_of_matrix([[12, 3], [16, 8]])
[[0.16666666666666666, -0.0625], [-0.3333333333333333, 0.25]]
>>> inverse_of_matrix([[10, 5], [3, 2.5]])
[[0.25, -0.5], [-0.3, 1.0]]
"""
d = Decimal # An abbreviation for conciseness
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(matrix) != 2 or len(matrix[0]) != 2 or len(matrix[1]) != 2:
raise ValueError("Please provide a matrix of size 2x2.")
# Calculate the determinant of the matrix
determinant = d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
if determinant == 0:
raise ValueError("This matrix has no inverse.")
# Creates a copy of the matrix with swapped positions of the elements
swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [[float(d(n) / determinant) or 0.0 for n in row] for row in swapped_matrix]
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| # floyd_warshall.py
"""
The problem is to find the shortest distance between all pairs of vertices in a
weighted directed graph that can have negative edge weights.
"""
def _print_dist(dist, v):
print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
for i in range(v):
for j in range(v):
if dist[i][j] != float("inf"):
print(int(dist[i][j]), end="\t")
else:
print("INF", end="\t")
print()
def floyd_warshall(graph, v):
"""
:param graph: 2D array calculated from weight[edge[i, j]]
:type graph: List[List[float]]
:param v: number of vertices
:type v: int
:return: shortest distance between all vertex pairs
distance[u][v] will contain the shortest distance from vertex u to v.
1. For all edges from v to n, distance[i][j] = weight(edge(i, j)).
3. The algorithm then performs distance[i][j] = min(distance[i][j], distance[i][k] +
distance[k][j]) for each possible pair i, j of vertices.
4. The above is repeated for each vertex k in the graph.
5. Whenever distance[i][j] is given a new minimum value, next vertex[i][j] is
updated to the next vertex[i][k].
"""
dist = [[float("inf") for _ in range(v)] for _ in range(v)]
for i in range(v):
for j in range(v):
dist[i][j] = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(v):
# looping through rows of graph array
for i in range(v):
# looping through columns of graph array
for j in range(v):
if (
dist[i][k] != float("inf")
and dist[k][j] != float("inf")
and dist[i][k] + dist[k][j] < dist[i][j]
):
dist[i][j] = dist[i][k] + dist[k][j]
_print_dist(dist, v)
return dist, v
if __name__ == "__main__":
v = int(input("Enter number of vertices: "))
e = int(input("Enter number of edges: "))
graph = [[float("inf") for i in range(v)] for j in range(v)]
for i in range(v):
graph[i][i] = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print("\nEdge ", i + 1)
src = int(input("Enter source:"))
dst = int(input("Enter destination:"))
weight = float(input("Enter weight:"))
graph[src][dst] = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| # floyd_warshall.py
"""
The problem is to find the shortest distance between all pairs of vertices in a
weighted directed graph that can have negative edge weights.
"""
def _print_dist(dist, v):
print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
for i in range(v):
for j in range(v):
if dist[i][j] != float("inf"):
print(int(dist[i][j]), end="\t")
else:
print("INF", end="\t")
print()
def floyd_warshall(graph, v):
"""
:param graph: 2D array calculated from weight[edge[i, j]]
:type graph: List[List[float]]
:param v: number of vertices
:type v: int
:return: shortest distance between all vertex pairs
distance[u][v] will contain the shortest distance from vertex u to v.
1. For all edges from v to n, distance[i][j] = weight(edge(i, j)).
3. The algorithm then performs distance[i][j] = min(distance[i][j], distance[i][k] +
distance[k][j]) for each possible pair i, j of vertices.
4. The above is repeated for each vertex k in the graph.
5. Whenever distance[i][j] is given a new minimum value, next vertex[i][j] is
updated to the next vertex[i][k].
"""
dist = [[float("inf") for _ in range(v)] for _ in range(v)]
for i in range(v):
for j in range(v):
dist[i][j] = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(v):
# looping through rows of graph array
for i in range(v):
# looping through columns of graph array
for j in range(v):
if (
dist[i][k] != float("inf")
and dist[k][j] != float("inf")
and dist[i][k] + dist[k][j] < dist[i][j]
):
dist[i][j] = dist[i][k] + dist[k][j]
_print_dist(dist, v)
return dist, v
if __name__ == "__main__":
v = int(input("Enter number of vertices: "))
e = int(input("Enter number of edges: "))
graph = [[float("inf") for i in range(v)] for j in range(v)]
for i in range(v):
graph[i][i] = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print("\nEdge ", i + 1)
src = int(input("Enter source:"))
dst = int(input("Enter destination:"))
weight = float(input("Enter weight:"))
graph[src][dst] = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
Python implementation of the MSD radix sort algorithm.
It used the binary representation of the integers to sort
them.
https://en.wikipedia.org/wiki/Radix_sort
"""
from __future__ import annotations
def msd_radix_sort(list_of_ints: list[int]) -> list[int]:
"""
Implementation of the MSD radix sort algorithm. Only works
with positive integers
:param list_of_ints: A list of integers
:return: Returns the sorted list
>>> msd_radix_sort([40, 12, 1, 100, 4])
[1, 4, 12, 40, 100]
>>> msd_radix_sort([])
[]
>>> msd_radix_sort([123, 345, 123, 80])
[80, 123, 123, 345]
>>> msd_radix_sort([1209, 834598, 1, 540402, 45])
[1, 45, 1209, 540402, 834598]
>>> msd_radix_sort([-1, 34, 45])
Traceback (most recent call last):
...
ValueError: All numbers must be positive
"""
if not list_of_ints:
return []
if min(list_of_ints) < 0:
raise ValueError("All numbers must be positive")
most_bits = max(len(bin(x)[2:]) for x in list_of_ints)
return _msd_radix_sort(list_of_ints, most_bits)
def _msd_radix_sort(list_of_ints: list[int], bit_position: int) -> list[int]:
"""
Sort the given list based on the bit at bit_position. Numbers with a
0 at that position will be at the start of the list, numbers with a
1 at the end.
:param list_of_ints: A list of integers
:param bit_position: the position of the bit that gets compared
:return: Returns a partially sorted list
>>> _msd_radix_sort([45, 2, 32], 1)
[2, 32, 45]
>>> _msd_radix_sort([10, 4, 12], 2)
[4, 12, 10]
"""
if bit_position == 0 or len(list_of_ints) in [0, 1]:
return list_of_ints
zeros = []
ones = []
# Split numbers based on bit at bit_position from the right
for number in list_of_ints:
if (number >> (bit_position - 1)) & 1:
# number has a one at bit bit_position
ones.append(number)
else:
# number has a zero at bit bit_position
zeros.append(number)
# recursively split both lists further
zeros = _msd_radix_sort(zeros, bit_position - 1)
ones = _msd_radix_sort(ones, bit_position - 1)
# recombine lists
res = zeros
res.extend(ones)
return res
def msd_radix_sort_inplace(list_of_ints: list[int]):
"""
Inplace implementation of the MSD radix sort algorithm.
Sorts based on the binary representation of the integers.
>>> lst = [1, 345, 23, 89, 0, 3]
>>> msd_radix_sort_inplace(lst)
>>> lst == sorted(lst)
True
>>> lst = [1, 43, 0, 0, 0, 24, 3, 3]
>>> msd_radix_sort_inplace(lst)
>>> lst == sorted(lst)
True
>>> lst = []
>>> msd_radix_sort_inplace(lst)
>>> lst == []
True
>>> lst = [-1, 34, 23, 4, -42]
>>> msd_radix_sort_inplace(lst)
Traceback (most recent call last):
...
ValueError: All numbers must be positive
"""
length = len(list_of_ints)
if not list_of_ints or length == 1:
return
if min(list_of_ints) < 0:
raise ValueError("All numbers must be positive")
most_bits = max(len(bin(x)[2:]) for x in list_of_ints)
_msd_radix_sort_inplace(list_of_ints, most_bits, 0, length)
def _msd_radix_sort_inplace(
list_of_ints: list[int], bit_position: int, begin_index: int, end_index: int
):
"""
Sort the given list based on the bit at bit_position. Numbers with a
0 at that position will be at the start of the list, numbers with a
1 at the end.
>>> lst = [45, 2, 32, 24, 534, 2932]
>>> _msd_radix_sort_inplace(lst, 1, 0, 3)
>>> lst == [32, 2, 45, 24, 534, 2932]
True
>>> lst = [0, 2, 1, 3, 12, 10, 4, 90, 54, 2323, 756]
>>> _msd_radix_sort_inplace(lst, 2, 4, 7)
>>> lst == [0, 2, 1, 3, 12, 4, 10, 90, 54, 2323, 756]
True
"""
if bit_position == 0 or end_index - begin_index <= 1:
return
bit_position -= 1
i = begin_index
j = end_index - 1
while i <= j:
changed = False
if not ((list_of_ints[i] >> bit_position) & 1):
# found zero at the beginning
i += 1
changed = True
if (list_of_ints[j] >> bit_position) & 1:
# found one at the end
j -= 1
changed = True
if changed:
continue
list_of_ints[i], list_of_ints[j] = list_of_ints[j], list_of_ints[i]
j -= 1
if not j == i:
i += 1
_msd_radix_sort_inplace(list_of_ints, bit_position, begin_index, i)
_msd_radix_sort_inplace(list_of_ints, bit_position, i, end_index)
if __name__ == "__main__":
import doctest
doctest.testmod()
| """
Python implementation of the MSD radix sort algorithm.
It used the binary representation of the integers to sort
them.
https://en.wikipedia.org/wiki/Radix_sort
"""
from __future__ import annotations
def msd_radix_sort(list_of_ints: list[int]) -> list[int]:
"""
Implementation of the MSD radix sort algorithm. Only works
with positive integers
:param list_of_ints: A list of integers
:return: Returns the sorted list
>>> msd_radix_sort([40, 12, 1, 100, 4])
[1, 4, 12, 40, 100]
>>> msd_radix_sort([])
[]
>>> msd_radix_sort([123, 345, 123, 80])
[80, 123, 123, 345]
>>> msd_radix_sort([1209, 834598, 1, 540402, 45])
[1, 45, 1209, 540402, 834598]
>>> msd_radix_sort([-1, 34, 45])
Traceback (most recent call last):
...
ValueError: All numbers must be positive
"""
if not list_of_ints:
return []
if min(list_of_ints) < 0:
raise ValueError("All numbers must be positive")
most_bits = max(len(bin(x)[2:]) for x in list_of_ints)
return _msd_radix_sort(list_of_ints, most_bits)
def _msd_radix_sort(list_of_ints: list[int], bit_position: int) -> list[int]:
"""
Sort the given list based on the bit at bit_position. Numbers with a
0 at that position will be at the start of the list, numbers with a
1 at the end.
:param list_of_ints: A list of integers
:param bit_position: the position of the bit that gets compared
:return: Returns a partially sorted list
>>> _msd_radix_sort([45, 2, 32], 1)
[2, 32, 45]
>>> _msd_radix_sort([10, 4, 12], 2)
[4, 12, 10]
"""
if bit_position == 0 or len(list_of_ints) in [0, 1]:
return list_of_ints
zeros = []
ones = []
# Split numbers based on bit at bit_position from the right
for number in list_of_ints:
if (number >> (bit_position - 1)) & 1:
# number has a one at bit bit_position
ones.append(number)
else:
# number has a zero at bit bit_position
zeros.append(number)
# recursively split both lists further
zeros = _msd_radix_sort(zeros, bit_position - 1)
ones = _msd_radix_sort(ones, bit_position - 1)
# recombine lists
res = zeros
res.extend(ones)
return res
def msd_radix_sort_inplace(list_of_ints: list[int]):
"""
Inplace implementation of the MSD radix sort algorithm.
Sorts based on the binary representation of the integers.
>>> lst = [1, 345, 23, 89, 0, 3]
>>> msd_radix_sort_inplace(lst)
>>> lst == sorted(lst)
True
>>> lst = [1, 43, 0, 0, 0, 24, 3, 3]
>>> msd_radix_sort_inplace(lst)
>>> lst == sorted(lst)
True
>>> lst = []
>>> msd_radix_sort_inplace(lst)
>>> lst == []
True
>>> lst = [-1, 34, 23, 4, -42]
>>> msd_radix_sort_inplace(lst)
Traceback (most recent call last):
...
ValueError: All numbers must be positive
"""
length = len(list_of_ints)
if not list_of_ints or length == 1:
return
if min(list_of_ints) < 0:
raise ValueError("All numbers must be positive")
most_bits = max(len(bin(x)[2:]) for x in list_of_ints)
_msd_radix_sort_inplace(list_of_ints, most_bits, 0, length)
def _msd_radix_sort_inplace(
list_of_ints: list[int], bit_position: int, begin_index: int, end_index: int
):
"""
Sort the given list based on the bit at bit_position. Numbers with a
0 at that position will be at the start of the list, numbers with a
1 at the end.
>>> lst = [45, 2, 32, 24, 534, 2932]
>>> _msd_radix_sort_inplace(lst, 1, 0, 3)
>>> lst == [32, 2, 45, 24, 534, 2932]
True
>>> lst = [0, 2, 1, 3, 12, 10, 4, 90, 54, 2323, 756]
>>> _msd_radix_sort_inplace(lst, 2, 4, 7)
>>> lst == [0, 2, 1, 3, 12, 4, 10, 90, 54, 2323, 756]
True
"""
if bit_position == 0 or end_index - begin_index <= 1:
return
bit_position -= 1
i = begin_index
j = end_index - 1
while i <= j:
changed = False
if not ((list_of_ints[i] >> bit_position) & 1):
# found zero at the beginning
i += 1
changed = True
if (list_of_ints[j] >> bit_position) & 1:
# found one at the end
j -= 1
changed = True
if changed:
continue
list_of_ints[i], list_of_ints[j] = list_of_ints[j], list_of_ints[i]
j -= 1
if not j == i:
i += 1
_msd_radix_sort_inplace(list_of_ints, bit_position, begin_index, i)
_msd_radix_sort_inplace(list_of_ints, bit_position, i, end_index)
if __name__ == "__main__":
import doctest
doctest.testmod()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """Created by Nathan Damon, @bizzfitch on github
>>> test_miller_rabin()
"""
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
"""Deterministic Miller-Rabin algorithm for primes ~< 3.32e24.
Uses numerical analysis results to return whether or not the passed number
is prime. If the passed number is above the upper limit, and
allow_probable is True, then a return value of True indicates that n is
probably prime. This test does not allow False negatives- a return value
of False is ALWAYS composite.
Parameters
----------
n : int
The integer to be tested. Since we usually care if a number is prime,
n < 2 returns False instead of raising a ValueError.
allow_probable: bool, default False
Whether or not to test n above the upper bound of the deterministic test.
Raises
------
ValueError
Reference
---------
https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test
"""
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime."
)
# array bounds provided by analysis
bounds = [
2_047,
1_373_653,
25_326_001,
3_215_031_751,
2_152_302_898_747,
3_474_749_660_383,
341_550_071_728_321,
1,
3_825_123_056_546_413_051,
1,
1,
318_665_857_834_031_151_167_461,
3_317_044_064_679_887_385_961_981,
]
primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(bounds, 1):
if n < _p:
# then we have our last prime to check
plist = primes[:idx]
break
d, s = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
pr = False
for r in range(s):
m = pow(prime, d * 2**r, n)
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
pr = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def test_miller_rabin() -> None:
"""Testing a nontrivial (ends in 1, 3, 7, 9) composite
and a prime in each range.
"""
assert not miller_rabin(561)
assert miller_rabin(563)
# 2047
assert not miller_rabin(838_201)
assert miller_rabin(838_207)
# 1_373_653
assert not miller_rabin(17_316_001)
assert miller_rabin(17_316_017)
# 25_326_001
assert not miller_rabin(3_078_386_641)
assert miller_rabin(3_078_386_653)
# 3_215_031_751
assert not miller_rabin(1_713_045_574_801)
assert miller_rabin(1_713_045_574_819)
# 2_152_302_898_747
assert not miller_rabin(2_779_799_728_307)
assert miller_rabin(2_779_799_728_327)
# 3_474_749_660_383
assert not miller_rabin(113_850_023_909_441)
assert miller_rabin(113_850_023_909_527)
# 341_550_071_728_321
assert not miller_rabin(1_275_041_018_848_804_351)
assert miller_rabin(1_275_041_018_848_804_391)
# 3_825_123_056_546_413_051
assert not miller_rabin(79_666_464_458_507_787_791_867)
assert miller_rabin(79_666_464_458_507_787_791_951)
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552_840_677_446_647_897_660_333)
assert miller_rabin(552_840_677_446_647_897_660_359)
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| """Created by Nathan Damon, @bizzfitch on github
>>> test_miller_rabin()
"""
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
"""Deterministic Miller-Rabin algorithm for primes ~< 3.32e24.
Uses numerical analysis results to return whether or not the passed number
is prime. If the passed number is above the upper limit, and
allow_probable is True, then a return value of True indicates that n is
probably prime. This test does not allow False negatives- a return value
of False is ALWAYS composite.
Parameters
----------
n : int
The integer to be tested. Since we usually care if a number is prime,
n < 2 returns False instead of raising a ValueError.
allow_probable: bool, default False
Whether or not to test n above the upper bound of the deterministic test.
Raises
------
ValueError
Reference
---------
https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test
"""
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime."
)
# array bounds provided by analysis
bounds = [
2_047,
1_373_653,
25_326_001,
3_215_031_751,
2_152_302_898_747,
3_474_749_660_383,
341_550_071_728_321,
1,
3_825_123_056_546_413_051,
1,
1,
318_665_857_834_031_151_167_461,
3_317_044_064_679_887_385_961_981,
]
primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(bounds, 1):
if n < _p:
# then we have our last prime to check
plist = primes[:idx]
break
d, s = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
pr = False
for r in range(s):
m = pow(prime, d * 2**r, n)
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
pr = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def test_miller_rabin() -> None:
"""Testing a nontrivial (ends in 1, 3, 7, 9) composite
and a prime in each range.
"""
assert not miller_rabin(561)
assert miller_rabin(563)
# 2047
assert not miller_rabin(838_201)
assert miller_rabin(838_207)
# 1_373_653
assert not miller_rabin(17_316_001)
assert miller_rabin(17_316_017)
# 25_326_001
assert not miller_rabin(3_078_386_641)
assert miller_rabin(3_078_386_653)
# 3_215_031_751
assert not miller_rabin(1_713_045_574_801)
assert miller_rabin(1_713_045_574_819)
# 2_152_302_898_747
assert not miller_rabin(2_779_799_728_307)
assert miller_rabin(2_779_799_728_327)
# 3_474_749_660_383
assert not miller_rabin(113_850_023_909_441)
assert miller_rabin(113_850_023_909_527)
# 341_550_071_728_321
assert not miller_rabin(1_275_041_018_848_804_351)
assert miller_rabin(1_275_041_018_848_804_391)
# 3_825_123_056_546_413_051
assert not miller_rabin(79_666_464_458_507_787_791_867)
assert miller_rabin(79_666_464_458_507_787_791_951)
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552_840_677_446_647_897_660_333)
assert miller_rabin(552_840_677_446_647_897_660_359)
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| -1 |
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| -1 |
||
TheAlgorithms/Python | 7,235 | Add Flake8 comprehensions to pre-commit | ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| CaedenPH | "2022-10-15T14:17:49Z" | "2022-10-15T17:29:42Z" | 98a4c2487814cdfe0822526e05c4e63ff6aef7d0 | a652905b605ddcc43626072366d1130315801dc9 | Add Flake8 comprehensions to pre-commit. ### Describe your change:
Implements (#7233)
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
| """
Output:
Enter an Infix Equation = a + b ^c
Symbol | Stack | Postfix
----------------------------
c | | c
^ | ^ | c
b | ^ | cb
+ | + | cb^
a | + | cb^a
| | cb^a+
a+b^c (Infix) -> +a^bc (Prefix)
"""
def infix_2_postfix(infix):
stack = []
post_fix = []
priority = {
"^": 3,
"*": 2,
"/": 2,
"%": 2,
"+": 1,
"-": 1,
} # Priority of each operator
print_width = len(infix) if (len(infix) > 7) else 7
# Print table header for output
print(
"Symbol".center(8),
"Stack".center(print_width),
"Postfix".center(print_width),
sep=" | ",
)
print("-" * (print_width * 3 + 7))
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(x) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(x) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop()) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(stack) == 0:
stack.append(x) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop()) # pop stack & add to Postfix
stack.append(x) # push x to stack
print(
x.center(8),
("".join(stack)).ljust(print_width),
("".join(post_fix)).ljust(print_width),
sep=" | ",
) # Output in tabular format
while len(stack) > 0: # while stack is not empty
post_fix.append(stack.pop()) # pop stack & add to Postfix
print(
" ".center(8),
("".join(stack)).ljust(print_width),
("".join(post_fix)).ljust(print_width),
sep=" | ",
) # Output in tabular format
return "".join(post_fix) # return Postfix as str
def infix_2_prefix(infix):
infix = list(infix[::-1]) # reverse the infix equation
for i in range(len(infix)):
if infix[i] == "(":
infix[i] = ")" # change "(" to ")"
elif infix[i] == ")":
infix[i] = "(" # change ")" to "("
return (infix_2_postfix("".join(infix)))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
Infix = input("\nEnter an Infix Equation = ") # Input an Infix equation
Infix = "".join(Infix.split()) # Remove spaces from the input
print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| """
Output:
Enter an Infix Equation = a + b ^c
Symbol | Stack | Postfix
----------------------------
c | | c
^ | ^ | c
b | ^ | cb
+ | + | cb^
a | + | cb^a
| | cb^a+
a+b^c (Infix) -> +a^bc (Prefix)
"""
def infix_2_postfix(infix):
stack = []
post_fix = []
priority = {
"^": 3,
"*": 2,
"/": 2,
"%": 2,
"+": 1,
"-": 1,
} # Priority of each operator
print_width = len(infix) if (len(infix) > 7) else 7
# Print table header for output
print(
"Symbol".center(8),
"Stack".center(print_width),
"Postfix".center(print_width),
sep=" | ",
)
print("-" * (print_width * 3 + 7))
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(x) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(x) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop()) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(stack) == 0:
stack.append(x) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop()) # pop stack & add to Postfix
stack.append(x) # push x to stack
print(
x.center(8),
("".join(stack)).ljust(print_width),
("".join(post_fix)).ljust(print_width),
sep=" | ",
) # Output in tabular format
while len(stack) > 0: # while stack is not empty
post_fix.append(stack.pop()) # pop stack & add to Postfix
print(
" ".center(8),
("".join(stack)).ljust(print_width),
("".join(post_fix)).ljust(print_width),
sep=" | ",
) # Output in tabular format
return "".join(post_fix) # return Postfix as str
def infix_2_prefix(infix):
infix = list(infix[::-1]) # reverse the infix equation
for i in range(len(infix)):
if infix[i] == "(":
infix[i] = ")" # change "(" to ")"
elif infix[i] == ")":
infix[i] = "(" # change ")" to "("
return (infix_2_postfix("".join(infix)))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
Infix = input("\nEnter an Infix Equation = ") # Input an Infix equation
Infix = "".join(Infix.split()) # Remove spaces from the input
print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| -1 |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.