| column | dtype | range |
| --- | --- | --- |
| repo_name | stringclasses | 1 value |
| pr_number | int64 | 4.12k to 11.2k |
| pr_title | stringlengths | 9 to 107 |
| pr_description | stringlengths | 107 to 5.48k |
| author | stringlengths | 4 to 18 |
| date_created | unknown | |
| date_merged | unknown | |
| previous_commit | stringlengths | 40 to 40 |
| pr_commit | stringlengths | 40 to 40 |
| query | stringlengths | 118 to 5.52k |
| before_content | stringlengths | 0 to 7.93M |
| after_content | stringlengths | 0 to 7.93M |
| label | int64 | -1 to 1 |
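The column list above reads as a flattened schema summary: each row of the dataset pairs one PR (title, description, author, commits) with the before/after contents of a single file and an integer label. A minimal sketch of loading and inspecting such rows, assuming a local Parquet export whose file name (`pr_file_pairs.parquet`) is hypothetical and not taken from this page:

```python
# Hedged sketch: load the rows with pandas and run two basic sanity checks.
# The file name is a placeholder for however this dataset is exported locally.
import pandas as pd

df = pd.read_parquet("pr_file_pairs.parquet")  # hypothetical local export

# One PR can appear once per touched file, so group rows by pr_number.
print(df.groupby("pr_number")["label"].value_counts().head())

# Rows whose file content actually changed between the two commits.
changed = df[df["before_content"] != df["after_content"]]
print(f"{len(changed)} of {len(df)} rows differ between before and after")
```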
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" Given a array of length n, max_subarray_sum() finds the maximum of sum of contiguous sub-array using divide and conquer method. Time complexity : O(n log n) Ref : INTRODUCTION TO ALGORITHMS THIRD EDITION (section : 4, sub-section : 4.1, page : 70) """ def max_sum_from_start(array): """This function finds the maximum contiguous sum of array from 0 index Parameters : array (list[int]) : given array Returns : max_sum (int) : maximum contiguous sum of array from 0 index """ array_sum = 0 max_sum = float("-inf") for num in array: array_sum += num if array_sum > max_sum: max_sum = array_sum return max_sum def max_cross_array_sum(array, left, mid, right): """This function finds the maximum contiguous sum of left and right arrays Parameters : array, left, mid, right (list[int], int, int, int) Returns : (int) : maximum of sum of contiguous sum of left and right arrays """ max_sum_of_left = max_sum_from_start(array[left : mid + 1][::-1]) max_sum_of_right = max_sum_from_start(array[mid + 1 : right + 1]) return max_sum_of_left + max_sum_of_right def max_subarray_sum(array, left, right): """Maximum contiguous sub-array sum, using divide and conquer method Parameters : array, left, right (list[int], int, int) : given array, current left index and current right index Returns : int : maximum of sum of contiguous sub-array """ # base case: array has only one element if left == right: return array[right] # Recursion mid = (left + right) // 2 left_half_sum = max_subarray_sum(array, left, mid) right_half_sum = max_subarray_sum(array, mid + 1, right) cross_sum = max_cross_array_sum(array, left, mid, right) return max(left_half_sum, right_half_sum, cross_sum) array = [-2, -5, 6, -2, -3, 1, 5, -6] array_length = len(array) print( "Maximum sum of contiguous subarray:", max_subarray_sum(array, 0, array_length - 1) )
""" Given a array of length n, max_subarray_sum() finds the maximum of sum of contiguous sub-array using divide and conquer method. Time complexity : O(n log n) Ref : INTRODUCTION TO ALGORITHMS THIRD EDITION (section : 4, sub-section : 4.1, page : 70) """ def max_sum_from_start(array): """This function finds the maximum contiguous sum of array from 0 index Parameters : array (list[int]) : given array Returns : max_sum (int) : maximum contiguous sum of array from 0 index """ array_sum = 0 max_sum = float("-inf") for num in array: array_sum += num if array_sum > max_sum: max_sum = array_sum return max_sum def max_cross_array_sum(array, left, mid, right): """This function finds the maximum contiguous sum of left and right arrays Parameters : array, left, mid, right (list[int], int, int, int) Returns : (int) : maximum of sum of contiguous sum of left and right arrays """ max_sum_of_left = max_sum_from_start(array[left : mid + 1][::-1]) max_sum_of_right = max_sum_from_start(array[mid + 1 : right + 1]) return max_sum_of_left + max_sum_of_right def max_subarray_sum(array, left, right): """Maximum contiguous sub-array sum, using divide and conquer method Parameters : array, left, right (list[int], int, int) : given array, current left index and current right index Returns : int : maximum of sum of contiguous sub-array """ # base case: array has only one element if left == right: return array[right] # Recursion mid = (left + right) // 2 left_half_sum = max_subarray_sum(array, left, mid) right_half_sum = max_subarray_sum(array, mid + 1, right) cross_sum = max_cross_array_sum(array, left, mid, right) return max(left_half_sum, right_half_sum, cross_sum) array = [-2, -5, 6, -2, -3, 1, 5, -6] array_length = len(array) print( "Maximum sum of contiguous subarray:", max_subarray_sum(array, 0, array_length - 1) )
-1
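The checklist in this record leaves the type-hint and doctest boxes unticked, and the stored file indeed has neither. A hedged sketch, not the PR's actual change, of what an annotated and doctested version of the `max_sum_from_start` helper shown above could look like:

```python
# Sketch only: adds type hints and a doctest to the helper shown above.
# The file stored in this row is unchanged (before_content equals after_content).


def max_sum_from_start(array: list[int]) -> float:
    """Return the maximum sum of a prefix of ``array``.

    >>> max_sum_from_start([-2, -5, 6, -2, -3, 1, 5, -6])
    0
    """
    array_sum = 0
    max_sum = float("-inf")
    for num in array:
        array_sum += num
        if array_sum > max_sum:
            max_sum = array_sum
    return max_sum


if __name__ == "__main__":
    import doctest

    doctest.testmod()
```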
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
class StackOverflowError(BaseException): pass class Stack: """A stack is an abstract data type that serves as a collection of elements with two principal operations: push() and pop(). push() adds an element to the top of the stack, and pop() removes an element from the top of a stack. The order in which elements come off of a stack are Last In, First Out (LIFO). https://en.wikipedia.org/wiki/Stack_(abstract_data_type) """ def __init__(self, limit: int = 10): self.stack = [] self.limit = limit def __bool__(self) -> bool: return bool(self.stack) def __str__(self) -> str: return str(self.stack) def push(self, data): """ Push an element to the top of the stack.""" if len(self.stack) >= self.limit: raise StackOverflowError self.stack.append(data) def pop(self): """ Pop an element off of the top of the stack.""" return self.stack.pop() def peek(self): """ Peek at the top-most element of the stack.""" return self.stack[-1] def is_empty(self) -> bool: """ Check if a stack is empty.""" return not bool(self.stack) def is_full(self) -> bool: return self.size() == self.limit def size(self) -> int: """ Return the size of the stack.""" return len(self.stack) def __contains__(self, item) -> bool: """Check if item is in stack""" return item in self.stack def test_stack() -> None: """ >>> test_stack() """ stack = Stack(10) assert bool(stack) is False assert stack.is_empty() is True assert stack.is_full() is False assert str(stack) == "[]" try: _ = stack.pop() assert False # This should not happen except IndexError: assert True # This should happen try: _ = stack.peek() assert False # This should not happen except IndexError: assert True # This should happen for i in range(10): assert stack.size() == i stack.push(i) assert bool(stack) is True assert stack.is_empty() is False assert stack.is_full() is True assert str(stack) == str(list(range(10))) assert stack.pop() == 9 assert stack.peek() == 8 stack.push(100) assert str(stack) == str([0, 1, 2, 3, 4, 5, 6, 7, 8, 100]) try: stack.push(200) assert False # This should not happen except StackOverflowError: assert True # This should happen assert stack.is_empty() is False assert stack.size() == 10 assert 5 in stack assert 55 not in stack if __name__ == "__main__": test_stack()
class StackOverflowError(BaseException): pass class Stack: """A stack is an abstract data type that serves as a collection of elements with two principal operations: push() and pop(). push() adds an element to the top of the stack, and pop() removes an element from the top of a stack. The order in which elements come off of a stack are Last In, First Out (LIFO). https://en.wikipedia.org/wiki/Stack_(abstract_data_type) """ def __init__(self, limit: int = 10): self.stack = [] self.limit = limit def __bool__(self) -> bool: return bool(self.stack) def __str__(self) -> str: return str(self.stack) def push(self, data): """ Push an element to the top of the stack.""" if len(self.stack) >= self.limit: raise StackOverflowError self.stack.append(data) def pop(self): """ Pop an element off of the top of the stack.""" return self.stack.pop() def peek(self): """ Peek at the top-most element of the stack.""" return self.stack[-1] def is_empty(self) -> bool: """ Check if a stack is empty.""" return not bool(self.stack) def is_full(self) -> bool: return self.size() == self.limit def size(self) -> int: """ Return the size of the stack.""" return len(self.stack) def __contains__(self, item) -> bool: """Check if item is in stack""" return item in self.stack def test_stack() -> None: """ >>> test_stack() """ stack = Stack(10) assert bool(stack) is False assert stack.is_empty() is True assert stack.is_full() is False assert str(stack) == "[]" try: _ = stack.pop() assert False # This should not happen except IndexError: assert True # This should happen try: _ = stack.peek() assert False # This should not happen except IndexError: assert True # This should happen for i in range(10): assert stack.size() == i stack.push(i) assert bool(stack) is True assert stack.is_empty() is False assert stack.is_full() is True assert str(stack) == str(list(range(10))) assert stack.pop() == 9 assert stack.peek() == 8 stack.push(100) assert str(stack) == str([0, 1, 2, 3, 4, 5, 6, 7, 8, 100]) try: stack.push(200) assert False # This should not happen except StackOverflowError: assert True # This should happen assert stack.is_empty() is False assert stack.size() == 10 assert 5 in stack assert 55 not in stack if __name__ == "__main__": test_stack()
-1
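The PR in this record targets mypy fixes, and its checklist asks for full type annotations, while the stored `Stack` class leaves `push`, `pop` and `peek` untyped. A hedged sketch (not the PR's change) of how those methods could be annotated with a generic element type:

```python
# Sketch only: a generically typed subset of the Stack class shown above.
from typing import Generic, List, TypeVar

T = TypeVar("T")


class TypedStack(Generic[T]):
    def __init__(self, limit: int = 10) -> None:
        self.stack: List[T] = []
        self.limit = limit

    def push(self, data: T) -> None:
        if len(self.stack) >= self.limit:
            raise OverflowError("stack is full")  # stand-in for StackOverflowError
        self.stack.append(data)

    def pop(self) -> T:
        return self.stack.pop()  # raises IndexError when empty, as in the original

    def peek(self) -> T:
        return self.stack[-1]


ints: TypedStack[int] = TypedStack(limit=3)
ints.push(1)
ints.push(2)
assert ints.pop() == 2
```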
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
import cv2 import numpy as np from digital_image_processing.filters.convolve import img_convolve from digital_image_processing.filters.sobel_filter import sobel_filter PI = 180 def gen_gaussian_kernel(k_size, sigma): center = k_size // 2 x, y = np.mgrid[0 - center : k_size - center, 0 - center : k_size - center] g = ( 1 / (2 * np.pi * sigma) * np.exp(-(np.square(x) + np.square(y)) / (2 * np.square(sigma))) ) return g def canny(image, threshold_low=15, threshold_high=30, weak=128, strong=255): image_row, image_col = image.shape[0], image.shape[1] # gaussian_filter gaussian_out = img_convolve(image, gen_gaussian_kernel(9, sigma=1.4)) # get the gradient and degree by sobel_filter sobel_grad, sobel_theta = sobel_filter(gaussian_out) gradient_direction = np.rad2deg(sobel_theta) gradient_direction += PI dst = np.zeros((image_row, image_col)) """ Non-maximum suppression. If the edge strength of the current pixel is the largest compared to the other pixels in the mask with the same direction, the value will be preserved. Otherwise, the value will be suppressed. """ for row in range(1, image_row - 1): for col in range(1, image_col - 1): direction = gradient_direction[row, col] if ( 0 <= direction < 22.5 or 15 * PI / 8 <= direction <= 2 * PI or 7 * PI / 8 <= direction <= 9 * PI / 8 ): W = sobel_grad[row, col - 1] E = sobel_grad[row, col + 1] if sobel_grad[row, col] >= W and sobel_grad[row, col] >= E: dst[row, col] = sobel_grad[row, col] elif (PI / 8 <= direction < 3 * PI / 8) or ( 9 * PI / 8 <= direction < 11 * PI / 8 ): SW = sobel_grad[row + 1, col - 1] NE = sobel_grad[row - 1, col + 1] if sobel_grad[row, col] >= SW and sobel_grad[row, col] >= NE: dst[row, col] = sobel_grad[row, col] elif (3 * PI / 8 <= direction < 5 * PI / 8) or ( 11 * PI / 8 <= direction < 13 * PI / 8 ): N = sobel_grad[row - 1, col] S = sobel_grad[row + 1, col] if sobel_grad[row, col] >= N and sobel_grad[row, col] >= S: dst[row, col] = sobel_grad[row, col] elif (5 * PI / 8 <= direction < 7 * PI / 8) or ( 13 * PI / 8 <= direction < 15 * PI / 8 ): NW = sobel_grad[row - 1, col - 1] SE = sobel_grad[row + 1, col + 1] if sobel_grad[row, col] >= NW and sobel_grad[row, col] >= SE: dst[row, col] = sobel_grad[row, col] """ High-Low threshold detection. If an edge pixel’s gradient value is higher than the high threshold value, it is marked as a strong edge pixel. If an edge pixel’s gradient value is smaller than the high threshold value and larger than the low threshold value, it is marked as a weak edge pixel. If an edge pixel's value is smaller than the low threshold value, it will be suppressed. """ if dst[row, col] >= threshold_high: dst[row, col] = strong elif dst[row, col] <= threshold_low: dst[row, col] = 0 else: dst[row, col] = weak """ Edge tracking. Usually a weak edge pixel caused from true edges will be connected to a strong edge pixel while noise responses are unconnected. As long as there is one strong edge pixel that is involved in its 8-connected neighborhood, that weak edge point can be identified as one that should be preserved. 
""" for row in range(1, image_row): for col in range(1, image_col): if dst[row, col] == weak: if 255 in ( dst[row, col + 1], dst[row, col - 1], dst[row - 1, col], dst[row + 1, col], dst[row - 1, col - 1], dst[row + 1, col - 1], dst[row - 1, col + 1], dst[row + 1, col + 1], ): dst[row, col] = strong else: dst[row, col] = 0 return dst if __name__ == "__main__": # read original image in gray mode lena = cv2.imread(r"../image_data/lena.jpg", 0) # canny edge detection canny_dst = canny(lena) cv2.imshow("canny", canny_dst) cv2.waitKey(0)
import cv2 import numpy as np from digital_image_processing.filters.convolve import img_convolve from digital_image_processing.filters.sobel_filter import sobel_filter PI = 180 def gen_gaussian_kernel(k_size, sigma): center = k_size // 2 x, y = np.mgrid[0 - center : k_size - center, 0 - center : k_size - center] g = ( 1 / (2 * np.pi * sigma) * np.exp(-(np.square(x) + np.square(y)) / (2 * np.square(sigma))) ) return g def canny(image, threshold_low=15, threshold_high=30, weak=128, strong=255): image_row, image_col = image.shape[0], image.shape[1] # gaussian_filter gaussian_out = img_convolve(image, gen_gaussian_kernel(9, sigma=1.4)) # get the gradient and degree by sobel_filter sobel_grad, sobel_theta = sobel_filter(gaussian_out) gradient_direction = np.rad2deg(sobel_theta) gradient_direction += PI dst = np.zeros((image_row, image_col)) """ Non-maximum suppression. If the edge strength of the current pixel is the largest compared to the other pixels in the mask with the same direction, the value will be preserved. Otherwise, the value will be suppressed. """ for row in range(1, image_row - 1): for col in range(1, image_col - 1): direction = gradient_direction[row, col] if ( 0 <= direction < 22.5 or 15 * PI / 8 <= direction <= 2 * PI or 7 * PI / 8 <= direction <= 9 * PI / 8 ): W = sobel_grad[row, col - 1] E = sobel_grad[row, col + 1] if sobel_grad[row, col] >= W and sobel_grad[row, col] >= E: dst[row, col] = sobel_grad[row, col] elif (PI / 8 <= direction < 3 * PI / 8) or ( 9 * PI / 8 <= direction < 11 * PI / 8 ): SW = sobel_grad[row + 1, col - 1] NE = sobel_grad[row - 1, col + 1] if sobel_grad[row, col] >= SW and sobel_grad[row, col] >= NE: dst[row, col] = sobel_grad[row, col] elif (3 * PI / 8 <= direction < 5 * PI / 8) or ( 11 * PI / 8 <= direction < 13 * PI / 8 ): N = sobel_grad[row - 1, col] S = sobel_grad[row + 1, col] if sobel_grad[row, col] >= N and sobel_grad[row, col] >= S: dst[row, col] = sobel_grad[row, col] elif (5 * PI / 8 <= direction < 7 * PI / 8) or ( 13 * PI / 8 <= direction < 15 * PI / 8 ): NW = sobel_grad[row - 1, col - 1] SE = sobel_grad[row + 1, col + 1] if sobel_grad[row, col] >= NW and sobel_grad[row, col] >= SE: dst[row, col] = sobel_grad[row, col] """ High-Low threshold detection. If an edge pixel’s gradient value is higher than the high threshold value, it is marked as a strong edge pixel. If an edge pixel’s gradient value is smaller than the high threshold value and larger than the low threshold value, it is marked as a weak edge pixel. If an edge pixel's value is smaller than the low threshold value, it will be suppressed. """ if dst[row, col] >= threshold_high: dst[row, col] = strong elif dst[row, col] <= threshold_low: dst[row, col] = 0 else: dst[row, col] = weak """ Edge tracking. Usually a weak edge pixel caused from true edges will be connected to a strong edge pixel while noise responses are unconnected. As long as there is one strong edge pixel that is involved in its 8-connected neighborhood, that weak edge point can be identified as one that should be preserved. 
""" for row in range(1, image_row): for col in range(1, image_col): if dst[row, col] == weak: if 255 in ( dst[row, col + 1], dst[row, col - 1], dst[row - 1, col], dst[row + 1, col], dst[row - 1, col - 1], dst[row + 1, col - 1], dst[row - 1, col + 1], dst[row + 1, col + 1], ): dst[row, col] = strong else: dst[row, col] = 0 return dst if __name__ == "__main__": # read original image in gray mode lena = cv2.imread(r"../image_data/lena.jpg", 0) # canny edge detection canny_dst = canny(lena) cv2.imshow("canny", canny_dst) cv2.waitKey(0)
-1
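The non-maximum suppression loop in this row selects one of four neighbour pairs by checking the gradient direction (in degrees, with `PI = 180`) against explicit ranges. A hedged sketch of roughly the same 4-way binning written as a standalone helper, for readability rather than as part of the stored file:

```python
# Sketch: quantize a gradient direction (degrees) into the four NMS bins.
# 0 -> compare W/E neighbours, 45 -> SW/NE, 90 -> N/S, 135 -> NW/SE.
def quantize_direction(angle_deg: float) -> int:
    angle = angle_deg % 180  # opposite directions describe the same edge orientation
    if angle < 22.5 or angle >= 157.5:
        return 0
    if angle < 67.5:
        return 45
    if angle < 112.5:
        return 90
    return 135


assert quantize_direction(10.0) == 0
assert quantize_direction(50.0) == 45
assert quantize_direction(95.0) == 90
assert quantize_direction(170.0) == 135
```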
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
from typing import Iterable, Union import numpy as np Vector = Union[Iterable[float], Iterable[int], np.ndarray] VectorOut = Union[np.float64, int, float] def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut: """ Calculate the distance between the two endpoints of two vectors. A vector is defined as a list, tuple, or numpy 1D array. >>> euclidean_distance((0, 0), (2, 2)) 2.8284271247461903 >>> euclidean_distance(np.array([0, 0, 0]), np.array([2, 2, 2])) 3.4641016151377544 >>> euclidean_distance(np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])) 8.0 >>> euclidean_distance([1, 2, 3, 4], [5, 6, 7, 8]) 8.0 """ return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2)) def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut: """ Calculate the distance between the two endpoints of two vectors without numpy. A vector is defined as a list, tuple, or numpy 1D array. >>> euclidean_distance_no_np((0, 0), (2, 2)) 2.8284271247461903 >>> euclidean_distance_no_np([1, 2, 3, 4], [5, 6, 7, 8]) 8.0 """ return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2) if __name__ == "__main__": def benchmark() -> None: """ Benchmarks """ from timeit import timeit print("Without Numpy") print( timeit( "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])", number=10000, globals=globals(), ) ) print("With Numpy") print( timeit( "euclidean_distance([1, 2, 3], [4, 5, 6])", number=10000, globals=globals(), ) ) benchmark()
from typing import Iterable, Union import numpy as np Vector = Union[Iterable[float], Iterable[int], np.ndarray] VectorOut = Union[np.float64, int, float] def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut: """ Calculate the distance between the two endpoints of two vectors. A vector is defined as a list, tuple, or numpy 1D array. >>> euclidean_distance((0, 0), (2, 2)) 2.8284271247461903 >>> euclidean_distance(np.array([0, 0, 0]), np.array([2, 2, 2])) 3.4641016151377544 >>> euclidean_distance(np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])) 8.0 >>> euclidean_distance([1, 2, 3, 4], [5, 6, 7, 8]) 8.0 """ return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2)) def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut: """ Calculate the distance between the two endpoints of two vectors without numpy. A vector is defined as a list, tuple, or numpy 1D array. >>> euclidean_distance_no_np((0, 0), (2, 2)) 2.8284271247461903 >>> euclidean_distance_no_np([1, 2, 3, 4], [5, 6, 7, 8]) 8.0 """ return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2) if __name__ == "__main__": def benchmark() -> None: """ Benchmarks """ from timeit import timeit print("Without Numpy") print( timeit( "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])", number=10000, globals=globals(), ) ) print("With Numpy") print( timeit( "euclidean_distance([1, 2, 3], [4, 5, 6])", number=10000, globals=globals(), ) ) benchmark()
-1
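Both functions in this row's file compute the same Euclidean norm of the difference vector, and since Python 3.8 `math.dist` provides an easy cross-check. A hedged sketch that mirrors the two implementations and compares them against the standard library (not part of the stored file):

```python
# Sketch: cross-check the two implementations above against math.dist (Python 3.8+).
import math

import numpy as np


def euclidean_np(v1, v2):
    return np.sqrt(np.sum((np.asarray(v1) - np.asarray(v2)) ** 2))


def euclidean_pure(v1, v2):
    return sum((a - b) ** 2 for a, b in zip(v1, v2)) ** 0.5


p, q = [1, 2, 3, 4], [5, 6, 7, 8]
assert math.isclose(euclidean_np(p, q), math.dist(p, q))
assert math.isclose(euclidean_pure(p, q), math.dist(p, q))
print(math.dist(p, q))  # 8.0
```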
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" Problem 20: https://projecteuler.net/problem=20 n! means n × (n − 1) × ... × 3 × 2 × 1 For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800, and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27. Find the sum of the digits in the number 100! """ def factorial(num: int) -> int: """Find the factorial of a given number n""" fact = 1 for i in range(1, num + 1): fact *= i return fact def split_and_add(number: int) -> int: """Split number digits and add them.""" sum_of_digits = 0 while number > 0: last_digit = number % 10 sum_of_digits += last_digit number = number // 10 # Removing the last_digit from the given number return sum_of_digits def solution(num: int = 100) -> int: """Returns the sum of the digits in the factorial of num >>> solution(100) 648 >>> solution(50) 216 >>> solution(10) 27 >>> solution(5) 3 >>> solution(3) 6 >>> solution(2) 2 >>> solution(1) 1 """ nfact = factorial(num) result = split_and_add(nfact) return result if __name__ == "__main__": print(solution(int(input("Enter the Number: ").strip())))
""" Problem 20: https://projecteuler.net/problem=20 n! means n × (n − 1) × ... × 3 × 2 × 1 For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800, and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27. Find the sum of the digits in the number 100! """ def factorial(num: int) -> int: """Find the factorial of a given number n""" fact = 1 for i in range(1, num + 1): fact *= i return fact def split_and_add(number: int) -> int: """Split number digits and add them.""" sum_of_digits = 0 while number > 0: last_digit = number % 10 sum_of_digits += last_digit number = number // 10 # Removing the last_digit from the given number return sum_of_digits def solution(num: int = 100) -> int: """Returns the sum of the digits in the factorial of num >>> solution(100) 648 >>> solution(50) 216 >>> solution(10) 27 >>> solution(5) 3 >>> solution(3) 6 >>> solution(2) 2 >>> solution(1) 1 """ nfact = factorial(num) result = split_and_add(nfact) return result if __name__ == "__main__": print(solution(int(input("Enter the Number: ").strip())))
-1
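The stored solution builds the factorial with an explicit loop and sums digits by repeated modulo. A shorter cross-check using only the standard library (a sketch, not the file in this row):

```python
# Sketch: the same result via math.factorial and a string-based digit sum.
import math


def digit_sum_of_factorial(num: int) -> int:
    return sum(int(digit) for digit in str(math.factorial(num)))


assert digit_sum_of_factorial(10) == 27    # matches the example in the docstring
assert digit_sum_of_factorial(100) == 648  # matches solution(100) above
```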
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
-1
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
from PIL import Image def change_brightness(img: Image, level: float) -> Image: """ Change the brightness of a PIL Image to a given level. """ def brightness(c: int) -> float: """ Fundamental Transformation/Operation that'll be performed on every bit. """ return 128 + level + (c - 128) if not -255.0 <= level <= 255.0: raise ValueError("level must be between -255.0 (black) and 255.0 (white)") return img.point(brightness) if __name__ == "__main__": # Load image with Image.open("image_data/lena.jpg") as img: # Change brightness to 100 brigt_img = change_brightness(img, 100) brigt_img.save("image_data/lena_brightness.png", format="png")
from PIL import Image def change_brightness(img: Image, level: float) -> Image: """ Change the brightness of a PIL Image to a given level. """ def brightness(c: int) -> float: """ Fundamental Transformation/Operation that'll be performed on every bit. """ return 128 + level + (c - 128) if not -255.0 <= level <= 255.0: raise ValueError("level must be between -255.0 (black) and 255.0 (white)") return img.point(brightness) if __name__ == "__main__": # Load image with Image.open("image_data/lena.jpg") as img: # Change brightness to 100 brigt_img = change_brightness(img, 100) brigt_img.save("image_data/lena_brightness.png", format="png")
-1
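The per-pixel transform in this row, `128 + level + (c - 128)`, is algebraically just `c + level`. A small sketch (not part of the stored file) verifying that equivalence over the full 8-bit input range:

```python
# Sketch: the brightness function above reduces to a plain additive shift.
def brightness_original(c: int, level: float) -> float:
    return 128 + level + (c - 128)


def brightness_simplified(c: int, level: float) -> float:
    return c + level


for c in range(256):
    for level in (-255.0, -100.0, 0.0, 42.5, 255.0):
        assert brightness_original(c, level) == brightness_simplified(c, level)
```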
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
"""Non recursive implementation of a DFS algorithm.""" from __future__ import annotations def depth_first_search(graph: dict, start: str) -> set[int]: """Depth First Search on Graph :param graph: directed graph in dictionary format :param vertex: starting vertex as a string :returns: the trace of the search >>> G = { "A": ["B", "C", "D"], "B": ["A", "D", "E"], ... "C": ["A", "F"], "D": ["B", "D"], "E": ["B", "F"], ... "F": ["C", "E", "G"], "G": ["F"] } >>> start = "A" >>> output_G = list({'A', 'B', 'C', 'D', 'E', 'F', 'G'}) >>> all(x in output_G for x in list(depth_first_search(G, "A"))) True >>> all(x in output_G for x in list(depth_first_search(G, "G"))) True """ explored, stack = set(start), [start] while stack: v = stack.pop() explored.add(v) # Differences from BFS: # 1) pop last element instead of first one # 2) add adjacent elements to stack without exploring them for adj in reversed(graph[v]): if adj not in explored: stack.append(adj) return explored G = { "A": ["B", "C", "D"], "B": ["A", "D", "E"], "C": ["A", "F"], "D": ["B", "D"], "E": ["B", "F"], "F": ["C", "E", "G"], "G": ["F"], } if __name__ == "__main__": import doctest doctest.testmod() print(depth_first_search(G, "A"))
"""Non recursive implementation of a DFS algorithm.""" from __future__ import annotations def depth_first_search(graph: dict, start: str) -> set[int]: """Depth First Search on Graph :param graph: directed graph in dictionary format :param vertex: starting vertex as a string :returns: the trace of the search >>> G = { "A": ["B", "C", "D"], "B": ["A", "D", "E"], ... "C": ["A", "F"], "D": ["B", "D"], "E": ["B", "F"], ... "F": ["C", "E", "G"], "G": ["F"] } >>> start = "A" >>> output_G = list({'A', 'B', 'C', 'D', 'E', 'F', 'G'}) >>> all(x in output_G for x in list(depth_first_search(G, "A"))) True >>> all(x in output_G for x in list(depth_first_search(G, "G"))) True """ explored, stack = set(start), [start] while stack: v = stack.pop() explored.add(v) # Differences from BFS: # 1) pop last element instead of first one # 2) add adjacent elements to stack without exploring them for adj in reversed(graph[v]): if adj not in explored: stack.append(adj) return explored G = { "A": ["B", "C", "D"], "B": ["A", "D", "E"], "C": ["A", "F"], "D": ["B", "D"], "E": ["B", "F"], "F": ["C", "E", "G"], "G": ["F"], } if __name__ == "__main__": import doctest doctest.testmod() print(depth_first_search(G, "A"))
-1
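The comments inside this row's function call out the two differences from breadth-first search. For contrast, a hedged sketch of the BFS counterpart on the same graph `G` (not part of this row's file):

```python
# Sketch: iterative BFS for comparison with the DFS above.
# Differences: a FIFO queue instead of a stack, and neighbours are marked
# as explored when enqueued rather than when popped.
from collections import deque


def breadth_first_search(graph: dict, start: str) -> set:
    explored = {start}
    queue = deque([start])
    while queue:
        v = queue.popleft()
        for adj in graph[v]:
            if adj not in explored:
                explored.add(adj)
                queue.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}
assert breadth_first_search(G, "A") == {"A", "B", "C", "D", "E", "F", "G"}
```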
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" This is pure Python implementation of Tabu search algorithm for a Travelling Salesman Problem, that the distances between the cities are symmetric (the distance between city 'a' and city 'b' is the same between city 'b' and city 'a'). The TSP can be represented into a graph. The cities are represented by nodes and the distance between them is represented by the weight of the ark between the nodes. The .txt file with the graph has the form: node1 node2 distance_between_node1_and_node2 node1 node3 distance_between_node1_and_node3 ... Be careful node1, node2 and the distance between them, must exist only once. This means in the .txt file should not exist: node1 node2 distance_between_node1_and_node2 node2 node1 distance_between_node2_and_node1 For pytests run following command: pytest For manual testing run: python tabu_search.py -f your_file_name.txt -number_of_iterations_of_tabu_search \ -s size_of_tabu_search e.g. python tabu_search.py -f tabudata2.txt -i 4 -s 3 """ import argparse import copy def generate_neighbours(path): """ Pure implementation of generating a dictionary of neighbors and the cost with each neighbor, given a path file that includes a graph. :param path: The path to the .txt file that includes the graph (e.g.tabudata2.txt) :return dict_of_neighbours: Dictionary with key each node and value a list of lists with the neighbors of the node and the cost (distance) for each neighbor. Example of dict_of_neighbours: >>) dict_of_neighbours[a] [[b,20],[c,18],[d,22],[e,26]] This indicates the neighbors of node (city) 'a', which has neighbor the node 'b' with distance 20, the node 'c' with distance 18, the node 'd' with distance 22 and the node 'e' with distance 26. """ dict_of_neighbours = {} with open(path) as f: for line in f: if line.split()[0] not in dict_of_neighbours: _list = list() _list.append([line.split()[1], line.split()[2]]) dict_of_neighbours[line.split()[0]] = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: _list = list() _list.append([line.split()[0], line.split()[2]]) dict_of_neighbours[line.split()[1]] = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def generate_first_solution(path, dict_of_neighbours): """ Pure implementation of generating the first solution for the Tabu search to start, with the redundant resolution strategy. That means that we start from the starting node (e.g. node 'a'), then we go to the city nearest (lowest distance) to this node (let's assume is node 'c'), then we go to the nearest city of the node 'c', etc. till we have visited all cities and return to the starting node. :param path: The path to the .txt file that includes the graph (e.g.tabudata2.txt) :param dict_of_neighbours: Dictionary with key each node and value a list of lists with the neighbors of the node and the cost (distance) for each neighbor. :return first_solution: The solution for the first iteration of Tabu search using the redundant resolution strategy in a list. :return distance_of_first_solution: The total distance that Travelling Salesman will travel, if he follows the path in first_solution. 
""" with open(path) as f: start_node = f.read(1) end_node = start_node first_solution = [] visiting = start_node distance_of_first_solution = 0 while visiting not in first_solution: minim = 10000 for k in dict_of_neighbours[visiting]: if int(k[1]) < int(minim) and k[0] not in first_solution: minim = k[1] best_node = k[0] first_solution.append(visiting) distance_of_first_solution = distance_of_first_solution + int(minim) visiting = best_node first_solution.append(end_node) position = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 distance_of_first_solution = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1]) - 10000 ) return first_solution, distance_of_first_solution def find_neighborhood(solution, dict_of_neighbours): """ Pure implementation of generating the neighborhood (sorted by total distance of each solution from lowest to highest) of a solution with 1-1 exchange method, that means we exchange each node in a solution with each other node and generating a number of solution named neighborhood. :param solution: The solution in which we want to find the neighborhood. :param dict_of_neighbours: Dictionary with key each node and value a list of lists with the neighbors of the node and the cost (distance) for each neighbor. :return neighborhood_of_solution: A list that includes the solutions and the total distance of each solution (in form of list) that are produced with 1-1 exchange from the solution that the method took as an input Example: >>> find_neighborhood(['a', 'c', 'b', 'd', 'e', 'a'], ... {'a': [['b', '20'], ['c', '18'], ['d', '22'], ['e', '26']], ... 'c': [['a', '18'], ['b', '10'], ['d', '23'], ['e', '24']], ... 'b': [['a', '20'], ['c', '10'], ['d', '11'], ['e', '12']], ... 'e': [['a', '26'], ['b', '12'], ['c', '24'], ['d', '40']], ... 'd': [['a', '22'], ['b', '11'], ['c', '23'], ['e', '40']]} ... ) # doctest: +NORMALIZE_WHITESPACE [['a', 'e', 'b', 'd', 'c', 'a', 90], ['a', 'c', 'd', 'b', 'e', 'a', 90], ['a', 'd', 'b', 'c', 'e', 'a', 93], ['a', 'c', 'b', 'e', 'd', 'a', 102], ['a', 'c', 'e', 'd', 'b', 'a', 113], ['a', 'b', 'c', 'd', 'e', 'a', 119]] """ neighborhood_of_solution = [] for n in solution[1:-1]: idx1 = solution.index(n) for kn in solution[1:-1]: idx2 = solution.index(kn) if n == kn: continue _tmp = copy.deepcopy(solution) _tmp[idx1] = kn _tmp[idx2] = n distance = 0 for k in _tmp[:-1]: next_node = _tmp[_tmp.index(k) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: distance = distance + int(i[1]) _tmp.append(distance) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp) indexOfLastItemInTheList = len(neighborhood_of_solution[0]) - 1 neighborhood_of_solution.sort(key=lambda x: x[indexOfLastItemInTheList]) return neighborhood_of_solution def tabu_search( first_solution, distance_of_first_solution, dict_of_neighbours, iters, size ): """ Pure implementation of Tabu search algorithm for a Travelling Salesman Problem in Python. :param first_solution: The solution for the first iteration of Tabu search using the redundant resolution strategy in a list. :param distance_of_first_solution: The total distance that Travelling Salesman will travel, if he follows the path in first_solution. :param dict_of_neighbours: Dictionary with key each node and value a list of lists with the neighbors of the node and the cost (distance) for each neighbor. :param iters: The number of iterations that Tabu search will execute. :param size: The size of Tabu List. 
:return best_solution_ever: The solution with the lowest distance that occurred during the execution of Tabu search. :return best_cost: The total distance that Travelling Salesman will travel, if he follows the path in best_solution ever. """ count = 1 solution = first_solution tabu_list = list() best_cost = distance_of_first_solution best_solution_ever = solution while count <= iters: neighborhood = find_neighborhood(solution, dict_of_neighbours) index_of_best_solution = 0 best_solution = neighborhood[index_of_best_solution] best_cost_index = len(best_solution) - 1 found = False while not found: i = 0 while i < len(best_solution): if best_solution[i] != solution[i]: first_exchange_node = best_solution[i] second_exchange_node = solution[i] break i = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node]) found = True solution = best_solution[:-1] cost = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: best_cost = cost best_solution_ever = solution else: index_of_best_solution = index_of_best_solution + 1 best_solution = neighborhood[index_of_best_solution] if len(tabu_list) >= size: tabu_list.pop(0) count = count + 1 return best_solution_ever, best_cost def main(args=None): dict_of_neighbours = generate_neighbours(args.File) first_solution, distance_of_first_solution = generate_first_solution( args.File, dict_of_neighbours ) best_sol, best_cost = tabu_search( first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size, ) print(f"Best solution: {best_sol}, with total distance: {best_cost}.") if __name__ == "__main__": parser = argparse.ArgumentParser(description="Tabu Search") parser.add_argument( "-f", "--File", type=str, help="Path to the file containing the data", required=True, ) parser.add_argument( "-i", "--Iterations", type=int, help="How many iterations the algorithm should perform", required=True, ) parser.add_argument( "-s", "--Size", type=int, help="Size of the tabu list", required=True ) # Pass the arguments to main method main(parser.parse_args())
""" This is pure Python implementation of Tabu search algorithm for a Travelling Salesman Problem, that the distances between the cities are symmetric (the distance between city 'a' and city 'b' is the same between city 'b' and city 'a'). The TSP can be represented into a graph. The cities are represented by nodes and the distance between them is represented by the weight of the ark between the nodes. The .txt file with the graph has the form: node1 node2 distance_between_node1_and_node2 node1 node3 distance_between_node1_and_node3 ... Be careful node1, node2 and the distance between them, must exist only once. This means in the .txt file should not exist: node1 node2 distance_between_node1_and_node2 node2 node1 distance_between_node2_and_node1 For pytests run following command: pytest For manual testing run: python tabu_search.py -f your_file_name.txt -number_of_iterations_of_tabu_search \ -s size_of_tabu_search e.g. python tabu_search.py -f tabudata2.txt -i 4 -s 3 """ import argparse import copy def generate_neighbours(path): """ Pure implementation of generating a dictionary of neighbors and the cost with each neighbor, given a path file that includes a graph. :param path: The path to the .txt file that includes the graph (e.g.tabudata2.txt) :return dict_of_neighbours: Dictionary with key each node and value a list of lists with the neighbors of the node and the cost (distance) for each neighbor. Example of dict_of_neighbours: >>) dict_of_neighbours[a] [[b,20],[c,18],[d,22],[e,26]] This indicates the neighbors of node (city) 'a', which has neighbor the node 'b' with distance 20, the node 'c' with distance 18, the node 'd' with distance 22 and the node 'e' with distance 26. """ dict_of_neighbours = {} with open(path) as f: for line in f: if line.split()[0] not in dict_of_neighbours: _list = list() _list.append([line.split()[1], line.split()[2]]) dict_of_neighbours[line.split()[0]] = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: _list = list() _list.append([line.split()[0], line.split()[2]]) dict_of_neighbours[line.split()[1]] = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def generate_first_solution(path, dict_of_neighbours): """ Pure implementation of generating the first solution for the Tabu search to start, with the redundant resolution strategy. That means that we start from the starting node (e.g. node 'a'), then we go to the city nearest (lowest distance) to this node (let's assume is node 'c'), then we go to the nearest city of the node 'c', etc. till we have visited all cities and return to the starting node. :param path: The path to the .txt file that includes the graph (e.g.tabudata2.txt) :param dict_of_neighbours: Dictionary with key each node and value a list of lists with the neighbors of the node and the cost (distance) for each neighbor. :return first_solution: The solution for the first iteration of Tabu search using the redundant resolution strategy in a list. :return distance_of_first_solution: The total distance that Travelling Salesman will travel, if he follows the path in first_solution. 
""" with open(path) as f: start_node = f.read(1) end_node = start_node first_solution = [] visiting = start_node distance_of_first_solution = 0 while visiting not in first_solution: minim = 10000 for k in dict_of_neighbours[visiting]: if int(k[1]) < int(minim) and k[0] not in first_solution: minim = k[1] best_node = k[0] first_solution.append(visiting) distance_of_first_solution = distance_of_first_solution + int(minim) visiting = best_node first_solution.append(end_node) position = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 distance_of_first_solution = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1]) - 10000 ) return first_solution, distance_of_first_solution def find_neighborhood(solution, dict_of_neighbours): """ Pure implementation of generating the neighborhood (sorted by total distance of each solution from lowest to highest) of a solution with 1-1 exchange method, that means we exchange each node in a solution with each other node and generating a number of solution named neighborhood. :param solution: The solution in which we want to find the neighborhood. :param dict_of_neighbours: Dictionary with key each node and value a list of lists with the neighbors of the node and the cost (distance) for each neighbor. :return neighborhood_of_solution: A list that includes the solutions and the total distance of each solution (in form of list) that are produced with 1-1 exchange from the solution that the method took as an input Example: >>> find_neighborhood(['a', 'c', 'b', 'd', 'e', 'a'], ... {'a': [['b', '20'], ['c', '18'], ['d', '22'], ['e', '26']], ... 'c': [['a', '18'], ['b', '10'], ['d', '23'], ['e', '24']], ... 'b': [['a', '20'], ['c', '10'], ['d', '11'], ['e', '12']], ... 'e': [['a', '26'], ['b', '12'], ['c', '24'], ['d', '40']], ... 'd': [['a', '22'], ['b', '11'], ['c', '23'], ['e', '40']]} ... ) # doctest: +NORMALIZE_WHITESPACE [['a', 'e', 'b', 'd', 'c', 'a', 90], ['a', 'c', 'd', 'b', 'e', 'a', 90], ['a', 'd', 'b', 'c', 'e', 'a', 93], ['a', 'c', 'b', 'e', 'd', 'a', 102], ['a', 'c', 'e', 'd', 'b', 'a', 113], ['a', 'b', 'c', 'd', 'e', 'a', 119]] """ neighborhood_of_solution = [] for n in solution[1:-1]: idx1 = solution.index(n) for kn in solution[1:-1]: idx2 = solution.index(kn) if n == kn: continue _tmp = copy.deepcopy(solution) _tmp[idx1] = kn _tmp[idx2] = n distance = 0 for k in _tmp[:-1]: next_node = _tmp[_tmp.index(k) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: distance = distance + int(i[1]) _tmp.append(distance) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp) indexOfLastItemInTheList = len(neighborhood_of_solution[0]) - 1 neighborhood_of_solution.sort(key=lambda x: x[indexOfLastItemInTheList]) return neighborhood_of_solution def tabu_search( first_solution, distance_of_first_solution, dict_of_neighbours, iters, size ): """ Pure implementation of Tabu search algorithm for a Travelling Salesman Problem in Python. :param first_solution: The solution for the first iteration of Tabu search using the redundant resolution strategy in a list. :param distance_of_first_solution: The total distance that Travelling Salesman will travel, if he follows the path in first_solution. :param dict_of_neighbours: Dictionary with key each node and value a list of lists with the neighbors of the node and the cost (distance) for each neighbor. :param iters: The number of iterations that Tabu search will execute. :param size: The size of Tabu List. 
:return best_solution_ever: The solution with the lowest distance that occurred during the execution of Tabu search. :return best_cost: The total distance that Travelling Salesman will travel, if he follows the path in best_solution ever. """ count = 1 solution = first_solution tabu_list = list() best_cost = distance_of_first_solution best_solution_ever = solution while count <= iters: neighborhood = find_neighborhood(solution, dict_of_neighbours) index_of_best_solution = 0 best_solution = neighborhood[index_of_best_solution] best_cost_index = len(best_solution) - 1 found = False while not found: i = 0 while i < len(best_solution): if best_solution[i] != solution[i]: first_exchange_node = best_solution[i] second_exchange_node = solution[i] break i = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node]) found = True solution = best_solution[:-1] cost = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: best_cost = cost best_solution_ever = solution else: index_of_best_solution = index_of_best_solution + 1 best_solution = neighborhood[index_of_best_solution] if len(tabu_list) >= size: tabu_list.pop(0) count = count + 1 return best_solution_ever, best_cost def main(args=None): dict_of_neighbours = generate_neighbours(args.File) first_solution, distance_of_first_solution = generate_first_solution( args.File, dict_of_neighbours ) best_sol, best_cost = tabu_search( first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size, ) print(f"Best solution: {best_sol}, with total distance: {best_cost}.") if __name__ == "__main__": parser = argparse.ArgumentParser(description="Tabu Search") parser.add_argument( "-f", "--File", type=str, help="Path to the file containing the data", required=True, ) parser.add_argument( "-i", "--Iterations", type=int, help="How many iterations the algorithm should perform", required=True, ) parser.add_argument( "-s", "--Size", type=int, help="Size of the tabu list", required=True ) # Pass the arguments to main method main(parser.parse_args())
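

# Illustrative sketch (an addition, not part of the original module): the same
# pipeline can be driven programmatically instead of through the argparse CLI
# above.  The default file name "tabudata2.txt" is only the sample mentioned in
# the docstrings and is assumed to exist next to the script.
def run_tabu_search_programmatically(path="tabudata2.txt", iterations=4, size=3):
    dict_of_neighbours = generate_neighbours(path)
    first_solution, distance_of_first_solution = generate_first_solution(
        path, dict_of_neighbours
    )
    return tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        iterations,
        size,
    )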
-1
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
from .stack import Stack def balanced_parentheses(parentheses: str) -> bool: """Use a stack to check if a string of parentheses is balanced. >>> balanced_parentheses("([]{})") True >>> balanced_parentheses("[()]{}{[()()]()}") True >>> balanced_parentheses("[(])") False >>> balanced_parentheses("1+2*3-4") True >>> balanced_parentheses("") True """ stack = Stack() bracket_pairs = {"(": ")", "[": "]", "{": "}"} for bracket in parentheses: if bracket in bracket_pairs: stack.push(bracket) elif bracket in (")", "]", "}"): if stack.is_empty() or bracket_pairs[stack.pop()] != bracket: return False return stack.is_empty() if __name__ == "__main__": from doctest import testmod testmod() examples = ["((()))", "((())", "(()))"] print("Balanced parentheses demonstration:\n") for example in examples: not_str = "" if balanced_parentheses(example) else "not " print(f"{example} is {not_str}balanced")
from .stack import Stack def balanced_parentheses(parentheses: str) -> bool: """Use a stack to check if a string of parentheses is balanced. >>> balanced_parentheses("([]{})") True >>> balanced_parentheses("[()]{}{[()()]()}") True >>> balanced_parentheses("[(])") False >>> balanced_parentheses("1+2*3-4") True >>> balanced_parentheses("") True """ stack = Stack() bracket_pairs = {"(": ")", "[": "]", "{": "}"} for bracket in parentheses: if bracket in bracket_pairs: stack.push(bracket) elif bracket in (")", "]", "}"): if stack.is_empty() or bracket_pairs[stack.pop()] != bracket: return False return stack.is_empty() if __name__ == "__main__": from doctest import testmod testmod() examples = ["((()))", "((())", "(()))"] print("Balanced parentheses demonstration:\n") for example in examples: not_str = "" if balanced_parentheses(example) else "not " print(f"{example} is {not_str}balanced")
-1
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" Heap's algorithm returns the list of all permutations possible from a list. It minimizes movement by generating each permutation from the previous one by swapping only two elements. More information: https://en.wikipedia.org/wiki/Heap%27s_algorithm. """ def heaps(arr: list) -> list: """ Pure python implementation of the Heap's algorithm (recursive version), returning all permutations of a list. >>> heaps([]) [()] >>> heaps([0]) [(0,)] >>> heaps([-1, 1]) [(-1, 1), (1, -1)] >>> heaps([1, 2, 3]) [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)] >>> from itertools import permutations >>> sorted(heaps([1,2,3])) == sorted(permutations([1,2,3])) True >>> all(sorted(heaps(x)) == sorted(permutations(x)) ... for x in ([], [0], [-1, 1], [1, 2, 3])) True """ if len(arr) <= 1: return [tuple(arr)] res = [] def generate(k: int, arr: list): if k == 1: res.append(tuple(arr[:])) return generate(k - 1, arr) for i in range(k - 1): if k % 2 == 0: # k is even arr[i], arr[k - 1] = arr[k - 1], arr[i] else: # k is odd arr[0], arr[k - 1] = arr[k - 1], arr[0] generate(k - 1, arr) generate(len(arr), arr) return res if __name__ == "__main__": user_input = input("Enter numbers separated by a comma:\n").strip() arr = [int(item) for item in user_input.split(",")] print(heaps(arr))
""" Heap's algorithm returns the list of all permutations possible from a list. It minimizes movement by generating each permutation from the previous one by swapping only two elements. More information: https://en.wikipedia.org/wiki/Heap%27s_algorithm. """ def heaps(arr: list) -> list: """ Pure python implementation of the Heap's algorithm (recursive version), returning all permutations of a list. >>> heaps([]) [()] >>> heaps([0]) [(0,)] >>> heaps([-1, 1]) [(-1, 1), (1, -1)] >>> heaps([1, 2, 3]) [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)] >>> from itertools import permutations >>> sorted(heaps([1,2,3])) == sorted(permutations([1,2,3])) True >>> all(sorted(heaps(x)) == sorted(permutations(x)) ... for x in ([], [0], [-1, 1], [1, 2, 3])) True """ if len(arr) <= 1: return [tuple(arr)] res = [] def generate(k: int, arr: list): if k == 1: res.append(tuple(arr[:])) return generate(k - 1, arr) for i in range(k - 1): if k % 2 == 0: # k is even arr[i], arr[k - 1] = arr[k - 1], arr[i] else: # k is odd arr[0], arr[k - 1] = arr[k - 1], arr[0] generate(k - 1, arr) generate(len(arr), arr) return res if __name__ == "__main__": user_input = input("Enter numbers separated by a comma:\n").strip() arr = [int(item) for item in user_input.split(",")] print(heaps(arr))
-1
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" A Queue using a linked list like structure """ from typing import Any class Node: def __init__(self, data: Any) -> None: self.data = data self.next = None def __str__(self) -> str: return f"{self.data}" class LinkedQueue: """ >>> queue = LinkedQueue() >>> queue.is_empty() True >>> queue.put(5) >>> queue.put(9) >>> queue.put('python') >>> queue.is_empty(); False >>> queue.get() 5 >>> queue.put('algorithms') >>> queue.get() 9 >>> queue.get() 'python' >>> queue.get() 'algorithms' >>> queue.is_empty() True >>> queue.get() Traceback (most recent call last): ... IndexError: dequeue from empty queue """ def __init__(self) -> None: self.front = self.rear = None def __iter__(self): node = self.front while node: yield node.data node = node.next def __len__(self) -> int: """ >>> queue = LinkedQueue() >>> for i in range(1, 6): ... queue.put(i) >>> len(queue) 5 >>> for i in range(1, 6): ... assert len(queue) == 6 - i ... _ = queue.get() >>> len(queue) 0 """ return len(tuple(iter(self))) def __str__(self) -> str: """ >>> queue = LinkedQueue() >>> for i in range(1, 4): ... queue.put(i) >>> queue.put("Python") >>> queue.put(3.14) >>> queue.put(True) >>> str(queue) '1 <- 2 <- 3 <- Python <- 3.14 <- True' """ return " <- ".join(str(item) for item in self) def is_empty(self) -> bool: """ >>> queue = LinkedQueue() >>> queue.is_empty() True >>> for i in range(1, 6): ... queue.put(i) >>> queue.is_empty() False """ return len(self) == 0 def put(self, item) -> None: """ >>> queue = LinkedQueue() >>> queue.get() Traceback (most recent call last): ... IndexError: dequeue from empty queue >>> for i in range(1, 6): ... queue.put(i) >>> str(queue) '1 <- 2 <- 3 <- 4 <- 5' """ node = Node(item) if self.is_empty(): self.front = self.rear = node else: assert isinstance(self.rear, Node) self.rear.next = node self.rear = node def get(self) -> Any: """ >>> queue = LinkedQueue() >>> queue.get() Traceback (most recent call last): ... IndexError: dequeue from empty queue >>> queue = LinkedQueue() >>> for i in range(1, 6): ... queue.put(i) >>> for i in range(1, 6): ... assert queue.get() == i >>> len(queue) 0 """ if self.is_empty(): raise IndexError("dequeue from empty queue") assert isinstance(self.front, Node) node = self.front self.front = self.front.next if self.front is None: self.rear = None return node.data def clear(self) -> None: """ >>> queue = LinkedQueue() >>> for i in range(1, 6): ... queue.put(i) >>> queue.clear() >>> len(queue) 0 >>> str(queue) '' """ self.front = self.rear = None if __name__ == "__main__": from doctest import testmod testmod()
""" A Queue using a linked list like structure """ from typing import Any class Node: def __init__(self, data: Any) -> None: self.data = data self.next = None def __str__(self) -> str: return f"{self.data}" class LinkedQueue: """ >>> queue = LinkedQueue() >>> queue.is_empty() True >>> queue.put(5) >>> queue.put(9) >>> queue.put('python') >>> queue.is_empty(); False >>> queue.get() 5 >>> queue.put('algorithms') >>> queue.get() 9 >>> queue.get() 'python' >>> queue.get() 'algorithms' >>> queue.is_empty() True >>> queue.get() Traceback (most recent call last): ... IndexError: dequeue from empty queue """ def __init__(self) -> None: self.front = self.rear = None def __iter__(self): node = self.front while node: yield node.data node = node.next def __len__(self) -> int: """ >>> queue = LinkedQueue() >>> for i in range(1, 6): ... queue.put(i) >>> len(queue) 5 >>> for i in range(1, 6): ... assert len(queue) == 6 - i ... _ = queue.get() >>> len(queue) 0 """ return len(tuple(iter(self))) def __str__(self) -> str: """ >>> queue = LinkedQueue() >>> for i in range(1, 4): ... queue.put(i) >>> queue.put("Python") >>> queue.put(3.14) >>> queue.put(True) >>> str(queue) '1 <- 2 <- 3 <- Python <- 3.14 <- True' """ return " <- ".join(str(item) for item in self) def is_empty(self) -> bool: """ >>> queue = LinkedQueue() >>> queue.is_empty() True >>> for i in range(1, 6): ... queue.put(i) >>> queue.is_empty() False """ return len(self) == 0 def put(self, item) -> None: """ >>> queue = LinkedQueue() >>> queue.get() Traceback (most recent call last): ... IndexError: dequeue from empty queue >>> for i in range(1, 6): ... queue.put(i) >>> str(queue) '1 <- 2 <- 3 <- 4 <- 5' """ node = Node(item) if self.is_empty(): self.front = self.rear = node else: assert isinstance(self.rear, Node) self.rear.next = node self.rear = node def get(self) -> Any: """ >>> queue = LinkedQueue() >>> queue.get() Traceback (most recent call last): ... IndexError: dequeue from empty queue >>> queue = LinkedQueue() >>> for i in range(1, 6): ... queue.put(i) >>> for i in range(1, 6): ... assert queue.get() == i >>> len(queue) 0 """ if self.is_empty(): raise IndexError("dequeue from empty queue") assert isinstance(self.front, Node) node = self.front self.front = self.front.next if self.front is None: self.rear = None return node.data def clear(self) -> None: """ >>> queue = LinkedQueue() >>> for i in range(1, 6): ... queue.put(i) >>> queue.clear() >>> len(queue) 0 >>> str(queue) '' """ self.front = self.rear = None if __name__ == "__main__": from doctest import testmod testmod()
-1
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
# Program to create a Linked List from a sequence and
# print a string representation of it.


class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        """Returns a visual representation of the node and all its following nodes."""
        string_rep = ""
        temp = self
        while temp:
            string_rep += f"<{temp.data}> ---> "
            temp = temp.next
        string_rep += "<END>"
        return string_rep


def make_linked_list(elements_list):
    """Creates a Linked List from the elements of the given sequence
    (list/tuple) and returns the head of the Linked List."""

    # if elements_list is empty
    if not elements_list:
        raise Exception("The Elements List is empty")

    # Set first element as Head
    head = Node(elements_list[0])
    current = head
    # Loop through elements from position 1
    for data in elements_list[1:]:
        current.next = Node(data)
        current = current.next
    return head


list_data = [1, 3, 5, 32, 44, 12, 43]
print(f"List: {list_data}")
print("Creating Linked List from List.")
linked_list = make_linked_list(list_data)
print("Linked List:")
print(linked_list)
# Program to create a Linked List from a sequence and
# print a string representation of it.


class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        """Returns a visual representation of the node and all its following nodes."""
        string_rep = ""
        temp = self
        while temp:
            string_rep += f"<{temp.data}> ---> "
            temp = temp.next
        string_rep += "<END>"
        return string_rep


def make_linked_list(elements_list):
    """Creates a Linked List from the elements of the given sequence
    (list/tuple) and returns the head of the Linked List."""

    # if elements_list is empty
    if not elements_list:
        raise Exception("The Elements List is empty")

    # Set first element as Head
    head = Node(elements_list[0])
    current = head
    # Loop through elements from position 1
    for data in elements_list[1:]:
        current.next = Node(data)
        current = current.next
    return head


list_data = [1, 3, 5, 32, 44, 12, 43]
print(f"List: {list_data}")
print("Creating Linked List from List.")
linked_list = make_linked_list(list_data)
print("Linked List:")
print(linked_list)
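

# Illustrative sketch (an addition, not part of the original snippet): the same
# structure can also be built recursively by linking the head node to the list
# built from the remaining elements.
def make_linked_list_recursive(elements_list):
    """Recursive counterpart of make_linked_list; returns the head of the list."""
    if not elements_list:
        raise Exception("The Elements List is empty")
    head = Node(elements_list[0])
    if len(elements_list) > 1:
        head.next = make_linked_list_recursive(elements_list[1:])
    return head


print("Creating Linked List recursively from the same List.")
print(make_linked_list_recursive(list_data))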
-1
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" Problem 46: https://projecteuler.net/problem=46 It was proposed by Christian Goldbach that every odd composite number can be written as the sum of a prime and twice a square. 9 = 7 + 2 × 12 15 = 7 + 2 × 22 21 = 3 + 2 × 32 25 = 7 + 2 × 32 27 = 19 + 2 × 22 33 = 31 + 2 × 12 It turns out that the conjecture was false. What is the smallest odd composite that cannot be written as the sum of a prime and twice a square? """ from __future__ import annotations seive = [True] * 100001 i = 2 while i * i <= 100000: if seive[i]: for j in range(i * i, 100001, i): seive[j] = False i += 1 def is_prime(n: int) -> bool: """ Returns True if n is prime, False otherwise, for 2 <= n <= 100000 >>> is_prime(87) False >>> is_prime(23) True >>> is_prime(25363) False """ return seive[n] odd_composites = [num for num in range(3, len(seive), 2) if not is_prime(num)] def compute_nums(n: int) -> list[int]: """ Returns a list of first n odd composite numbers which do not follow the conjecture. >>> compute_nums(1) [5777] >>> compute_nums(2) [5777, 5993] >>> compute_nums(0) Traceback (most recent call last): ... ValueError: n must be >= 0 >>> compute_nums("a") Traceback (most recent call last): ... ValueError: n must be an integer >>> compute_nums(1.1) Traceback (most recent call last): ... ValueError: n must be an integer """ if not isinstance(n, int): raise ValueError("n must be an integer") if n <= 0: raise ValueError("n must be >= 0") list_nums = [] for num in range(len(odd_composites)): i = 0 while 2 * i * i <= odd_composites[num]: rem = odd_composites[num] - 2 * i * i if is_prime(rem): break i += 1 else: list_nums.append(odd_composites[num]) if len(list_nums) == n: return list_nums def solution() -> int: """Return the solution to the problem""" return compute_nums(1)[0] if __name__ == "__main__": print(f"{solution() = }")
""" Problem 46: https://projecteuler.net/problem=46 It was proposed by Christian Goldbach that every odd composite number can be written as the sum of a prime and twice a square. 9 = 7 + 2 × 12 15 = 7 + 2 × 22 21 = 3 + 2 × 32 25 = 7 + 2 × 32 27 = 19 + 2 × 22 33 = 31 + 2 × 12 It turns out that the conjecture was false. What is the smallest odd composite that cannot be written as the sum of a prime and twice a square? """ from __future__ import annotations seive = [True] * 100001 i = 2 while i * i <= 100000: if seive[i]: for j in range(i * i, 100001, i): seive[j] = False i += 1 def is_prime(n: int) -> bool: """ Returns True if n is prime, False otherwise, for 2 <= n <= 100000 >>> is_prime(87) False >>> is_prime(23) True >>> is_prime(25363) False """ return seive[n] odd_composites = [num for num in range(3, len(seive), 2) if not is_prime(num)] def compute_nums(n: int) -> list[int]: """ Returns a list of first n odd composite numbers which do not follow the conjecture. >>> compute_nums(1) [5777] >>> compute_nums(2) [5777, 5993] >>> compute_nums(0) Traceback (most recent call last): ... ValueError: n must be >= 0 >>> compute_nums("a") Traceback (most recent call last): ... ValueError: n must be an integer >>> compute_nums(1.1) Traceback (most recent call last): ... ValueError: n must be an integer """ if not isinstance(n, int): raise ValueError("n must be an integer") if n <= 0: raise ValueError("n must be >= 0") list_nums = [] for num in range(len(odd_composites)): i = 0 while 2 * i * i <= odd_composites[num]: rem = odd_composites[num] - 2 * i * i if is_prime(rem): break i += 1 else: list_nums.append(odd_composites[num]) if len(list_nums) == n: return list_nums def solution() -> int: """Return the solution to the problem""" return compute_nums(1)[0] if __name__ == "__main__": print(f"{solution() = }")
-1
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" Convert International System of Units (SI) and Binary prefixes """ from enum import Enum from typing import Union class SI_Unit(Enum): yotta = 24 zetta = 21 exa = 18 peta = 15 tera = 12 giga = 9 mega = 6 kilo = 3 hecto = 2 deca = 1 deci = -1 centi = -2 milli = -3 micro = -6 nano = -9 pico = -12 femto = -15 atto = -18 zepto = -21 yocto = -24 class Binary_Unit(Enum): yotta = 8 zetta = 7 exa = 6 peta = 5 tera = 4 giga = 3 mega = 2 kilo = 1 def convert_si_prefix( known_amount: float, known_prefix: Union[str, SI_Unit], unknown_prefix: Union[str, SI_Unit], ) -> float: """ Wikipedia reference: https://en.wikipedia.org/wiki/Binary_prefix Wikipedia reference: https://en.wikipedia.org/wiki/International_System_of_Units >>> convert_si_prefix(1, SI_Unit.giga, SI_Unit.mega) 1000 >>> convert_si_prefix(1, SI_Unit.mega, SI_Unit.giga) 0.001 >>> convert_si_prefix(1, SI_Unit.kilo, SI_Unit.kilo) 1 >>> convert_si_prefix(1, 'giga', 'mega') 1000 >>> convert_si_prefix(1, 'gIGa', 'mEGa') 1000 """ if isinstance(known_prefix, str): known_prefix: SI_Unit = SI_Unit[known_prefix.lower()] if isinstance(unknown_prefix, str): unknown_prefix: SI_Unit = SI_Unit[unknown_prefix.lower()] unknown_amount = known_amount * (10 ** (known_prefix.value - unknown_prefix.value)) return unknown_amount def convert_binary_prefix( known_amount: float, known_prefix: Union[str, Binary_Unit], unknown_prefix: Union[str, Binary_Unit], ) -> float: """ Wikipedia reference: https://en.wikipedia.org/wiki/Metric_prefix >>> convert_binary_prefix(1, Binary_Unit.giga, Binary_Unit.mega) 1024 >>> convert_binary_prefix(1, Binary_Unit.mega, Binary_Unit.giga) 0.0009765625 >>> convert_binary_prefix(1, Binary_Unit.kilo, Binary_Unit.kilo) 1 >>> convert_binary_prefix(1, 'giga', 'mega') 1024 >>> convert_binary_prefix(1, 'gIGa', 'mEGa') 1024 """ if isinstance(known_prefix, str): known_prefix: Binary_Unit = Binary_Unit[known_prefix.lower()] if isinstance(unknown_prefix, str): unknown_prefix: Binary_Unit = Binary_Unit[unknown_prefix.lower()] unknown_amount = known_amount * ( 2 ** ((known_prefix.value - unknown_prefix.value) * 10) ) return unknown_amount if __name__ == "__main__": import doctest doctest.testmod()
""" Convert International System of Units (SI) and Binary prefixes """ from enum import Enum from typing import Union class SI_Unit(Enum): yotta = 24 zetta = 21 exa = 18 peta = 15 tera = 12 giga = 9 mega = 6 kilo = 3 hecto = 2 deca = 1 deci = -1 centi = -2 milli = -3 micro = -6 nano = -9 pico = -12 femto = -15 atto = -18 zepto = -21 yocto = -24 class Binary_Unit(Enum): yotta = 8 zetta = 7 exa = 6 peta = 5 tera = 4 giga = 3 mega = 2 kilo = 1 def convert_si_prefix( known_amount: float, known_prefix: Union[str, SI_Unit], unknown_prefix: Union[str, SI_Unit], ) -> float: """ Wikipedia reference: https://en.wikipedia.org/wiki/Binary_prefix Wikipedia reference: https://en.wikipedia.org/wiki/International_System_of_Units >>> convert_si_prefix(1, SI_Unit.giga, SI_Unit.mega) 1000 >>> convert_si_prefix(1, SI_Unit.mega, SI_Unit.giga) 0.001 >>> convert_si_prefix(1, SI_Unit.kilo, SI_Unit.kilo) 1 >>> convert_si_prefix(1, 'giga', 'mega') 1000 >>> convert_si_prefix(1, 'gIGa', 'mEGa') 1000 """ if isinstance(known_prefix, str): known_prefix: SI_Unit = SI_Unit[known_prefix.lower()] if isinstance(unknown_prefix, str): unknown_prefix: SI_Unit = SI_Unit[unknown_prefix.lower()] unknown_amount = known_amount * (10 ** (known_prefix.value - unknown_prefix.value)) return unknown_amount def convert_binary_prefix( known_amount: float, known_prefix: Union[str, Binary_Unit], unknown_prefix: Union[str, Binary_Unit], ) -> float: """ Wikipedia reference: https://en.wikipedia.org/wiki/Metric_prefix >>> convert_binary_prefix(1, Binary_Unit.giga, Binary_Unit.mega) 1024 >>> convert_binary_prefix(1, Binary_Unit.mega, Binary_Unit.giga) 0.0009765625 >>> convert_binary_prefix(1, Binary_Unit.kilo, Binary_Unit.kilo) 1 >>> convert_binary_prefix(1, 'giga', 'mega') 1024 >>> convert_binary_prefix(1, 'gIGa', 'mEGa') 1024 """ if isinstance(known_prefix, str): known_prefix: Binary_Unit = Binary_Unit[known_prefix.lower()] if isinstance(unknown_prefix, str): unknown_prefix: Binary_Unit = Binary_Unit[unknown_prefix.lower()] unknown_amount = known_amount * ( 2 ** ((known_prefix.value - unknown_prefix.value) * 10) ) return unknown_amount if __name__ == "__main__": import doctest doctest.testmod()
-1
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" Find Volumes of Various Shapes. Wikipedia reference: https://en.wikipedia.org/wiki/Volume """ from math import pi, pow from typing import Union def vol_cube(side_length: Union[int, float]) -> float: """ Calculate the Volume of a Cube. >>> vol_cube(1) 1.0 >>> vol_cube(3) 27.0 """ return pow(side_length, 3) def vol_cuboid(width: float, height: float, length: float) -> float: """ Calculate the Volume of a Cuboid. :return multiple of width, length and height >>> vol_cuboid(1, 1, 1) 1.0 >>> vol_cuboid(1, 2, 3) 6.0 """ return float(width * height * length) def vol_cone(area_of_base: float, height: float) -> float: """ Calculate the Volume of a Cone. Wikipedia reference: https://en.wikipedia.org/wiki/Cone :return (1/3) * area_of_base * height >>> vol_cone(10, 3) 10.0 >>> vol_cone(1, 1) 0.3333333333333333 """ return area_of_base * height / 3.0 def vol_right_circ_cone(radius: float, height: float) -> float: """ Calculate the Volume of a Right Circular Cone. Wikipedia reference: https://en.wikipedia.org/wiki/Cone :return (1/3) * pi * radius^2 * height >>> vol_right_circ_cone(2, 3) 12.566370614359172 """ return pi * pow(radius, 2) * height / 3.0 def vol_prism(area_of_base: float, height: float) -> float: """ Calculate the Volume of a Prism. Wikipedia reference: https://en.wikipedia.org/wiki/Prism_(geometry) :return V = Bh >>> vol_prism(10, 2) 20.0 >>> vol_prism(11, 1) 11.0 """ return float(area_of_base * height) def vol_pyramid(area_of_base: float, height: float) -> float: """ Calculate the Volume of a Pyramid. Wikipedia reference: https://en.wikipedia.org/wiki/Pyramid_(geometry) :return (1/3) * Bh >>> vol_pyramid(10, 3) 10.0 >>> vol_pyramid(1.5, 3) 1.5 """ return area_of_base * height / 3.0 def vol_sphere(radius: float) -> float: """ Calculate the Volume of a Sphere. Wikipedia reference: https://en.wikipedia.org/wiki/Sphere :return (4/3) * pi * r^3 >>> vol_sphere(5) 523.5987755982989 >>> vol_sphere(1) 4.1887902047863905 """ return 4 / 3 * pi * pow(radius, 3) def vol_circular_cylinder(radius: float, height: float) -> float: """Calculate the Volume of a Circular Cylinder. Wikipedia reference: https://en.wikipedia.org/wiki/Cylinder :return pi * radius^2 * height >>> vol_circular_cylinder(1, 1) 3.141592653589793 >>> vol_circular_cylinder(4, 3) 150.79644737231007 """ return pi * pow(radius, 2) * height def main(): """Print the Results of Various Volume Calculations.""" print("Volumes:") print("Cube: " + str(vol_cube(2))) # = 8 print("Cuboid: " + str(vol_cuboid(2, 2, 2))) # = 8 print("Cone: " + str(vol_cone(2, 2))) # ~= 1.33 print("Right Circular Cone: " + str(vol_right_circ_cone(2, 2))) # ~= 8.38 print("Prism: " + str(vol_prism(2, 2))) # = 4 print("Pyramid: " + str(vol_pyramid(2, 2))) # ~= 1.33 print("Sphere: " + str(vol_sphere(2))) # ~= 33.5 print("Circular Cylinder: " + str(vol_circular_cylinder(2, 2))) # ~= 25.1 if __name__ == "__main__": main()
""" Find Volumes of Various Shapes. Wikipedia reference: https://en.wikipedia.org/wiki/Volume """ from math import pi, pow from typing import Union def vol_cube(side_length: Union[int, float]) -> float: """ Calculate the Volume of a Cube. >>> vol_cube(1) 1.0 >>> vol_cube(3) 27.0 """ return pow(side_length, 3) def vol_cuboid(width: float, height: float, length: float) -> float: """ Calculate the Volume of a Cuboid. :return multiple of width, length and height >>> vol_cuboid(1, 1, 1) 1.0 >>> vol_cuboid(1, 2, 3) 6.0 """ return float(width * height * length) def vol_cone(area_of_base: float, height: float) -> float: """ Calculate the Volume of a Cone. Wikipedia reference: https://en.wikipedia.org/wiki/Cone :return (1/3) * area_of_base * height >>> vol_cone(10, 3) 10.0 >>> vol_cone(1, 1) 0.3333333333333333 """ return area_of_base * height / 3.0 def vol_right_circ_cone(radius: float, height: float) -> float: """ Calculate the Volume of a Right Circular Cone. Wikipedia reference: https://en.wikipedia.org/wiki/Cone :return (1/3) * pi * radius^2 * height >>> vol_right_circ_cone(2, 3) 12.566370614359172 """ return pi * pow(radius, 2) * height / 3.0 def vol_prism(area_of_base: float, height: float) -> float: """ Calculate the Volume of a Prism. Wikipedia reference: https://en.wikipedia.org/wiki/Prism_(geometry) :return V = Bh >>> vol_prism(10, 2) 20.0 >>> vol_prism(11, 1) 11.0 """ return float(area_of_base * height) def vol_pyramid(area_of_base: float, height: float) -> float: """ Calculate the Volume of a Pyramid. Wikipedia reference: https://en.wikipedia.org/wiki/Pyramid_(geometry) :return (1/3) * Bh >>> vol_pyramid(10, 3) 10.0 >>> vol_pyramid(1.5, 3) 1.5 """ return area_of_base * height / 3.0 def vol_sphere(radius: float) -> float: """ Calculate the Volume of a Sphere. Wikipedia reference: https://en.wikipedia.org/wiki/Sphere :return (4/3) * pi * r^3 >>> vol_sphere(5) 523.5987755982989 >>> vol_sphere(1) 4.1887902047863905 """ return 4 / 3 * pi * pow(radius, 3) def vol_circular_cylinder(radius: float, height: float) -> float: """Calculate the Volume of a Circular Cylinder. Wikipedia reference: https://en.wikipedia.org/wiki/Cylinder :return pi * radius^2 * height >>> vol_circular_cylinder(1, 1) 3.141592653589793 >>> vol_circular_cylinder(4, 3) 150.79644737231007 """ return pi * pow(radius, 2) * height def main(): """Print the Results of Various Volume Calculations.""" print("Volumes:") print("Cube: " + str(vol_cube(2))) # = 8 print("Cuboid: " + str(vol_cuboid(2, 2, 2))) # = 8 print("Cone: " + str(vol_cone(2, 2))) # ~= 1.33 print("Right Circular Cone: " + str(vol_right_circ_cone(2, 2))) # ~= 8.38 print("Prism: " + str(vol_prism(2, 2))) # = 4 print("Pyramid: " + str(vol_pyramid(2, 2))) # ~= 1.33 print("Sphere: " + str(vol_sphere(2))) # ~= 33.5 print("Circular Cylinder: " + str(vol_circular_cylinder(2, 2))) # ~= 25.1 if __name__ == "__main__": main()
-1
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
END = "#" class Trie: def __init__(self): self._trie = {} def insert_word(self, text): trie = self._trie for char in text: if char not in trie: trie[char] = {} trie = trie[char] trie[END] = True def find_word(self, prefix): trie = self._trie for char in prefix: if char in trie: trie = trie[char] else: return [] return self._elements(trie) def _elements(self, d): result = [] for c, v in d.items(): if c == END: sub_result = [" "] else: sub_result = [c + s for s in self._elements(v)] result.extend(sub_result) return tuple(result) trie = Trie() words = ("depart", "detergent", "daring", "dog", "deer", "deal") for word in words: trie.insert_word(word) def autocomplete_using_trie(s): """ >>> trie = Trie() >>> for word in words: ... trie.insert_word(word) ... >>> matches = autocomplete_using_trie("de") "detergent " in matches True "dog " in matches False """ suffixes = trie.find_word(s) return tuple(s + w for w in suffixes) def main(): print(autocomplete_using_trie("de")) if __name__ == "__main__": main()
END = "#" class Trie: def __init__(self): self._trie = {} def insert_word(self, text): trie = self._trie for char in text: if char not in trie: trie[char] = {} trie = trie[char] trie[END] = True def find_word(self, prefix): trie = self._trie for char in prefix: if char in trie: trie = trie[char] else: return [] return self._elements(trie) def _elements(self, d): result = [] for c, v in d.items(): if c == END: sub_result = [" "] else: sub_result = [c + s for s in self._elements(v)] result.extend(sub_result) return tuple(result) trie = Trie() words = ("depart", "detergent", "daring", "dog", "deer", "deal") for word in words: trie.insert_word(word) def autocomplete_using_trie(s): """ >>> trie = Trie() >>> for word in words: ... trie.insert_word(word) ... >>> matches = autocomplete_using_trie("de") "detergent " in matches True "dog " in matches False """ suffixes = trie.find_word(s) return tuple(s + w for w in suffixes) def main(): print(autocomplete_using_trie("de")) if __name__ == "__main__": main()
-1
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures

# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


# Visualizing the Polynomial Regression results
def viz_polynomial():
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()
    return


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures

# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


# Visualizing the Polynomial Regression results
def viz_polynomial():
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()
    return


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
-1
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" Project Euler Problem 7: https://projecteuler.net/problem=7 10001st prime By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13. What is the 10001st prime number? References: - https://en.wikipedia.org/wiki/Prime_number """ from math import sqrt def is_prime(num: int) -> bool: """ Determines whether the given number is prime or not >>> is_prime(2) True >>> is_prime(15) False >>> is_prime(29) True >>> is_prime(0) False """ if num == 2: return True elif num % 2 == 0: return False else: sq = int(sqrt(num)) + 1 for i in range(3, sq, 2): if num % i == 0: return False return True def solution(nth: int = 10001) -> int: """ Returns the n-th prime number. >>> solution(6) 13 >>> solution(1) 2 >>> solution(3) 5 >>> solution(20) 71 >>> solution(50) 229 >>> solution(100) 541 """ count = 0 number = 1 while count != nth and number < 3: number += 1 if is_prime(number): count += 1 while count != nth: number += 2 if is_prime(number): count += 1 return number if __name__ == "__main__": print(f"{solution() = }")
""" Project Euler Problem 7: https://projecteuler.net/problem=7 10001st prime By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13. What is the 10001st prime number? References: - https://en.wikipedia.org/wiki/Prime_number """ from math import sqrt def is_prime(num: int) -> bool: """ Determines whether the given number is prime or not >>> is_prime(2) True >>> is_prime(15) False >>> is_prime(29) True >>> is_prime(0) False """ if num == 2: return True elif num % 2 == 0: return False else: sq = int(sqrt(num)) + 1 for i in range(3, sq, 2): if num % i == 0: return False return True def solution(nth: int = 10001) -> int: """ Returns the n-th prime number. >>> solution(6) 13 >>> solution(1) 2 >>> solution(3) 5 >>> solution(20) 71 >>> solution(50) 229 >>> solution(100) 541 """ count = 0 number = 1 while count != nth and number < 3: number += 1 if is_prime(number): count += 1 while count != nth: number += 2 if is_prime(number): count += 1 return number if __name__ == "__main__": print(f"{solution() = }")
-1
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" GEOMETRIC MEAN : https://en.wikipedia.org/wiki/Geometric_mean """ def is_geometric_series(series: list) -> bool: """ checking whether the input series is geometric series or not >>> is_geometric_series([2, 4, 8]) True >>> is_geometric_series([3, 6, 12, 24]) True >>> is_geometric_series([1, 2, 3]) False >>> is_geometric_series([0, 0, 3]) False """ if len(series) == 1: return True try: common_ratio = series[1] / series[0] for index in range(len(series) - 1): if series[index + 1] / series[index] != common_ratio: return False except ZeroDivisionError: return False return True def geometric_mean(series: list) -> float: """ return the geometric mean of series >>> geometric_mean([2, 4, 8]) 3.9999999999999996 >>> geometric_mean([3, 6, 12, 24]) 8.48528137423857 >>> geometric_mean([4, 8, 16]) 7.999999999999999 >>> geometric_mean(4) Traceback (most recent call last): ... ValueError: Input series is not valid, valid series - [2, 4, 8] >>> geometric_mean([1, 2, 3]) Traceback (most recent call last): ... ValueError: Input list is not a geometric series >>> geometric_mean([0, 2, 3]) Traceback (most recent call last): ... ValueError: Input list is not a geometric series >>> geometric_mean([]) Traceback (most recent call last): ... ValueError: Input list must be a non empty list """ if not isinstance(series, list): raise ValueError("Input series is not valid, valid series - [2, 4, 8]") if len(series) == 0: raise ValueError("Input list must be a non empty list") if not is_geometric_series(series): raise ValueError("Input list is not a geometric series") answer = 1 for value in series: answer *= value return pow(answer, 1 / len(series)) if __name__ == "__main__": import doctest doctest.testmod()
""" GEOMETRIC MEAN : https://en.wikipedia.org/wiki/Geometric_mean """ def is_geometric_series(series: list) -> bool: """ checking whether the input series is geometric series or not >>> is_geometric_series([2, 4, 8]) True >>> is_geometric_series([3, 6, 12, 24]) True >>> is_geometric_series([1, 2, 3]) False >>> is_geometric_series([0, 0, 3]) False """ if len(series) == 1: return True try: common_ratio = series[1] / series[0] for index in range(len(series) - 1): if series[index + 1] / series[index] != common_ratio: return False except ZeroDivisionError: return False return True def geometric_mean(series: list) -> float: """ return the geometric mean of series >>> geometric_mean([2, 4, 8]) 3.9999999999999996 >>> geometric_mean([3, 6, 12, 24]) 8.48528137423857 >>> geometric_mean([4, 8, 16]) 7.999999999999999 >>> geometric_mean(4) Traceback (most recent call last): ... ValueError: Input series is not valid, valid series - [2, 4, 8] >>> geometric_mean([1, 2, 3]) Traceback (most recent call last): ... ValueError: Input list is not a geometric series >>> geometric_mean([0, 2, 3]) Traceback (most recent call last): ... ValueError: Input list is not a geometric series >>> geometric_mean([]) Traceback (most recent call last): ... ValueError: Input list must be a non empty list """ if not isinstance(series, list): raise ValueError("Input series is not valid, valid series - [2, 4, 8]") if len(series) == 0: raise ValueError("Input list must be a non empty list") if not is_geometric_series(series): raise ValueError("Input list is not a geometric series") answer = 1 for value in series: answer *= value return pow(answer, 1 / len(series)) if __name__ == "__main__": import doctest doctest.testmod()
-1
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
from typing import List, Tuple """ Algorithm for calculating the most cost-efficient sequence for converting one string into another. The only allowed operations are --- Cost to copy a character is copy_cost --- Cost to replace a character is replace_cost --- Cost to delete a character is delete_cost --- Cost to insert a character is insert_cost """ def compute_transform_tables( source_string: str, destination_string: str, copy_cost: int, replace_cost: int, delete_cost: int, insert_cost: int, ) -> Tuple[List[int], List[str]]: source_seq = list(source_string) destination_seq = list(destination_string) len_source_seq = len(source_seq) len_destination_seq = len(destination_seq) costs = [ [0 for _ in range(len_destination_seq + 1)] for _ in range(len_source_seq + 1) ] ops = [ [0 for _ in range(len_destination_seq + 1)] for _ in range(len_source_seq + 1) ] for i in range(1, len_source_seq + 1): costs[i][0] = i * delete_cost ops[i][0] = "D%c" % source_seq[i - 1] for i in range(1, len_destination_seq + 1): costs[0][i] = i * insert_cost ops[0][i] = "I%c" % destination_seq[i - 1] for i in range(1, len_source_seq + 1): for j in range(1, len_destination_seq + 1): if source_seq[i - 1] == destination_seq[j - 1]: costs[i][j] = costs[i - 1][j - 1] + copy_cost ops[i][j] = "C%c" % source_seq[i - 1] else: costs[i][j] = costs[i - 1][j - 1] + replace_cost ops[i][j] = "R%c" % source_seq[i - 1] + str(destination_seq[j - 1]) if costs[i - 1][j] + delete_cost < costs[i][j]: costs[i][j] = costs[i - 1][j] + delete_cost ops[i][j] = "D%c" % source_seq[i - 1] if costs[i][j - 1] + insert_cost < costs[i][j]: costs[i][j] = costs[i][j - 1] + insert_cost ops[i][j] = "I%c" % destination_seq[j - 1] return costs, ops def assemble_transformation(ops: List[str], i: int, j: int) -> List[str]: if i == 0 and j == 0: return [] else: if ops[i][j][0] == "C" or ops[i][j][0] == "R": seq = assemble_transformation(ops, i - 1, j - 1) seq.append(ops[i][j]) return seq elif ops[i][j][0] == "D": seq = assemble_transformation(ops, i - 1, j) seq.append(ops[i][j]) return seq else: seq = assemble_transformation(ops, i, j - 1) seq.append(ops[i][j]) return seq if __name__ == "__main__": _, operations = compute_transform_tables("Python", "Algorithms", -1, 1, 2, 2) m = len(operations) n = len(operations[0]) sequence = assemble_transformation(operations, m - 1, n - 1) string = list("Python") i = 0 cost = 0 with open("min_cost.txt", "w") as file: for op in sequence: print("".join(string)) if op[0] == "C": file.write("%-16s" % "Copy %c" % op[1]) file.write("\t\t\t" + "".join(string)) file.write("\r\n") cost -= 1 elif op[0] == "R": string[i] = op[2] file.write("%-16s" % ("Replace %c" % op[1] + " with " + str(op[2]))) file.write("\t\t" + "".join(string)) file.write("\r\n") cost += 1 elif op[0] == "D": string.pop(i) file.write("%-16s" % "Delete %c" % op[1]) file.write("\t\t\t" + "".join(string)) file.write("\r\n") cost += 2 else: string.insert(i, op[1]) file.write("%-16s" % "Insert %c" % op[1]) file.write("\t\t\t" + "".join(string)) file.write("\r\n") cost += 2 i += 1 print("".join(string)) print("Cost: ", cost) file.write("\r\nMinimum cost: " + str(cost))
from typing import List, Tuple """ Algorithm for calculating the most cost-efficient sequence for converting one string into another. The only allowed operations are --- Cost to copy a character is copy_cost --- Cost to replace a character is replace_cost --- Cost to delete a character is delete_cost --- Cost to insert a character is insert_cost """ def compute_transform_tables( source_string: str, destination_string: str, copy_cost: int, replace_cost: int, delete_cost: int, insert_cost: int, ) -> Tuple[List[int], List[str]]: source_seq = list(source_string) destination_seq = list(destination_string) len_source_seq = len(source_seq) len_destination_seq = len(destination_seq) costs = [ [0 for _ in range(len_destination_seq + 1)] for _ in range(len_source_seq + 1) ] ops = [ [0 for _ in range(len_destination_seq + 1)] for _ in range(len_source_seq + 1) ] for i in range(1, len_source_seq + 1): costs[i][0] = i * delete_cost ops[i][0] = "D%c" % source_seq[i - 1] for i in range(1, len_destination_seq + 1): costs[0][i] = i * insert_cost ops[0][i] = "I%c" % destination_seq[i - 1] for i in range(1, len_source_seq + 1): for j in range(1, len_destination_seq + 1): if source_seq[i - 1] == destination_seq[j - 1]: costs[i][j] = costs[i - 1][j - 1] + copy_cost ops[i][j] = "C%c" % source_seq[i - 1] else: costs[i][j] = costs[i - 1][j - 1] + replace_cost ops[i][j] = "R%c" % source_seq[i - 1] + str(destination_seq[j - 1]) if costs[i - 1][j] + delete_cost < costs[i][j]: costs[i][j] = costs[i - 1][j] + delete_cost ops[i][j] = "D%c" % source_seq[i - 1] if costs[i][j - 1] + insert_cost < costs[i][j]: costs[i][j] = costs[i][j - 1] + insert_cost ops[i][j] = "I%c" % destination_seq[j - 1] return costs, ops def assemble_transformation(ops: List[str], i: int, j: int) -> List[str]: if i == 0 and j == 0: return [] else: if ops[i][j][0] == "C" or ops[i][j][0] == "R": seq = assemble_transformation(ops, i - 1, j - 1) seq.append(ops[i][j]) return seq elif ops[i][j][0] == "D": seq = assemble_transformation(ops, i - 1, j) seq.append(ops[i][j]) return seq else: seq = assemble_transformation(ops, i, j - 1) seq.append(ops[i][j]) return seq if __name__ == "__main__": _, operations = compute_transform_tables("Python", "Algorithms", -1, 1, 2, 2) m = len(operations) n = len(operations[0]) sequence = assemble_transformation(operations, m - 1, n - 1) string = list("Python") i = 0 cost = 0 with open("min_cost.txt", "w") as file: for op in sequence: print("".join(string)) if op[0] == "C": file.write("%-16s" % "Copy %c" % op[1]) file.write("\t\t\t" + "".join(string)) file.write("\r\n") cost -= 1 elif op[0] == "R": string[i] = op[2] file.write("%-16s" % ("Replace %c" % op[1] + " with " + str(op[2]))) file.write("\t\t" + "".join(string)) file.write("\r\n") cost += 1 elif op[0] == "D": string.pop(i) file.write("%-16s" % "Delete %c" % op[1]) file.write("\t\t\t" + "".join(string)) file.write("\r\n") cost += 2 else: string.insert(i, op[1]) file.write("%-16s" % "Insert %c" % op[1]) file.write("\t\t\t" + "".join(string)) file.write("\r\n") cost += 2 i += 1 print("".join(string)) print("Cost: ", cost) file.write("\r\nMinimum cost: " + str(cost))
-1
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" Get the citation from google scholar using title and year of publication, and volume and pages of journal. """ import requests from bs4 import BeautifulSoup def get_citation(base_url: str, params: dict) -> str: """ Return the citation number. """ soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser") div = soup.find("div", attrs={"class": "gs_ri"}) anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a") return anchors[2].get_text() if __name__ == "__main__": params = { "title": ( "Precisely geometry controlled microsupercapacitors for ultrahigh areal " "capacitance, volumetric capacitance, and energy density" ), "journal": "Chem. Mater.", "volume": 30, "pages": "3979-3990", "year": 2018, "hl": "en", } print(get_citation("http://scholar.google.com/scholar_lookup", params=params))
""" Get the citation from google scholar using title and year of publication, and volume and pages of journal. """ import requests from bs4 import BeautifulSoup def get_citation(base_url: str, params: dict) -> str: """ Return the citation number. """ soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser") div = soup.find("div", attrs={"class": "gs_ri"}) anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a") return anchors[2].get_text() if __name__ == "__main__": params = { "title": ( "Precisely geometry controlled microsupercapacitors for ultrahigh areal " "capacitance, volumetric capacitance, and energy density" ), "journal": "Chem. Mater.", "volume": 30, "pages": "3979-3990", "year": 2018, "hl": "en", } print(get_citation("http://scholar.google.com/scholar_lookup", params=params))
-1
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" Project Euler Problem 2: https://projecteuler.net/problem=2 Even Fibonacci Numbers Each new term in the Fibonacci sequence is generated by adding the previous two terms. By starting with 1 and 2, the first 10 terms will be: 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ... By considering the terms in the Fibonacci sequence whose values do not exceed four million, find the sum of the even-valued terms. References: - https://en.wikipedia.org/wiki/Fibonacci_number """ import math from decimal import Decimal, getcontext def solution(n: int = 4000000) -> int: """ Returns the sum of all even fibonacci sequence elements that are lower or equal to n. >>> solution(10) 10 >>> solution(15) 10 >>> solution(2) 2 >>> solution(1) 0 >>> solution(34) 44 >>> solution(3.4) 2 >>> solution(0) Traceback (most recent call last): ... ValueError: Parameter n must be greater than or equal to one. >>> solution(-17) Traceback (most recent call last): ... ValueError: Parameter n must be greater than or equal to one. >>> solution([]) Traceback (most recent call last): ... TypeError: Parameter n must be int or castable to int. >>> solution("asd") Traceback (most recent call last): ... TypeError: Parameter n must be int or castable to int. """ try: n = int(n) except (TypeError, ValueError): raise TypeError("Parameter n must be int or castable to int.") if n <= 0: raise ValueError("Parameter n must be greater than or equal to one.") getcontext().prec = 100 phi = (Decimal(5) ** Decimal(0.5) + 1) / Decimal(2) index = (math.floor(math.log(n * (phi + 2), phi) - 1) // 3) * 3 + 2 num = Decimal(round(phi ** Decimal(index + 1))) / (phi + 2) total = num // 2 return int(total) if __name__ == "__main__": print(f"{solution() = }")
""" Project Euler Problem 2: https://projecteuler.net/problem=2 Even Fibonacci Numbers Each new term in the Fibonacci sequence is generated by adding the previous two terms. By starting with 1 and 2, the first 10 terms will be: 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ... By considering the terms in the Fibonacci sequence whose values do not exceed four million, find the sum of the even-valued terms. References: - https://en.wikipedia.org/wiki/Fibonacci_number """ import math from decimal import Decimal, getcontext def solution(n: int = 4000000) -> int: """ Returns the sum of all even fibonacci sequence elements that are lower or equal to n. >>> solution(10) 10 >>> solution(15) 10 >>> solution(2) 2 >>> solution(1) 0 >>> solution(34) 44 >>> solution(3.4) 2 >>> solution(0) Traceback (most recent call last): ... ValueError: Parameter n must be greater than or equal to one. >>> solution(-17) Traceback (most recent call last): ... ValueError: Parameter n must be greater than or equal to one. >>> solution([]) Traceback (most recent call last): ... TypeError: Parameter n must be int or castable to int. >>> solution("asd") Traceback (most recent call last): ... TypeError: Parameter n must be int or castable to int. """ try: n = int(n) except (TypeError, ValueError): raise TypeError("Parameter n must be int or castable to int.") if n <= 0: raise ValueError("Parameter n must be greater than or equal to one.") getcontext().prec = 100 phi = (Decimal(5) ** Decimal(0.5) + 1) / Decimal(2) index = (math.floor(math.log(n * (phi + 2), phi) - 1) // 3) * 3 + 2 num = Decimal(round(phi ** Decimal(index + 1))) / (phi + 2) total = num // 2 return int(total) if __name__ == "__main__": print(f"{solution() = }")
-1
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" Problem 43: https://projecteuler.net/problem=43 The number, 1406357289, is a 0 to 9 pandigital number because it is made up of each of the digits 0 to 9 in some order, but it also has a rather interesting sub-string divisibility property. Let d1 be the 1st digit, d2 be the 2nd digit, and so on. In this way, we note the following: d2d3d4=406 is divisible by 2 d3d4d5=063 is divisible by 3 d4d5d6=635 is divisible by 5 d5d6d7=357 is divisible by 7 d6d7d8=572 is divisible by 11 d7d8d9=728 is divisible by 13 d8d9d10=289 is divisible by 17 Find the sum of all 0 to 9 pandigital numbers with this property. """ from itertools import permutations def is_substring_divisible(num: tuple) -> bool: """ Returns True if the pandigital number passes all the divisibility tests. >>> is_substring_divisible((0, 1, 2, 4, 6, 5, 7, 3, 8, 9)) False >>> is_substring_divisible((5, 1, 2, 4, 6, 0, 7, 8, 3, 9)) False >>> is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9)) True """ tests = [2, 3, 5, 7, 11, 13, 17] for i, test in enumerate(tests): if (num[i + 1] * 100 + num[i + 2] * 10 + num[i + 3]) % test != 0: return False return True def solution(n: int = 10) -> int: """ Returns the sum of all pandigital numbers which pass the divisiility tests. >>> solution(10) 16695334890 """ list_nums = [ int("".join(map(str, num))) for num in permutations(range(n)) if is_substring_divisible(num) ] return sum(list_nums) if __name__ == "__main__": print(f"{solution() = }")
""" Problem 43: https://projecteuler.net/problem=43 The number, 1406357289, is a 0 to 9 pandigital number because it is made up of each of the digits 0 to 9 in some order, but it also has a rather interesting sub-string divisibility property. Let d1 be the 1st digit, d2 be the 2nd digit, and so on. In this way, we note the following: d2d3d4=406 is divisible by 2 d3d4d5=063 is divisible by 3 d4d5d6=635 is divisible by 5 d5d6d7=357 is divisible by 7 d6d7d8=572 is divisible by 11 d7d8d9=728 is divisible by 13 d8d9d10=289 is divisible by 17 Find the sum of all 0 to 9 pandigital numbers with this property. """ from itertools import permutations def is_substring_divisible(num: tuple) -> bool: """ Returns True if the pandigital number passes all the divisibility tests. >>> is_substring_divisible((0, 1, 2, 4, 6, 5, 7, 3, 8, 9)) False >>> is_substring_divisible((5, 1, 2, 4, 6, 0, 7, 8, 3, 9)) False >>> is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9)) True """ tests = [2, 3, 5, 7, 11, 13, 17] for i, test in enumerate(tests): if (num[i + 1] * 100 + num[i + 2] * 10 + num[i + 3]) % test != 0: return False return True def solution(n: int = 10) -> int: """ Returns the sum of all pandigital numbers which pass the divisiility tests. >>> solution(10) 16695334890 """ list_nums = [ int("".join(map(str, num))) for num in permutations(range(n)) if is_substring_divisible(num) ] return sum(list_nums) if __name__ == "__main__": print(f"{solution() = }")
-1
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
#
#
-1
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
from typing import List def solve_maze(maze: List[List[int]]) -> bool: """ This method solves the "rat in maze" problem. In this problem we have some n by n matrix, a start point and an end point. We want to go from the start to the end. In this matrix zeroes represent walls and ones paths we can use. Parameters : maze(2D matrix) : maze Returns: Return: True if the maze has a solution or False if it does not. >>> maze = [[0, 1, 0, 1, 1], ... [0, 0, 0, 0, 0], ... [1, 0, 1, 0, 1], ... [0, 0, 1, 0, 0], ... [1, 0, 0, 1, 0]] >>> solve_maze(maze) [1, 0, 0, 0, 0] [1, 1, 1, 1, 0] [0, 0, 0, 1, 0] [0, 0, 0, 1, 1] [0, 0, 0, 0, 1] True >>> maze = [[0, 1, 0, 1, 1], ... [0, 0, 0, 0, 0], ... [0, 0, 0, 0, 1], ... [0, 0, 0, 0, 0], ... [0, 0, 0, 0, 0]] >>> solve_maze(maze) [1, 0, 0, 0, 0] [1, 0, 0, 0, 0] [1, 0, 0, 0, 0] [1, 0, 0, 0, 0] [1, 1, 1, 1, 1] True >>> maze = [[0, 0, 0], ... [0, 1, 0], ... [1, 0, 0]] >>> solve_maze(maze) [1, 1, 1] [0, 0, 1] [0, 0, 1] True >>> maze = [[0, 1, 0], ... [0, 1, 0], ... [1, 0, 0]] >>> solve_maze(maze) No solution exists! False >>> maze = [[0, 1], ... [1, 0]] >>> solve_maze(maze) No solution exists! False """ size = len(maze) # We need to create solution object to save path. solutions = [[0 for _ in range(size)] for _ in range(size)] solved = run_maze(maze, 0, 0, solutions) if solved: print("\n".join(str(row) for row in solutions)) else: print("No solution exists!") return solved def run_maze(maze: List[List[int]], i: int, j: int, solutions: List[List[int]]) -> bool: """ This method is recursive starting from (i, j) and going in one of four directions: up, down, left, right. If a path is found to destination it returns True otherwise it returns False. Parameters: maze(2D matrix) : maze i, j : coordinates of matrix solutions(2D matrix) : solutions Returns: Boolean if path is found True, Otherwise False. """ size = len(maze) # Final check point. if i == j == (size - 1): solutions[i][j] = 1 return True lower_flag = (not (i < 0)) and (not (j < 0)) # Check lower bounds upper_flag = (i < size) and (j < size) # Check upper bounds if lower_flag and upper_flag: # check for already visited and block points. block_flag = (not (solutions[i][j])) and (not (maze[i][j])) if block_flag: # check visited solutions[i][j] = 1 # check for directions if ( run_maze(maze, i + 1, j, solutions) or run_maze(maze, i, j + 1, solutions) or run_maze(maze, i - 1, j, solutions) or run_maze(maze, i, j - 1, solutions) ): return True solutions[i][j] = 0 return False return False if __name__ == "__main__": import doctest doctest.testmod()
from typing import List def solve_maze(maze: List[List[int]]) -> bool: """ This method solves the "rat in maze" problem. In this problem we have some n by n matrix, a start point and an end point. We want to go from the start to the end. In this matrix zeroes represent walls and ones paths we can use. Parameters : maze(2D matrix) : maze Returns: Return: True if the maze has a solution or False if it does not. >>> maze = [[0, 1, 0, 1, 1], ... [0, 0, 0, 0, 0], ... [1, 0, 1, 0, 1], ... [0, 0, 1, 0, 0], ... [1, 0, 0, 1, 0]] >>> solve_maze(maze) [1, 0, 0, 0, 0] [1, 1, 1, 1, 0] [0, 0, 0, 1, 0] [0, 0, 0, 1, 1] [0, 0, 0, 0, 1] True >>> maze = [[0, 1, 0, 1, 1], ... [0, 0, 0, 0, 0], ... [0, 0, 0, 0, 1], ... [0, 0, 0, 0, 0], ... [0, 0, 0, 0, 0]] >>> solve_maze(maze) [1, 0, 0, 0, 0] [1, 0, 0, 0, 0] [1, 0, 0, 0, 0] [1, 0, 0, 0, 0] [1, 1, 1, 1, 1] True >>> maze = [[0, 0, 0], ... [0, 1, 0], ... [1, 0, 0]] >>> solve_maze(maze) [1, 1, 1] [0, 0, 1] [0, 0, 1] True >>> maze = [[0, 1, 0], ... [0, 1, 0], ... [1, 0, 0]] >>> solve_maze(maze) No solution exists! False >>> maze = [[0, 1], ... [1, 0]] >>> solve_maze(maze) No solution exists! False """ size = len(maze) # We need to create solution object to save path. solutions = [[0 for _ in range(size)] for _ in range(size)] solved = run_maze(maze, 0, 0, solutions) if solved: print("\n".join(str(row) for row in solutions)) else: print("No solution exists!") return solved def run_maze(maze: List[List[int]], i: int, j: int, solutions: List[List[int]]) -> bool: """ This method is recursive starting from (i, j) and going in one of four directions: up, down, left, right. If a path is found to destination it returns True otherwise it returns False. Parameters: maze(2D matrix) : maze i, j : coordinates of matrix solutions(2D matrix) : solutions Returns: Boolean if path is found True, Otherwise False. """ size = len(maze) # Final check point. if i == j == (size - 1): solutions[i][j] = 1 return True lower_flag = (not (i < 0)) and (not (j < 0)) # Check lower bounds upper_flag = (i < size) and (j < size) # Check upper bounds if lower_flag and upper_flag: # check for already visited and block points. block_flag = (not (solutions[i][j])) and (not (maze[i][j])) if block_flag: # check visited solutions[i][j] = 1 # check for directions if ( run_maze(maze, i + 1, j, solutions) or run_maze(maze, i, j + 1, solutions) or run_maze(maze, i - 1, j, solutions) or run_maze(maze, i, j - 1, solutions) ): return True solutions[i][j] = 0 return False return False if __name__ == "__main__": import doctest doctest.testmod()
-1
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
class Heap: """ A generic Heap class, can be used as min or max by passing the key function accordingly. """ def __init__(self, key=None): # Stores actual heap items. self.arr = list() # Stores indexes of each item for supporting updates and deletion. self.pos_map = {} # Stores current size of heap. self.size = 0 # Stores function used to evaluate the score of an item on which basis ordering # will be done. self.key = key or (lambda x: x) def _parent(self, i): """Returns parent index of given index if exists else None""" return int((i - 1) / 2) if i > 0 else None def _left(self, i): """Returns left-child-index of given index if exists else None""" left = int(2 * i + 1) return left if 0 < left < self.size else None def _right(self, i): """Returns right-child-index of given index if exists else None""" right = int(2 * i + 2) return right if 0 < right < self.size else None def _swap(self, i, j): """Performs changes required for swapping two elements in the heap""" # First update the indexes of the items in index map. self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = ( self.pos_map[self.arr[j][0]], self.pos_map[self.arr[i][0]], ) # Then swap the items in the list. self.arr[i], self.arr[j] = self.arr[j], self.arr[i] def _cmp(self, i, j): """Compares the two items using default comparison""" return self.arr[i][1] < self.arr[j][1] def _get_valid_parent(self, i): """ Returns index of valid parent as per desired ordering among given index and both it's children """ left = self._left(i) right = self._right(i) valid_parent = i if left is not None and not self._cmp(left, valid_parent): valid_parent = left if right is not None and not self._cmp(right, valid_parent): valid_parent = right return valid_parent def _heapify_up(self, index): """Fixes the heap in upward direction of given index""" parent = self._parent(index) while parent is not None and not self._cmp(index, parent): self._swap(index, parent) index, parent = parent, self._parent(parent) def _heapify_down(self, index): """Fixes the heap in downward direction of given index""" valid_parent = self._get_valid_parent(index) while valid_parent != index: self._swap(index, valid_parent) index, valid_parent = valid_parent, self._get_valid_parent(valid_parent) def update_item(self, item, item_value): """Updates given item value in heap if present""" if item not in self.pos_map: return index = self.pos_map[item] self.arr[index] = [item, self.key(item_value)] # Make sure heap is right in both up and down direction. # Ideally only one of them will make any change. self._heapify_up(index) self._heapify_down(index) def delete_item(self, item): """Deletes given item from heap if present""" if item not in self.pos_map: return index = self.pos_map[item] del self.pos_map[item] self.arr[index] = self.arr[self.size - 1] self.pos_map[self.arr[self.size - 1][0]] = index self.size -= 1 # Make sure heap is right in both up and down direction. Ideally only one # of them will make any change- so no performance loss in calling both. 
if self.size > index: self._heapify_up(index) self._heapify_down(index) def insert_item(self, item, item_value): """Inserts given item with given value in heap""" arr_len = len(self.arr) if arr_len == self.size: self.arr.append([item, self.key(item_value)]) else: self.arr[self.size] = [item, self.key(item_value)] self.pos_map[item] = self.size self.size += 1 self._heapify_up(self.size - 1) def get_top(self): """Returns top item tuple (Calculated value, item) from heap if present""" return self.arr[0] if self.size else None def extract_top(self): """ Return top item tuple (Calculated value, item) from heap and removes it as well if present """ top_item_tuple = self.get_top() if top_item_tuple: self.delete_item(top_item_tuple[0]) return top_item_tuple def test_heap() -> None: """ >>> h = Heap() # Max-heap >>> h.insert_item(5, 34) >>> h.insert_item(6, 31) >>> h.insert_item(7, 37) >>> h.get_top() [7, 37] >>> h.extract_top() [7, 37] >>> h.extract_top() [5, 34] >>> h.extract_top() [6, 31] >>> h = Heap(key=lambda x: -x) # Min heap >>> h.insert_item(5, 34) >>> h.insert_item(6, 31) >>> h.insert_item(7, 37) >>> h.get_top() [6, -31] >>> h.extract_top() [6, -31] >>> h.extract_top() [5, -34] >>> h.extract_top() [7, -37] >>> h.insert_item(8, 45) >>> h.insert_item(9, 40) >>> h.insert_item(10, 50) >>> h.get_top() [9, -40] >>> h.update_item(10, 30) >>> h.get_top() [10, -30] >>> h.delete_item(10) >>> h.get_top() [9, -40] """ pass if __name__ == "__main__": import doctest doctest.testmod()
class Heap: """ A generic Heap class, can be used as min or max by passing the key function accordingly. """ def __init__(self, key=None): # Stores actual heap items. self.arr = list() # Stores indexes of each item for supporting updates and deletion. self.pos_map = {} # Stores current size of heap. self.size = 0 # Stores function used to evaluate the score of an item on which basis ordering # will be done. self.key = key or (lambda x: x) def _parent(self, i): """Returns parent index of given index if exists else None""" return int((i - 1) / 2) if i > 0 else None def _left(self, i): """Returns left-child-index of given index if exists else None""" left = int(2 * i + 1) return left if 0 < left < self.size else None def _right(self, i): """Returns right-child-index of given index if exists else None""" right = int(2 * i + 2) return right if 0 < right < self.size else None def _swap(self, i, j): """Performs changes required for swapping two elements in the heap""" # First update the indexes of the items in index map. self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = ( self.pos_map[self.arr[j][0]], self.pos_map[self.arr[i][0]], ) # Then swap the items in the list. self.arr[i], self.arr[j] = self.arr[j], self.arr[i] def _cmp(self, i, j): """Compares the two items using default comparison""" return self.arr[i][1] < self.arr[j][1] def _get_valid_parent(self, i): """ Returns index of valid parent as per desired ordering among given index and both it's children """ left = self._left(i) right = self._right(i) valid_parent = i if left is not None and not self._cmp(left, valid_parent): valid_parent = left if right is not None and not self._cmp(right, valid_parent): valid_parent = right return valid_parent def _heapify_up(self, index): """Fixes the heap in upward direction of given index""" parent = self._parent(index) while parent is not None and not self._cmp(index, parent): self._swap(index, parent) index, parent = parent, self._parent(parent) def _heapify_down(self, index): """Fixes the heap in downward direction of given index""" valid_parent = self._get_valid_parent(index) while valid_parent != index: self._swap(index, valid_parent) index, valid_parent = valid_parent, self._get_valid_parent(valid_parent) def update_item(self, item, item_value): """Updates given item value in heap if present""" if item not in self.pos_map: return index = self.pos_map[item] self.arr[index] = [item, self.key(item_value)] # Make sure heap is right in both up and down direction. # Ideally only one of them will make any change. self._heapify_up(index) self._heapify_down(index) def delete_item(self, item): """Deletes given item from heap if present""" if item not in self.pos_map: return index = self.pos_map[item] del self.pos_map[item] self.arr[index] = self.arr[self.size - 1] self.pos_map[self.arr[self.size - 1][0]] = index self.size -= 1 # Make sure heap is right in both up and down direction. Ideally only one # of them will make any change- so no performance loss in calling both. 
if self.size > index: self._heapify_up(index) self._heapify_down(index) def insert_item(self, item, item_value): """Inserts given item with given value in heap""" arr_len = len(self.arr) if arr_len == self.size: self.arr.append([item, self.key(item_value)]) else: self.arr[self.size] = [item, self.key(item_value)] self.pos_map[item] = self.size self.size += 1 self._heapify_up(self.size - 1) def get_top(self): """Returns top item tuple (Calculated value, item) from heap if present""" return self.arr[0] if self.size else None def extract_top(self): """ Return top item tuple (Calculated value, item) from heap and removes it as well if present """ top_item_tuple = self.get_top() if top_item_tuple: self.delete_item(top_item_tuple[0]) return top_item_tuple def test_heap() -> None: """ >>> h = Heap() # Max-heap >>> h.insert_item(5, 34) >>> h.insert_item(6, 31) >>> h.insert_item(7, 37) >>> h.get_top() [7, 37] >>> h.extract_top() [7, 37] >>> h.extract_top() [5, 34] >>> h.extract_top() [6, 31] >>> h = Heap(key=lambda x: -x) # Min heap >>> h.insert_item(5, 34) >>> h.insert_item(6, 31) >>> h.insert_item(7, 37) >>> h.get_top() [6, -31] >>> h.extract_top() [6, -31] >>> h.extract_top() [5, -34] >>> h.extract_top() [7, -37] >>> h.insert_item(8, 45) >>> h.insert_item(9, 40) >>> h.insert_item(10, 50) >>> h.get_top() [9, -40] >>> h.update_item(10, 30) >>> h.get_top() [10, -30] >>> h.delete_item(10) >>> h.get_top() [9, -40] """ pass if __name__ == "__main__": import doctest doctest.testmod()
-1
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
-1
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
class things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_Weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(things(name[i], value[i], weight[i]))
    return menu


def greedy(item, maxCost, keyFunc):
    itemsCopy = sorted(item, key=keyFunc, reverse=True)
    result = []
    totalValue, total_cost = 0.0, 0.0
    for i in range(len(itemsCopy)):
        if (total_cost + itemsCopy[i].get_weight()) <= maxCost:
            result.append(itemsCopy[i])
            total_cost += itemsCopy[i].get_weight()
            totalValue += itemsCopy[i].get_value()
    return (result, totalValue)


def test_greedy():
    """
    >>> food = ["Burger", "Pizza", "Coca Cola", "Rice",
    ...         "Sambhar", "Chicken", "Fries", "Milk"]
    >>> value = [80, 100, 60, 70, 50, 110, 90, 60]
    >>> weight = [40, 60, 40, 70, 100, 85, 55, 70]
    >>> foods = build_menu(food, value, weight)
    >>> foods  # doctest: +NORMALIZE_WHITESPACE
    [things(Burger, 80, 40), things(Pizza, 100, 60), things(Coca Cola, 60, 40),
     things(Rice, 70, 70), things(Sambhar, 50, 100), things(Chicken, 110, 85),
     things(Fries, 90, 55), things(Milk, 60, 70)]
    >>> greedy(foods, 500, things.get_value)  # doctest: +NORMALIZE_WHITESPACE
    ([things(Chicken, 110, 85), things(Pizza, 100, 60), things(Fries, 90, 55),
      things(Burger, 80, 40), things(Rice, 70, 70), things(Coca Cola, 60, 40),
      things(Milk, 60, 70)], 570.0)
    """


if __name__ == "__main__":
    import doctest

    doctest.testmod()
class things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_Weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(things(name[i], value[i], weight[i]))
    return menu


def greedy(item, maxCost, keyFunc):
    itemsCopy = sorted(item, key=keyFunc, reverse=True)
    result = []
    totalValue, total_cost = 0.0, 0.0
    for i in range(len(itemsCopy)):
        if (total_cost + itemsCopy[i].get_weight()) <= maxCost:
            result.append(itemsCopy[i])
            total_cost += itemsCopy[i].get_weight()
            totalValue += itemsCopy[i].get_value()
    return (result, totalValue)


def test_greedy():
    """
    >>> food = ["Burger", "Pizza", "Coca Cola", "Rice",
    ...         "Sambhar", "Chicken", "Fries", "Milk"]
    >>> value = [80, 100, 60, 70, 50, 110, 90, 60]
    >>> weight = [40, 60, 40, 70, 100, 85, 55, 70]
    >>> foods = build_menu(food, value, weight)
    >>> foods  # doctest: +NORMALIZE_WHITESPACE
    [things(Burger, 80, 40), things(Pizza, 100, 60), things(Coca Cola, 60, 40),
     things(Rice, 70, 70), things(Sambhar, 50, 100), things(Chicken, 110, 85),
     things(Fries, 90, 55), things(Milk, 60, 70)]
    >>> greedy(foods, 500, things.get_value)  # doctest: +NORMALIZE_WHITESPACE
    ([things(Chicken, 110, 85), things(Pizza, 100, 60), things(Fries, 90, 55),
      things(Burger, 80, 40), things(Rice, 70, 70), things(Coca Cola, 60, 40),
      things(Milk, 60, 70)], 570.0)
    """


if __name__ == "__main__":
    import doctest

    doctest.testmod()
-1
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
from __future__ import annotations

import random


class Dice:
    NUM_SIDES = 6

    def __init__(self):
        """
        Initialize a six sided dice
        """
        self.sides = list(range(1, Dice.NUM_SIDES + 1))

    def roll(self):
        return random.choice(self.sides)

    def __str__(self):
        return "Fair Dice"


def throw_dice(num_throws: int, num_dice: int = 2) -> list[float]:
    """
    Return probability list of all possible sums when throwing dice.

    >>> random.seed(0)
    >>> throw_dice(10, 1)
    [10.0, 0.0, 30.0, 50.0, 10.0, 0.0]
    >>> throw_dice(100, 1)
    [19.0, 17.0, 17.0, 11.0, 23.0, 13.0]
    >>> throw_dice(1000, 1)
    [18.8, 15.5, 16.3, 17.6, 14.2, 17.6]
    >>> throw_dice(10000, 1)
    [16.35, 16.89, 16.93, 16.6, 16.52, 16.71]
    >>> throw_dice(10000, 2)
    [2.74, 5.6, 7.99, 11.26, 13.92, 16.7, 14.44, 10.63, 8.05, 5.92, 2.75]
    """
    dices = [Dice() for i in range(num_dice)]
    count_of_sum = [0] * (len(dices) * Dice.NUM_SIDES + 1)
    for i in range(num_throws):
        count_of_sum[sum(dice.roll() for dice in dices)] += 1
    probability = [round((count * 100) / num_throws, 2) for count in count_of_sum]
    return probability[num_dice:]  # remove probability of sums that never appear


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from __future__ import annotations

import random


class Dice:
    NUM_SIDES = 6

    def __init__(self):
        """
        Initialize a six sided dice
        """
        self.sides = list(range(1, Dice.NUM_SIDES + 1))

    def roll(self):
        return random.choice(self.sides)

    def __str__(self):
        return "Fair Dice"


def throw_dice(num_throws: int, num_dice: int = 2) -> list[float]:
    """
    Return probability list of all possible sums when throwing dice.

    >>> random.seed(0)
    >>> throw_dice(10, 1)
    [10.0, 0.0, 30.0, 50.0, 10.0, 0.0]
    >>> throw_dice(100, 1)
    [19.0, 17.0, 17.0, 11.0, 23.0, 13.0]
    >>> throw_dice(1000, 1)
    [18.8, 15.5, 16.3, 17.6, 14.2, 17.6]
    >>> throw_dice(10000, 1)
    [16.35, 16.89, 16.93, 16.6, 16.52, 16.71]
    >>> throw_dice(10000, 2)
    [2.74, 5.6, 7.99, 11.26, 13.92, 16.7, 14.44, 10.63, 8.05, 5.92, 2.75]
    """
    dices = [Dice() for i in range(num_dice)]
    count_of_sum = [0] * (len(dices) * Dice.NUM_SIDES + 1)
    for i in range(num_throws):
        count_of_sum[sum(dice.roll() for dice in dices)] += 1
    probability = [round((count * 100) / num_throws, 2) for count in count_of_sum]
    return probability[num_dice:]  # remove probability of sums that never appear


if __name__ == "__main__":
    import doctest

    doctest.testmod()
-1
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
# flake8: noqa """ Binomial Heap Reference: Advanced Data Structures, Peter Brass """ class Node: """ Node in a doubly-linked binomial tree, containing: - value - size of left subtree - link to left, right and parent nodes """ def __init__(self, val): self.val = val # Number of nodes in left subtree self.left_tree_size = 0 self.left = None self.right = None self.parent = None def mergeTrees(self, other): """ In-place merge of two binomial trees of equal size. Returns the root of the resulting tree """ assert self.left_tree_size == other.left_tree_size, "Unequal Sizes of Blocks" if self.val < other.val: other.left = self.right other.parent = None if self.right: self.right.parent = other self.right = other self.left_tree_size = self.left_tree_size * 2 + 1 return self else: self.left = other.right self.parent = None if other.right: other.right.parent = self other.right = self other.left_tree_size = other.left_tree_size * 2 + 1 return other class BinomialHeap: r""" Min-oriented priority queue implemented with the Binomial Heap data structure implemented with the BinomialHeap class. It supports: - Insert element in a heap with n elements: Guaranteed logn, amoratized 1 - Merge (meld) heaps of size m and n: O(logn + logm) - Delete Min: O(logn) - Peek (return min without deleting it): O(1) Example: Create a random permutation of 30 integers to be inserted and 19 of them deleted >>> import numpy as np >>> permutation = np.random.permutation(list(range(30))) Create a Heap and insert the 30 integers __init__() test >>> first_heap = BinomialHeap() 30 inserts - insert() test >>> for number in permutation: ... first_heap.insert(number) Size test >>> print(first_heap.size) 30 Deleting - delete() test >>> for i in range(25): ... print(first_heap.deleteMin(), end=" ") 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 Create a new Heap >>> second_heap = BinomialHeap() >>> vals = [17, 20, 31, 34] >>> for value in vals: ... second_heap.insert(value) The heap should have the following structure: 17 / \ # 31 / \ 20 34 / \ / \ # # # # preOrder() test >>> print(second_heap.preOrder()) [(17, 0), ('#', 1), (31, 1), (20, 2), ('#', 3), ('#', 3), (34, 2), ('#', 3), ('#', 3)] printing Heap - __str__() test >>> print(second_heap) 17 -# -31 --20 ---# ---# --34 ---# ---# mergeHeaps() test >>> merged = second_heap.mergeHeaps(first_heap) >>> merged.peek() 17 values in merged heap; (merge is inplace) >>> while not first_heap.isEmpty(): ... print(first_heap.deleteMin(), end=" ") 17 20 25 26 27 28 29 31 34 """ def __init__(self, bottom_root=None, min_node=None, heap_size=0): self.size = heap_size self.bottom_root = bottom_root self.min_node = min_node def mergeHeaps(self, other): """ In-place merge of two binomial heaps. 
Both of them become the resulting merged heap """ # Empty heaps corner cases if other.size == 0: return if self.size == 0: self.size = other.size self.bottom_root = other.bottom_root self.min_node = other.min_node return # Update size self.size = self.size + other.size # Update min.node if self.min_node.val > other.min_node.val: self.min_node = other.min_node # Merge # Order roots by left_subtree_size combined_roots_list = [] i, j = self.bottom_root, other.bottom_root while i or j: if i and ((not j) or i.left_tree_size < j.left_tree_size): combined_roots_list.append((i, True)) i = i.parent else: combined_roots_list.append((j, False)) j = j.parent # Insert links between them for i in range(len(combined_roots_list) - 1): if combined_roots_list[i][1] != combined_roots_list[i + 1][1]: combined_roots_list[i][0].parent = combined_roots_list[i + 1][0] combined_roots_list[i + 1][0].left = combined_roots_list[i][0] # Consecutively merge roots with same left_tree_size i = combined_roots_list[0][0] while i.parent: if ( (i.left_tree_size == i.parent.left_tree_size) and (not i.parent.parent) ) or ( i.left_tree_size == i.parent.left_tree_size and i.left_tree_size != i.parent.parent.left_tree_size ): # Neighbouring Nodes previous_node = i.left next_node = i.parent.parent # Merging trees i = i.mergeTrees(i.parent) # Updating links i.left = previous_node i.parent = next_node if previous_node: previous_node.parent = i if next_node: next_node.left = i else: i = i.parent # Updating self.bottom_root while i.left: i = i.left self.bottom_root = i # Update other other.size = self.size other.bottom_root = self.bottom_root other.min_node = self.min_node # Return the merged heap return self def insert(self, val): """ insert a value in the heap """ if self.size == 0: self.bottom_root = Node(val) self.size = 1 self.min_node = self.bottom_root else: # Create new node new_node = Node(val) # Update size self.size += 1 # update min_node if val < self.min_node.val: self.min_node = new_node # Put new_node as a bottom_root in heap self.bottom_root.left = new_node new_node.parent = self.bottom_root self.bottom_root = new_node # Consecutively merge roots with same left_tree_size while ( self.bottom_root.parent and self.bottom_root.left_tree_size == self.bottom_root.parent.left_tree_size ): # Next node next_node = self.bottom_root.parent.parent # Merge self.bottom_root = self.bottom_root.mergeTrees(self.bottom_root.parent) # Update Links self.bottom_root.parent = next_node self.bottom_root.left = None if next_node: next_node.left = self.bottom_root def peek(self): """ return min element without deleting it """ return self.min_node.val def isEmpty(self): return self.size == 0 def deleteMin(self): """ delete min element and return it """ # assert not self.isEmpty(), "Empty Heap" # Save minimal value min_value = self.min_node.val # Last element in heap corner case if self.size == 1: # Update size self.size = 0 # Update bottom root self.bottom_root = None # Update min_node self.min_node = None return min_value # No right subtree corner case # The structure of the tree implies that this should be the bottom root # and there is at least one other root if self.min_node.right is None: # Update size self.size -= 1 # Update bottom root self.bottom_root = self.bottom_root.parent self.bottom_root.left = None # Update min_node self.min_node = self.bottom_root i = self.bottom_root.parent while i: if i.val < self.min_node.val: self.min_node = i i = i.parent return min_value # General case # Find the BinomialHeap of the right subtree of 
min_node bottom_of_new = self.min_node.right bottom_of_new.parent = None min_of_new = bottom_of_new size_of_new = 1 # Size, min_node and bottom_root while bottom_of_new.left: size_of_new = size_of_new * 2 + 1 bottom_of_new = bottom_of_new.left if bottom_of_new.val < min_of_new.val: min_of_new = bottom_of_new # Corner case of single root on top left path if (not self.min_node.left) and (not self.min_node.parent): self.size = size_of_new self.bottom_root = bottom_of_new self.min_node = min_of_new # print("Single root, multiple nodes case") return min_value # Remaining cases # Construct heap of right subtree newHeap = BinomialHeap( bottom_root=bottom_of_new, min_node=min_of_new, heap_size=size_of_new ) # Update size self.size = self.size - 1 - size_of_new # Neighbour nodes previous_node = self.min_node.left next_node = self.min_node.parent # Initialize new bottom_root and min_node self.min_node = previous_node or next_node self.bottom_root = next_node # Update links of previous_node and search below for new min_node and # bottom_root if previous_node: previous_node.parent = next_node # Update bottom_root and search for min_node below self.bottom_root = previous_node self.min_node = previous_node while self.bottom_root.left: self.bottom_root = self.bottom_root.left if self.bottom_root.val < self.min_node.val: self.min_node = self.bottom_root if next_node: next_node.left = previous_node # Search for new min_node above min_node i = next_node while i: if i.val < self.min_node.val: self.min_node = i i = i.parent # Merge heaps self.mergeHeaps(newHeap) return min_value def preOrder(self): """ Returns the Pre-order representation of the heap including values of nodes plus their level distance from the root; Empty nodes appear as # """ # Find top root top_root = self.bottom_root while top_root.parent: top_root = top_root.parent # preorder heap_preOrder = [] self.__traversal(top_root, heap_preOrder) return heap_preOrder def __traversal(self, curr_node, preorder, level=0): """ Pre-order traversal of nodes """ if curr_node: preorder.append((curr_node.val, level)) self.__traversal(curr_node.left, preorder, level + 1) self.__traversal(curr_node.right, preorder, level + 1) else: preorder.append(("#", level)) def __str__(self): """ Overwriting str for a pre-order print of nodes in heap; Performance is poor, so use only for small examples """ if self.isEmpty(): return "" preorder_heap = self.preOrder() return "\n".join(("-" * level + str(value)) for value, level in preorder_heap) # Unit Tests if __name__ == "__main__": import doctest doctest.testmod()
# flake8: noqa """ Binomial Heap Reference: Advanced Data Structures, Peter Brass """ class Node: """ Node in a doubly-linked binomial tree, containing: - value - size of left subtree - link to left, right and parent nodes """ def __init__(self, val): self.val = val # Number of nodes in left subtree self.left_tree_size = 0 self.left = None self.right = None self.parent = None def mergeTrees(self, other): """ In-place merge of two binomial trees of equal size. Returns the root of the resulting tree """ assert self.left_tree_size == other.left_tree_size, "Unequal Sizes of Blocks" if self.val < other.val: other.left = self.right other.parent = None if self.right: self.right.parent = other self.right = other self.left_tree_size = self.left_tree_size * 2 + 1 return self else: self.left = other.right self.parent = None if other.right: other.right.parent = self other.right = self other.left_tree_size = other.left_tree_size * 2 + 1 return other class BinomialHeap: r""" Min-oriented priority queue implemented with the Binomial Heap data structure implemented with the BinomialHeap class. It supports: - Insert element in a heap with n elements: Guaranteed logn, amoratized 1 - Merge (meld) heaps of size m and n: O(logn + logm) - Delete Min: O(logn) - Peek (return min without deleting it): O(1) Example: Create a random permutation of 30 integers to be inserted and 19 of them deleted >>> import numpy as np >>> permutation = np.random.permutation(list(range(30))) Create a Heap and insert the 30 integers __init__() test >>> first_heap = BinomialHeap() 30 inserts - insert() test >>> for number in permutation: ... first_heap.insert(number) Size test >>> print(first_heap.size) 30 Deleting - delete() test >>> for i in range(25): ... print(first_heap.deleteMin(), end=" ") 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 Create a new Heap >>> second_heap = BinomialHeap() >>> vals = [17, 20, 31, 34] >>> for value in vals: ... second_heap.insert(value) The heap should have the following structure: 17 / \ # 31 / \ 20 34 / \ / \ # # # # preOrder() test >>> print(second_heap.preOrder()) [(17, 0), ('#', 1), (31, 1), (20, 2), ('#', 3), ('#', 3), (34, 2), ('#', 3), ('#', 3)] printing Heap - __str__() test >>> print(second_heap) 17 -# -31 --20 ---# ---# --34 ---# ---# mergeHeaps() test >>> merged = second_heap.mergeHeaps(first_heap) >>> merged.peek() 17 values in merged heap; (merge is inplace) >>> while not first_heap.isEmpty(): ... print(first_heap.deleteMin(), end=" ") 17 20 25 26 27 28 29 31 34 """ def __init__(self, bottom_root=None, min_node=None, heap_size=0): self.size = heap_size self.bottom_root = bottom_root self.min_node = min_node def mergeHeaps(self, other): """ In-place merge of two binomial heaps. 
Both of them become the resulting merged heap """ # Empty heaps corner cases if other.size == 0: return if self.size == 0: self.size = other.size self.bottom_root = other.bottom_root self.min_node = other.min_node return # Update size self.size = self.size + other.size # Update min.node if self.min_node.val > other.min_node.val: self.min_node = other.min_node # Merge # Order roots by left_subtree_size combined_roots_list = [] i, j = self.bottom_root, other.bottom_root while i or j: if i and ((not j) or i.left_tree_size < j.left_tree_size): combined_roots_list.append((i, True)) i = i.parent else: combined_roots_list.append((j, False)) j = j.parent # Insert links between them for i in range(len(combined_roots_list) - 1): if combined_roots_list[i][1] != combined_roots_list[i + 1][1]: combined_roots_list[i][0].parent = combined_roots_list[i + 1][0] combined_roots_list[i + 1][0].left = combined_roots_list[i][0] # Consecutively merge roots with same left_tree_size i = combined_roots_list[0][0] while i.parent: if ( (i.left_tree_size == i.parent.left_tree_size) and (not i.parent.parent) ) or ( i.left_tree_size == i.parent.left_tree_size and i.left_tree_size != i.parent.parent.left_tree_size ): # Neighbouring Nodes previous_node = i.left next_node = i.parent.parent # Merging trees i = i.mergeTrees(i.parent) # Updating links i.left = previous_node i.parent = next_node if previous_node: previous_node.parent = i if next_node: next_node.left = i else: i = i.parent # Updating self.bottom_root while i.left: i = i.left self.bottom_root = i # Update other other.size = self.size other.bottom_root = self.bottom_root other.min_node = self.min_node # Return the merged heap return self def insert(self, val): """ insert a value in the heap """ if self.size == 0: self.bottom_root = Node(val) self.size = 1 self.min_node = self.bottom_root else: # Create new node new_node = Node(val) # Update size self.size += 1 # update min_node if val < self.min_node.val: self.min_node = new_node # Put new_node as a bottom_root in heap self.bottom_root.left = new_node new_node.parent = self.bottom_root self.bottom_root = new_node # Consecutively merge roots with same left_tree_size while ( self.bottom_root.parent and self.bottom_root.left_tree_size == self.bottom_root.parent.left_tree_size ): # Next node next_node = self.bottom_root.parent.parent # Merge self.bottom_root = self.bottom_root.mergeTrees(self.bottom_root.parent) # Update Links self.bottom_root.parent = next_node self.bottom_root.left = None if next_node: next_node.left = self.bottom_root def peek(self): """ return min element without deleting it """ return self.min_node.val def isEmpty(self): return self.size == 0 def deleteMin(self): """ delete min element and return it """ # assert not self.isEmpty(), "Empty Heap" # Save minimal value min_value = self.min_node.val # Last element in heap corner case if self.size == 1: # Update size self.size = 0 # Update bottom root self.bottom_root = None # Update min_node self.min_node = None return min_value # No right subtree corner case # The structure of the tree implies that this should be the bottom root # and there is at least one other root if self.min_node.right is None: # Update size self.size -= 1 # Update bottom root self.bottom_root = self.bottom_root.parent self.bottom_root.left = None # Update min_node self.min_node = self.bottom_root i = self.bottom_root.parent while i: if i.val < self.min_node.val: self.min_node = i i = i.parent return min_value # General case # Find the BinomialHeap of the right subtree of 
min_node bottom_of_new = self.min_node.right bottom_of_new.parent = None min_of_new = bottom_of_new size_of_new = 1 # Size, min_node and bottom_root while bottom_of_new.left: size_of_new = size_of_new * 2 + 1 bottom_of_new = bottom_of_new.left if bottom_of_new.val < min_of_new.val: min_of_new = bottom_of_new # Corner case of single root on top left path if (not self.min_node.left) and (not self.min_node.parent): self.size = size_of_new self.bottom_root = bottom_of_new self.min_node = min_of_new # print("Single root, multiple nodes case") return min_value # Remaining cases # Construct heap of right subtree newHeap = BinomialHeap( bottom_root=bottom_of_new, min_node=min_of_new, heap_size=size_of_new ) # Update size self.size = self.size - 1 - size_of_new # Neighbour nodes previous_node = self.min_node.left next_node = self.min_node.parent # Initialize new bottom_root and min_node self.min_node = previous_node or next_node self.bottom_root = next_node # Update links of previous_node and search below for new min_node and # bottom_root if previous_node: previous_node.parent = next_node # Update bottom_root and search for min_node below self.bottom_root = previous_node self.min_node = previous_node while self.bottom_root.left: self.bottom_root = self.bottom_root.left if self.bottom_root.val < self.min_node.val: self.min_node = self.bottom_root if next_node: next_node.left = previous_node # Search for new min_node above min_node i = next_node while i: if i.val < self.min_node.val: self.min_node = i i = i.parent # Merge heaps self.mergeHeaps(newHeap) return min_value def preOrder(self): """ Returns the Pre-order representation of the heap including values of nodes plus their level distance from the root; Empty nodes appear as # """ # Find top root top_root = self.bottom_root while top_root.parent: top_root = top_root.parent # preorder heap_preOrder = [] self.__traversal(top_root, heap_preOrder) return heap_preOrder def __traversal(self, curr_node, preorder, level=0): """ Pre-order traversal of nodes """ if curr_node: preorder.append((curr_node.val, level)) self.__traversal(curr_node.left, preorder, level + 1) self.__traversal(curr_node.right, preorder, level + 1) else: preorder.append(("#", level)) def __str__(self): """ Overwriting str for a pre-order print of nodes in heap; Performance is poor, so use only for small examples """ if self.isEmpty(): return "" preorder_heap = self.preOrder() return "\n".join(("-" * level + str(value)) for value, level in preorder_heap) # Unit Tests if __name__ == "__main__": import doctest doctest.testmod()
-1
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" This is to show simple COVID19 info fetching from worldometers site using lxml * The main motivation to use lxml in place of bs4 is that it is faster and therefore more convenient to use in Python web projects (e.g. Django or Flask-based) """ from collections import namedtuple import requests from lxml import html # type: ignore covid_data = namedtuple("covid_data", "cases deaths recovered") def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data: xpath_str = '//div[@class = "maincounter-number"]/span/text()' return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str)) fmt = """Total COVID-19 cases in the world: {} Total deaths due to COVID-19 in the world: {} Total COVID-19 patients recovered in the world: {}""" print(fmt.format(*covid_stats()))
""" This is to show simple COVID19 info fetching from worldometers site using lxml * The main motivation to use lxml in place of bs4 is that it is faster and therefore more convenient to use in Python web projects (e.g. Django or Flask-based) """ from collections import namedtuple import requests from lxml import html # type: ignore covid_data = namedtuple("covid_data", "cases deaths recovered") def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data: xpath_str = '//div[@class = "maincounter-number"]/span/text()' return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str)) fmt = """Total COVID-19 cases in the world: {} Total deaths due to COVID-19 in the world: {} Total COVID-19 patients recovered in the world: {}""" print(fmt.format(*covid_stats()))
-1
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
import math """ In cryptography, the TRANSPOSITION cipher is a method of encryption where the positions of plaintext are shifted a certain number(determined by the key) that follows a regular system that results in the permuted text, known as the encrypted text. The type of transposition cipher demonstrated under is the ROUTE cipher. """ def main(): message = input("Enter message: ") key = int(input("Enter key [2-%s]: " % (len(message) - 1))) mode = input("Encryption/Decryption [e/d]: ") if mode.lower().startswith("e"): text = encryptMessage(key, message) elif mode.lower().startswith("d"): text = decryptMessage(key, message) # Append pipe symbol (vertical bar) to identify spaces at the end. print("Output:\n%s" % (text + "|")) def encryptMessage(key: int, message: str) -> str: """ >>> encryptMessage(6, 'Harshil Darji') 'Hlia rDsahrij' """ cipherText = [""] * key for col in range(key): pointer = col while pointer < len(message): cipherText[col] += message[pointer] pointer += key return "".join(cipherText) def decryptMessage(key: int, message: str) -> str: """ >>> decryptMessage(6, 'Hlia rDsahrij') 'Harshil Darji' """ numCols = math.ceil(len(message) / key) numRows = key numShadedBoxes = (numCols * numRows) - len(message) plainText = [""] * numCols col = 0 row = 0 for symbol in message: plainText[col] += symbol col += 1 if ( (col == numCols) or (col == numCols - 1) and (row >= numRows - numShadedBoxes) ): col = 0 row += 1 return "".join(plainText) if __name__ == "__main__": import doctest doctest.testmod() main()
import math """ In cryptography, the TRANSPOSITION cipher is a method of encryption where the positions of plaintext are shifted a certain number(determined by the key) that follows a regular system that results in the permuted text, known as the encrypted text. The type of transposition cipher demonstrated under is the ROUTE cipher. """ def main(): message = input("Enter message: ") key = int(input("Enter key [2-%s]: " % (len(message) - 1))) mode = input("Encryption/Decryption [e/d]: ") if mode.lower().startswith("e"): text = encryptMessage(key, message) elif mode.lower().startswith("d"): text = decryptMessage(key, message) # Append pipe symbol (vertical bar) to identify spaces at the end. print("Output:\n%s" % (text + "|")) def encryptMessage(key: int, message: str) -> str: """ >>> encryptMessage(6, 'Harshil Darji') 'Hlia rDsahrij' """ cipherText = [""] * key for col in range(key): pointer = col while pointer < len(message): cipherText[col] += message[pointer] pointer += key return "".join(cipherText) def decryptMessage(key: int, message: str) -> str: """ >>> decryptMessage(6, 'Hlia rDsahrij') 'Harshil Darji' """ numCols = math.ceil(len(message) / key) numRows = key numShadedBoxes = (numCols * numRows) - len(message) plainText = [""] * numCols col = 0 row = 0 for symbol in message: plainText[col] += symbol col += 1 if ( (col == numCols) or (col == numCols - 1) and (row >= numRows - numShadedBoxes) ): col = 0 row += 1 return "".join(plainText) if __name__ == "__main__": import doctest doctest.testmod() main()
-1
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" Project Euler Problem 129: https://projecteuler.net/problem=129 A number consisting entirely of ones is called a repunit. We shall define R(k) to be a repunit of length k; for example, R(6) = 111111. Given that n is a positive integer and GCD(n, 10) = 1, it can be shown that there always exists a value, k, for which R(k) is divisible by n, and let A(n) be the least such value of k; for example, A(7) = 6 and A(41) = 5. The least value of n for which A(n) first exceeds ten is 17. Find the least value of n for which A(n) first exceeds one-million. """ def least_divisible_repunit(divisor: int) -> int: """ Return the least value k such that the Repunit of length k is divisible by divisor. >>> least_divisible_repunit(7) 6 >>> least_divisible_repunit(41) 5 >>> least_divisible_repunit(1234567) 34020 """ if divisor % 5 == 0 or divisor % 2 == 0: return 0 repunit = 1 repunit_index = 1 while repunit: repunit = (10 * repunit + 1) % divisor repunit_index += 1 return repunit_index def solution(limit: int = 1000000) -> int: """ Return the least value of n for which least_divisible_repunit(n) first exceeds limit. >>> solution(10) 17 >>> solution(100) 109 >>> solution(1000) 1017 """ divisor = limit - 1 if divisor % 2 == 0: divisor += 1 while least_divisible_repunit(divisor) <= limit: divisor += 2 return divisor if __name__ == "__main__": print(f"{solution() = }")
""" Project Euler Problem 129: https://projecteuler.net/problem=129 A number consisting entirely of ones is called a repunit. We shall define R(k) to be a repunit of length k; for example, R(6) = 111111. Given that n is a positive integer and GCD(n, 10) = 1, it can be shown that there always exists a value, k, for which R(k) is divisible by n, and let A(n) be the least such value of k; for example, A(7) = 6 and A(41) = 5. The least value of n for which A(n) first exceeds ten is 17. Find the least value of n for which A(n) first exceeds one-million. """ def least_divisible_repunit(divisor: int) -> int: """ Return the least value k such that the Repunit of length k is divisible by divisor. >>> least_divisible_repunit(7) 6 >>> least_divisible_repunit(41) 5 >>> least_divisible_repunit(1234567) 34020 """ if divisor % 5 == 0 or divisor % 2 == 0: return 0 repunit = 1 repunit_index = 1 while repunit: repunit = (10 * repunit + 1) % divisor repunit_index += 1 return repunit_index def solution(limit: int = 1000000) -> int: """ Return the least value of n for which least_divisible_repunit(n) first exceeds limit. >>> solution(10) 17 >>> solution(100) 109 >>> solution(1000) 1017 """ divisor = limit - 1 if divisor % 2 == 0: divisor += 1 while least_divisible_repunit(divisor) <= limit: divisor += 2 return divisor if __name__ == "__main__": print(f"{solution() = }")
-1
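The record above stores the Project Euler 129 solution with identical before and after content. As a small, self-contained illustration of the recurrence that file relies on (R(k) mod n can be built incrementally as R(k) = (10 * R(k-1) + 1) mod n), the sketch below confirms A(7) = 6 and A(41) = 5; the helper name is made up for this note and is not part of the stored file.

def first_repunit_length_divisible_by(n: int) -> int:  # hypothetical helper name
    """Length k of the first repunit R(k) divisible by n.

    Only terminates when GCD(n, 10) == 1, which the problem statement guarantees.
    """
    remainder, k = 1, 1  # R(1) = 1
    while remainder != 0:
        remainder = (10 * remainder + 1) % n  # R(k+1) = 10 * R(k) + 1 (mod n)
        k += 1
    return k


assert first_repunit_length_divisible_by(7) == 6
assert first_repunit_length_divisible_by(41) == 5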
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" A recursive implementation of the insertion sort algorithm """ from __future__ import annotations from typing import List def rec_insertion_sort(collection: list, n: int): """ Given a collection of numbers and its length, sorts the collections in ascending order :param collection: A mutable collection of comparable elements :param n: The length of collections >>> col = [1, 2, 1] >>> rec_insertion_sort(col, len(col)) >>> print(col) [1, 1, 2] >>> col = [2, 1, 0, -1, -2] >>> rec_insertion_sort(col, len(col)) >>> print(col) [-2, -1, 0, 1, 2] >>> col = [1] >>> rec_insertion_sort(col, len(col)) >>> print(col) [1] """ # Checks if the entire collection has been sorted if len(collection) <= 1 or n <= 1: return insert_next(collection, n - 1) rec_insertion_sort(collection, n - 1) def insert_next(collection: list, index: int): """ Inserts the '(index-1)th' element into place >>> col = [3, 2, 4, 2] >>> insert_next(col, 1) >>> print(col) [2, 3, 4, 2] >>> col = [3, 2, 3] >>> insert_next(col, 2) >>> print(col) [3, 2, 3] >>> col = [] >>> insert_next(col, 1) >>> print(col) [] """ # Checks order between adjacent elements if index >= len(collection) or collection[index - 1] <= collection[index]: return # Swaps adjacent elements since they are not in ascending order collection[index - 1], collection[index] = ( collection[index], collection[index - 1], ) insert_next(collection, index + 1) if __name__ == "__main__": numbers = input("Enter integers separated by spaces: ") number_list: List[int] = [int(num) for num in numbers.split()] rec_insertion_sort(number_list, len(number_list)) print(number_list)
""" A recursive implementation of the insertion sort algorithm """ from __future__ import annotations from typing import List def rec_insertion_sort(collection: list, n: int): """ Given a collection of numbers and its length, sorts the collections in ascending order :param collection: A mutable collection of comparable elements :param n: The length of collections >>> col = [1, 2, 1] >>> rec_insertion_sort(col, len(col)) >>> print(col) [1, 1, 2] >>> col = [2, 1, 0, -1, -2] >>> rec_insertion_sort(col, len(col)) >>> print(col) [-2, -1, 0, 1, 2] >>> col = [1] >>> rec_insertion_sort(col, len(col)) >>> print(col) [1] """ # Checks if the entire collection has been sorted if len(collection) <= 1 or n <= 1: return insert_next(collection, n - 1) rec_insertion_sort(collection, n - 1) def insert_next(collection: list, index: int): """ Inserts the '(index-1)th' element into place >>> col = [3, 2, 4, 2] >>> insert_next(col, 1) >>> print(col) [2, 3, 4, 2] >>> col = [3, 2, 3] >>> insert_next(col, 2) >>> print(col) [3, 2, 3] >>> col = [] >>> insert_next(col, 1) >>> print(col) [] """ # Checks order between adjacent elements if index >= len(collection) or collection[index - 1] <= collection[index]: return # Swaps adjacent elements since they are not in ascending order collection[index - 1], collection[index] = ( collection[index], collection[index - 1], ) insert_next(collection, index + 1) if __name__ == "__main__": numbers = input("Enter integers separated by spaces: ") number_list: List[int] = [int(num) for num in numbers.split()] rec_insertion_sort(number_list, len(number_list)) print(number_list)
-1
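The record above stores a recursive insertion sort with identical before and after content. A usage sketch, assuming the file is saved as recursive_insertion_sort.py (the actual path in the repository may differ):

from recursive_insertion_sort import rec_insertion_sort  # hypothetical module name

numbers = [5, 3, 1, 4, 2]
rec_insertion_sort(numbers, len(numbers))  # sorts in place
print(numbers)  # [1, 2, 3, 4, 5]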
TheAlgorithms/Python
4,304
[mypy] Fix directory arithmetic_analysis
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-04-02T07:15:46Z"
"2021-04-02T07:32:13Z"
895bca36541598a04dba525568a20d2282e0ffd9
5229c749553d9ec65d455e0183a574e45ac3e73e
[mypy] Fix directory arithmetic_analysis. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
-1
TheAlgorithms/Python
4,297
[mypy] Fix web_programming directory
A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
cclauss
"2021-03-28T01:58:51Z"
"2021-03-31T03:18:07Z"
c22c7d503be5f48ae257c648f7b83b8a80a02738
895bca36541598a04dba525568a20d2282e0ffd9
[mypy] Fix web_programming directory. A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
name: "build" on: pull_request: schedule: - cron: "0 0 * * *" # Run everyday jobs: build: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 with: python-version: "3.9" - uses: actions/cache@v2 with: path: ~/.cache/pip key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }} - name: Install dependencies run: | python -m pip install --upgrade pip setuptools six wheel python -m pip install mypy pytest-cov -r requirements.txt # FIXME: #4052 fix mypy errors in the exclude directories and remove them below - run: mypy --ignore-missing-imports --exclude '(arithmetic_analysis|ciphers|conversions|data_structures|digital_image_processing|dynamic_programming|graphs|hashes|linear_algebra|maths|matrix|other|project_euler|scripts|searches|strings|web_programming*)/$' . - name: Run tests run: pytest --doctest-modules --ignore=project_euler/ --ignore=scripts/ --cov-report=term-missing:skip-covered --cov=. . - if: ${{ success() }} run: scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md
name: "build" on: pull_request: schedule: - cron: "0 0 * * *" # Run everyday jobs: build: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 with: python-version: "3.9" - uses: actions/cache@v2 with: path: ~/.cache/pip key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }} - name: Install dependencies run: | python -m pip install --upgrade pip setuptools six wheel python -m pip install mypy pytest-cov -r requirements.txt # FIXME: #4052 fix mypy errors in the exclude directories and remove them below - run: mypy --ignore-missing-imports --exclude '(arithmetic_analysis|ciphers|conversions|data_structures|digital_image_processing|dynamic_programming|graphs|hashes|linear_algebra|maths|matrix|other|project_euler|scripts|searches|strings*)/$' . - name: Run tests run: pytest --doctest-modules --ignore=project_euler/ --ignore=scripts/ --cov-report=term-missing:skip-covered --cov=. . - if: ${{ success() }} run: scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md
1
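In the record above, the stored after workflow drops web_programming from mypy's --exclude regex, so CI type-checks that directory again. A rough local equivalent of that narrowed check via mypy's Python API, assuming mypy is installed and the snippet is run from the repository root:

from mypy import api  # mypy's documented programmatic entry point

stdout, stderr, exit_code = api.run(["--ignore-missing-imports", "web_programming/"])
print(stdout or stderr)
print("mypy exit code:", exit_code)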
TheAlgorithms/Python
4,297
[mypy] Fix web_programming directory
A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
cclauss
"2021-03-28T01:58:51Z"
"2021-03-31T03:18:07Z"
c22c7d503be5f48ae257c648f7b83b8a80a02738
895bca36541598a04dba525568a20d2282e0ffd9
[mypy] Fix web_programming directory. A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" This is used to convert the currency using the Amdoren Currency API https://www.amdoren.com """ import os import requests URL_BASE = "https://www.amdoren.com/api/currency.php" TESTING = os.getenv("CI", False) API_KEY = os.getenv("AMDOREN_API_KEY") if not API_KEY and not TESTING: raise KeyError("Please put your API key in an environment variable.") # Currency and their description list_of_currencies = """ AED United Arab Emirates Dirham AFN Afghan Afghani ALL Albanian Lek AMD Armenian Dram ANG Netherlands Antillean Guilder AOA Angolan Kwanza ARS Argentine Peso AUD Australian Dollar AWG Aruban Florin AZN Azerbaijani Manat BAM Bosnia & Herzegovina Convertible Mark BBD Barbadian Dollar BDT Bangladeshi Taka BGN Bulgarian Lev BHD Bahraini Dinar BIF Burundian Franc BMD Bermudian Dollar BND Brunei Dollar BOB Bolivian Boliviano BRL Brazilian Real BSD Bahamian Dollar BTN Bhutanese Ngultrum BWP Botswana Pula BYN Belarus Ruble BZD Belize Dollar CAD Canadian Dollar CDF Congolese Franc CHF Swiss Franc CLP Chilean Peso CNY Chinese Yuan COP Colombian Peso CRC Costa Rican Colon CUC Cuban Convertible Peso CVE Cape Verdean Escudo CZK Czech Republic Koruna DJF Djiboutian Franc DKK Danish Krone DOP Dominican Peso DZD Algerian Dinar EGP Egyptian Pound ERN Eritrean Nakfa ETB Ethiopian Birr EUR Euro FJD Fiji Dollar GBP British Pound Sterling GEL Georgian Lari GHS Ghanaian Cedi GIP Gibraltar Pound GMD Gambian Dalasi GNF Guinea Franc GTQ Guatemalan Quetzal GYD Guyanaese Dollar HKD Hong Kong Dollar HNL Honduran Lempira HRK Croatian Kuna HTG Haiti Gourde HUF Hungarian Forint IDR Indonesian Rupiah ILS Israeli Shekel INR Indian Rupee IQD Iraqi Dinar IRR Iranian Rial ISK Icelandic Krona JMD Jamaican Dollar JOD Jordanian Dinar JPY Japanese Yen KES Kenyan Shilling KGS Kyrgystani Som KHR Cambodian Riel KMF Comorian Franc KPW North Korean Won KRW South Korean Won KWD Kuwaiti Dinar KYD Cayman Islands Dollar KZT Kazakhstan Tenge LAK Laotian Kip LBP Lebanese Pound LKR Sri Lankan Rupee LRD Liberian Dollar LSL Lesotho Loti LYD Libyan Dinar MAD Moroccan Dirham MDL Moldovan Leu MGA Malagasy Ariary MKD Macedonian Denar MMK Myanma Kyat MNT Mongolian Tugrik MOP Macau Pataca MRO Mauritanian Ouguiya MUR Mauritian Rupee MVR Maldivian Rufiyaa MWK Malawi Kwacha MXN Mexican Peso MYR Malaysian Ringgit MZN Mozambican Metical NAD Namibian Dollar NGN Nigerian Naira NIO Nicaragua Cordoba NOK Norwegian Krone NPR Nepalese Rupee NZD New Zealand Dollar OMR Omani Rial PAB Panamanian Balboa PEN Peruvian Nuevo Sol PGK Papua New Guinean Kina PHP Philippine Peso PKR Pakistani Rupee PLN Polish Zloty PYG Paraguayan Guarani QAR Qatari Riyal RON Romanian Leu RSD Serbian Dinar RUB Russian Ruble RWF Rwanda Franc SAR Saudi Riyal SBD Solomon Islands Dollar SCR Seychellois Rupee SDG Sudanese Pound SEK Swedish Krona SGD Singapore Dollar SHP Saint Helena Pound SLL Sierra Leonean Leone SOS Somali Shilling SRD Surinamese Dollar SSP South Sudanese Pound STD Sao Tome and Principe Dobra SYP Syrian Pound SZL Swazi Lilangeni THB Thai Baht TJS Tajikistan Somoni TMT Turkmenistani Manat TND Tunisian Dinar TOP Tonga Paanga TRY Turkish Lira TTD Trinidad and Tobago Dollar TWD New Taiwan Dollar TZS Tanzanian Shilling UAH Ukrainian Hryvnia UGX Ugandan Shilling USD United States Dollar UYU Uruguayan Peso UZS Uzbekistan Som VEF Venezuelan Bolivar VND Vietnamese Dong VUV Vanuatu Vatu WST Samoan Tala XAF Central African CFA franc XCD East Caribbean Dollar XOF West African CFA franc XPF CFP Franc YER Yemeni Rial ZAR South African Rand ZMW Zambian Kwacha """ def convert_currency( 
from_: str = "USD", to: str = "INR", amount: float = 1.0, api_key: str = API_KEY ) -> str: """https://www.amdoren.com/currency-api/""" params = locals() params["from"] = params.pop("from_") res = requests.get(URL_BASE, params=params).json() return str(res["amount"]) if res["error"] == 0 else res["error_message"] if __name__ == "__main__": print( convert_currency( input("Enter from currency: ").strip(), input("Enter to currency: ").strip(), float(input("Enter the amount: ").strip()), ) )
""" This is used to convert the currency using the Amdoren Currency API https://www.amdoren.com """ import os import requests URL_BASE = "https://www.amdoren.com/api/currency.php" TESTING = os.getenv("CI", False) API_KEY = os.getenv("AMDOREN_API_KEY", "") if not API_KEY and not TESTING: raise KeyError("Please put your API key in an environment variable.") # Currency and their description list_of_currencies = """ AED United Arab Emirates Dirham AFN Afghan Afghani ALL Albanian Lek AMD Armenian Dram ANG Netherlands Antillean Guilder AOA Angolan Kwanza ARS Argentine Peso AUD Australian Dollar AWG Aruban Florin AZN Azerbaijani Manat BAM Bosnia & Herzegovina Convertible Mark BBD Barbadian Dollar BDT Bangladeshi Taka BGN Bulgarian Lev BHD Bahraini Dinar BIF Burundian Franc BMD Bermudian Dollar BND Brunei Dollar BOB Bolivian Boliviano BRL Brazilian Real BSD Bahamian Dollar BTN Bhutanese Ngultrum BWP Botswana Pula BYN Belarus Ruble BZD Belize Dollar CAD Canadian Dollar CDF Congolese Franc CHF Swiss Franc CLP Chilean Peso CNY Chinese Yuan COP Colombian Peso CRC Costa Rican Colon CUC Cuban Convertible Peso CVE Cape Verdean Escudo CZK Czech Republic Koruna DJF Djiboutian Franc DKK Danish Krone DOP Dominican Peso DZD Algerian Dinar EGP Egyptian Pound ERN Eritrean Nakfa ETB Ethiopian Birr EUR Euro FJD Fiji Dollar GBP British Pound Sterling GEL Georgian Lari GHS Ghanaian Cedi GIP Gibraltar Pound GMD Gambian Dalasi GNF Guinea Franc GTQ Guatemalan Quetzal GYD Guyanaese Dollar HKD Hong Kong Dollar HNL Honduran Lempira HRK Croatian Kuna HTG Haiti Gourde HUF Hungarian Forint IDR Indonesian Rupiah ILS Israeli Shekel INR Indian Rupee IQD Iraqi Dinar IRR Iranian Rial ISK Icelandic Krona JMD Jamaican Dollar JOD Jordanian Dinar JPY Japanese Yen KES Kenyan Shilling KGS Kyrgystani Som KHR Cambodian Riel KMF Comorian Franc KPW North Korean Won KRW South Korean Won KWD Kuwaiti Dinar KYD Cayman Islands Dollar KZT Kazakhstan Tenge LAK Laotian Kip LBP Lebanese Pound LKR Sri Lankan Rupee LRD Liberian Dollar LSL Lesotho Loti LYD Libyan Dinar MAD Moroccan Dirham MDL Moldovan Leu MGA Malagasy Ariary MKD Macedonian Denar MMK Myanma Kyat MNT Mongolian Tugrik MOP Macau Pataca MRO Mauritanian Ouguiya MUR Mauritian Rupee MVR Maldivian Rufiyaa MWK Malawi Kwacha MXN Mexican Peso MYR Malaysian Ringgit MZN Mozambican Metical NAD Namibian Dollar NGN Nigerian Naira NIO Nicaragua Cordoba NOK Norwegian Krone NPR Nepalese Rupee NZD New Zealand Dollar OMR Omani Rial PAB Panamanian Balboa PEN Peruvian Nuevo Sol PGK Papua New Guinean Kina PHP Philippine Peso PKR Pakistani Rupee PLN Polish Zloty PYG Paraguayan Guarani QAR Qatari Riyal RON Romanian Leu RSD Serbian Dinar RUB Russian Ruble RWF Rwanda Franc SAR Saudi Riyal SBD Solomon Islands Dollar SCR Seychellois Rupee SDG Sudanese Pound SEK Swedish Krona SGD Singapore Dollar SHP Saint Helena Pound SLL Sierra Leonean Leone SOS Somali Shilling SRD Surinamese Dollar SSP South Sudanese Pound STD Sao Tome and Principe Dobra SYP Syrian Pound SZL Swazi Lilangeni THB Thai Baht TJS Tajikistan Somoni TMT Turkmenistani Manat TND Tunisian Dinar TOP Tonga Paanga TRY Turkish Lira TTD Trinidad and Tobago Dollar TWD New Taiwan Dollar TZS Tanzanian Shilling UAH Ukrainian Hryvnia UGX Ugandan Shilling USD United States Dollar UYU Uruguayan Peso UZS Uzbekistan Som VEF Venezuelan Bolivar VND Vietnamese Dong VUV Vanuatu Vatu WST Samoan Tala XAF Central African CFA franc XCD East Caribbean Dollar XOF West African CFA franc XPF CFP Franc YER Yemeni Rial ZAR South African Rand ZMW Zambian Kwacha """ def 
convert_currency( from_: str = "USD", to: str = "INR", amount: float = 1.0, api_key: str = API_KEY ) -> str: """https://www.amdoren.com/currency-api/""" params = locals() params["from"] = params.pop("from_") res = requests.get(URL_BASE, params=params).json() return str(res["amount"]) if res["error"] == 0 else res["error_message"] if __name__ == "__main__": print( convert_currency( input("Enter from currency: ").strip(), input("Enter to currency: ").strip(), float(input("Enter the amount: ").strip()), ) )
1
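In the record above, the stored after version gives os.getenv("AMDOREN_API_KEY") an empty-string default so that API_KEY is a plain str rather than Optional[str]. A tiny self-contained illustration of that distinction:

import os

maybe_key = os.getenv("AMDOREN_API_KEY")       # mypy types this as Optional[str]
always_str = os.getenv("AMDOREN_API_KEY", "")  # the "" default narrows this to str
print(type(maybe_key), type(always_str))       # NoneType vs str when the variable is unset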
TheAlgorithms/Python
4,297
[mypy] Fix web_programming directory
A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
cclauss
"2021-03-28T01:58:51Z"
"2021-03-31T03:18:07Z"
c22c7d503be5f48ae257c648f7b83b8a80a02738
895bca36541598a04dba525568a20d2282e0ffd9
[mypy] Fix web_programming directory. A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
"""Get the site emails from URL.""" __author__ = "Muhammad Umer Farooq" __license__ = "MIT" __version__ = "1.0.0" __maintainer__ = "Muhammad Umer Farooq" __email__ = "[email protected]" __status__ = "Alpha" import re from html.parser import HTMLParser from urllib import parse import requests class Parser(HTMLParser): def __init__(self, domain: str): HTMLParser.__init__(self) self.data = [] self.domain = domain def handle_starttag(self, tag: str, attrs: str) -> None: """ This function parse html to take takes url from tags """ # Only parse the 'anchor' tag. if tag == "a": # Check the list of defined attributes. for name, value in attrs: # If href is defined, and not empty nor # print it. if name == "href" and value != "#" and value != "": # If not already in data. if value not in self.data: url = parse.urljoin(self.domain, value) self.data.append(url) # Get main domain name (example.com) def get_domain_name(url: str) -> str: """ This function get the main domain name >>> get_domain_name("https://a.b.c.d/e/f?g=h,i=j#k") 'c.d' >>> get_domain_name("Not a URL!") '' """ return ".".join(get_sub_domain_name(url).split(".")[-2:]) # Get sub domain name (sub.example.com) def get_sub_domain_name(url: str) -> str: """ >>> get_sub_domain_name("https://a.b.c.d/e/f?g=h,i=j#k") 'a.b.c.d' >>> get_sub_domain_name("Not a URL!") '' """ return parse.urlparse(url).netloc def emails_from_url(url: str = "https://github.com") -> list: """ This function takes url and return all valid urls """ # Get the base domain from the url domain = get_domain_name(url) # Initialize the parser parser = Parser(domain) try: # Open URL r = requests.get(url) # pass the raw HTML to the parser to get links parser.feed(r.text) # Get links and loop through valid_emails = set() for link in parser.data: # open URL. # read = requests.get(link) try: read = requests.get(link) # Get the valid email. emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text) # If not in list then append it. for email in emails: valid_emails.add(email) except ValueError: pass except ValueError: exit(-1) # Finally return a sorted list of email addresses with no duplicates. return sorted(valid_emails) if __name__ == "__main__": emails = emails_from_url("https://github.com") print(f"{len(emails)} emails found:") print("\n".join(sorted(emails)))
"""Get the site emails from URL.""" __author__ = "Muhammad Umer Farooq" __license__ = "MIT" __version__ = "1.0.0" __maintainer__ = "Muhammad Umer Farooq" __email__ = "[email protected]" __status__ = "Alpha" import re from html.parser import HTMLParser from typing import Optional from urllib import parse import requests class Parser(HTMLParser): def __init__(self, domain: str) -> None: super().__init__() self.urls: list[str] = [] self.domain = domain def handle_starttag(self, tag: str, attrs: list[tuple[str, Optional[str]]]) -> None: """ This function parse html to take takes url from tags """ # Only parse the 'anchor' tag. if tag == "a": # Check the list of defined attributes. for name, value in attrs: # If href is defined, and not empty nor # print it. if name == "href" and value != "#" and value != "": # If not already in urls. if value not in self.urls: url = parse.urljoin(self.domain, value) self.urls.append(url) # Get main domain name (example.com) def get_domain_name(url: str) -> str: """ This function get the main domain name >>> get_domain_name("https://a.b.c.d/e/f?g=h,i=j#k") 'c.d' >>> get_domain_name("Not a URL!") '' """ return ".".join(get_sub_domain_name(url).split(".")[-2:]) # Get sub domain name (sub.example.com) def get_sub_domain_name(url: str) -> str: """ >>> get_sub_domain_name("https://a.b.c.d/e/f?g=h,i=j#k") 'a.b.c.d' >>> get_sub_domain_name("Not a URL!") '' """ return parse.urlparse(url).netloc def emails_from_url(url: str = "https://github.com") -> list[str]: """ This function takes url and return all valid urls """ # Get the base domain from the url domain = get_domain_name(url) # Initialize the parser parser = Parser(domain) try: # Open URL r = requests.get(url) # pass the raw HTML to the parser to get links parser.feed(r.text) # Get links and loop through valid_emails = set() for link in parser.urls: # open URL. # read = requests.get(link) try: read = requests.get(link) # Get the valid email. emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text) # If not in list then append it. for email in emails: valid_emails.add(email) except ValueError: pass except ValueError: exit(-1) # Finally return a sorted list of email addresses with no duplicates. return sorted(valid_emails) if __name__ == "__main__": emails = emails_from_url("https://github.com") print(f"{len(emails)} emails found:") print("\n".join(sorted(emails)))
1
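In the record above, the stored after version annotates handle_starttag's attrs parameter and collects links in a typed urls list. The sketch below is a minimal, self-contained parser in the same style, not the stored file itself (the class name is made up); it needs Python 3.9+ for the built-in generic annotations.

from html.parser import HTMLParser
from typing import Optional


class LinkCollector(HTMLParser):  # hypothetical name, not the class in the stored file
    def __init__(self) -> None:
        super().__init__()
        self.urls: list[str] = []

    def handle_starttag(self, tag: str, attrs: list[tuple[str, Optional[str]]]) -> None:
        # Collect href values from anchor tags, skipping empty and "#" placeholders.
        if tag == "a":
            for name, value in attrs:
                if name == "href" and value and value != "#":
                    self.urls.append(value)


parser = LinkCollector()
parser.feed('<a href="https://github.com">GitHub</a>')
print(parser.urls)  # ['https://github.com']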
TheAlgorithms/Python
4,297
[mypy] Fix web_programming directory
A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
cclauss
"2021-03-28T01:58:51Z"
"2021-03-31T03:18:07Z"
c22c7d503be5f48ae257c648f7b83b8a80a02738
895bca36541598a04dba525568a20d2282e0ffd9
[mypy] Fix web_programming directory. A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
# Print all combinations of r elements from a given set of n elements.


def combination_util(arr, n, r, index, data, i):
    """
    Current combination is ready to be printed, print it
    arr[] ---> Input Array
    data[] ---> Temporary array to store current combination
    start & end ---> Starting and Ending indexes in arr[]
    index ---> Current index in data[]
    r ---> Size of a combination to be printed
    """
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


# Driver function to check for above function
arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
# Print all combinations of r elements from a given set of n elements.


def combination_util(arr, n, r, index, data, i):
    """
    Current combination is ready to be printed, print it
    arr[] ---> Input Array
    data[] ---> Temporary array to store current combination
    start & end ---> Starting and Ending indexes in arr[]
    index ---> Current index in data[]
    r ---> Size of a combination to be printed
    """
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


# Driver function to check for above function
arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
-1
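The record above stores a combinations printer with identical before and after content. A usage sketch, assuming the file is saved as combinations.py (hypothetical name); note that the stored file also runs its own driver code on [10, 20, 30, 40, 50] at import time.

from combinations import print_combination  # hypothetical module name

print_combination([1, 2, 3], 3, 2)
# prints, ignoring trailing spaces:
# 1 2
# 1 3
# 2 3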
TheAlgorithms/Python
4,297
[mypy] Fix web_programming directory
A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
cclauss
"2021-03-28T01:58:51Z"
"2021-03-31T03:18:07Z"
c22c7d503be5f48ae257c648f7b83b8a80a02738
895bca36541598a04dba525568a20d2282e0ffd9
[mypy] Fix web_programming directory. A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
# Check whether Graph is Bipartite or Not using BFS

# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def checkBipartite(graph):
    queue = []
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def bfs():
        while queue:
            u = queue.pop(0)
            visited[u] = True

            for neighbour in graph[u]:

                if neighbour == u:
                    return False

                if color[neighbour] == -1:
                    color[neighbour] = 1 - color[u]
                    queue.append(neighbour)

                elif color[neighbour] == color[u]:
                    return False

        return True

    for i in range(len(graph)):
        if not visited[i]:
            queue.append(i)
            color[i] = 0
            if bfs() is False:
                return False

    return True


if __name__ == "__main__":
    # Adjacency List of graph
    print(checkBipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}))
# Check whether Graph is Bipartite or Not using BFS

# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def checkBipartite(graph):
    queue = []
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def bfs():
        while queue:
            u = queue.pop(0)
            visited[u] = True

            for neighbour in graph[u]:

                if neighbour == u:
                    return False

                if color[neighbour] == -1:
                    color[neighbour] = 1 - color[u]
                    queue.append(neighbour)

                elif color[neighbour] == color[u]:
                    return False

        return True

    for i in range(len(graph)):
        if not visited[i]:
            queue.append(i)
            color[i] = 0
            if bfs() is False:
                return False

    return True


if __name__ == "__main__":
    # Adjacency List of graph
    print(checkBipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}))
-1
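The record above stores a BFS bipartiteness check with identical before and after content. A quick sanity sketch, assuming the file is saved as check_bipartite_graph_bfs.py (hypothetical name):

from check_bipartite_graph_bfs import checkBipartite  # hypothetical module name

print(checkBipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}))  # True: a 4-cycle is 2-colorable
print(checkBipartite({0: [1, 2], 1: [0, 2], 2: [0, 1]}))  # False: a triangle has an odd cycle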
TheAlgorithms/Python
4,297
[mypy] Fix web_programming directory
A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
cclauss
"2021-03-28T01:58:51Z"
"2021-03-31T03:18:07Z"
c22c7d503be5f48ae257c648f7b83b8a80a02738
895bca36541598a04dba525568a20d2282e0ffd9
[mypy] Fix web_programming directory. A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
alphabet = { "A": ("ABCDEFGHIJKLM", "NOPQRSTUVWXYZ"), "B": ("ABCDEFGHIJKLM", "NOPQRSTUVWXYZ"), "C": ("ABCDEFGHIJKLM", "ZNOPQRSTUVWXY"), "D": ("ABCDEFGHIJKLM", "ZNOPQRSTUVWXY"), "E": ("ABCDEFGHIJKLM", "YZNOPQRSTUVWX"), "F": ("ABCDEFGHIJKLM", "YZNOPQRSTUVWX"), "G": ("ABCDEFGHIJKLM", "XYZNOPQRSTUVW"), "H": ("ABCDEFGHIJKLM", "XYZNOPQRSTUVW"), "I": ("ABCDEFGHIJKLM", "WXYZNOPQRSTUV"), "J": ("ABCDEFGHIJKLM", "WXYZNOPQRSTUV"), "K": ("ABCDEFGHIJKLM", "VWXYZNOPQRSTU"), "L": ("ABCDEFGHIJKLM", "VWXYZNOPQRSTU"), "M": ("ABCDEFGHIJKLM", "UVWXYZNOPQRST"), "N": ("ABCDEFGHIJKLM", "UVWXYZNOPQRST"), "O": ("ABCDEFGHIJKLM", "TUVWXYZNOPQRS"), "P": ("ABCDEFGHIJKLM", "TUVWXYZNOPQRS"), "Q": ("ABCDEFGHIJKLM", "STUVWXYZNOPQR"), "R": ("ABCDEFGHIJKLM", "STUVWXYZNOPQR"), "S": ("ABCDEFGHIJKLM", "RSTUVWXYZNOPQ"), "T": ("ABCDEFGHIJKLM", "RSTUVWXYZNOPQ"), "U": ("ABCDEFGHIJKLM", "QRSTUVWXYZNOP"), "V": ("ABCDEFGHIJKLM", "QRSTUVWXYZNOP"), "W": ("ABCDEFGHIJKLM", "PQRSTUVWXYZNO"), "X": ("ABCDEFGHIJKLM", "PQRSTUVWXYZNO"), "Y": ("ABCDEFGHIJKLM", "OPQRSTUVWXYZN"), "Z": ("ABCDEFGHIJKLM", "OPQRSTUVWXYZN"), } def generate_table(key: str) -> [(str, str)]: """ >>> generate_table('marvin') # doctest: +NORMALIZE_WHITESPACE [('ABCDEFGHIJKLM', 'UVWXYZNOPQRST'), ('ABCDEFGHIJKLM', 'NOPQRSTUVWXYZ'), ('ABCDEFGHIJKLM', 'STUVWXYZNOPQR'), ('ABCDEFGHIJKLM', 'QRSTUVWXYZNOP'), ('ABCDEFGHIJKLM', 'WXYZNOPQRSTUV'), ('ABCDEFGHIJKLM', 'UVWXYZNOPQRST')] """ return [alphabet[char] for char in key.upper()] def encrypt(key: str, words: str) -> str: """ >>> encrypt('marvin', 'jessica') 'QRACRWU' """ cipher = "" count = 0 table = generate_table(key) for char in words.upper(): cipher += get_opponent(table[count], char) count = (count + 1) % len(table) return cipher def decrypt(key: str, words: str) -> str: """ >>> decrypt('marvin', 'QRACRWU') 'JESSICA' """ return encrypt(key, words) def get_position(table: [(str, str)], char: str) -> (int, int) or (None, None): """ >>> table = [ ... ('ABCDEFGHIJKLM', 'UVWXYZNOPQRST'), ('ABCDEFGHIJKLM', 'NOPQRSTUVWXYZ'), ... ('ABCDEFGHIJKLM', 'STUVWXYZNOPQR'), ('ABCDEFGHIJKLM', 'QRSTUVWXYZNOP'), ... ('ABCDEFGHIJKLM', 'WXYZNOPQRSTUV'), ('ABCDEFGHIJKLM', 'UVWXYZNOPQRST')] >>> get_position(table, 'A') (None, None) """ if char in table[0]: row = 0 else: row = 1 if char in table[1] else -1 return (None, None) if row == -1 else (row, table[row].index(char)) def get_opponent(table: [(str, str)], char: str) -> str: """ >>> table = [ ... ('ABCDEFGHIJKLM', 'UVWXYZNOPQRST'), ('ABCDEFGHIJKLM', 'NOPQRSTUVWXYZ'), ... ('ABCDEFGHIJKLM', 'STUVWXYZNOPQR'), ('ABCDEFGHIJKLM', 'QRSTUVWXYZNOP'), ... ('ABCDEFGHIJKLM', 'WXYZNOPQRSTUV'), ('ABCDEFGHIJKLM', 'UVWXYZNOPQRST')] >>> get_opponent(table, 'A') 'A' """ row, col = get_position(table, char.upper()) if row == 1: return table[0][col] else: return table[1][col] if row == 0 else char if __name__ == "__main__": import doctest doctest.testmod() # Fist ensure that all our tests are passing... """ ENTER KEY: marvin ENTER TEXT TO ENCRYPT: jessica ENCRYPTED: QRACRWU DECRYPTED WITH KEY: JESSICA """ key = input("ENTER KEY: ").strip() text = input("ENTER TEXT TO ENCRYPT: ").strip() cipher_text = encrypt(key, text) print(f"ENCRYPTED: {cipher_text}") print(f"DECRYPTED WITH KEY: {decrypt(key, cipher_text)}")
alphabet = { "A": ("ABCDEFGHIJKLM", "NOPQRSTUVWXYZ"), "B": ("ABCDEFGHIJKLM", "NOPQRSTUVWXYZ"), "C": ("ABCDEFGHIJKLM", "ZNOPQRSTUVWXY"), "D": ("ABCDEFGHIJKLM", "ZNOPQRSTUVWXY"), "E": ("ABCDEFGHIJKLM", "YZNOPQRSTUVWX"), "F": ("ABCDEFGHIJKLM", "YZNOPQRSTUVWX"), "G": ("ABCDEFGHIJKLM", "XYZNOPQRSTUVW"), "H": ("ABCDEFGHIJKLM", "XYZNOPQRSTUVW"), "I": ("ABCDEFGHIJKLM", "WXYZNOPQRSTUV"), "J": ("ABCDEFGHIJKLM", "WXYZNOPQRSTUV"), "K": ("ABCDEFGHIJKLM", "VWXYZNOPQRSTU"), "L": ("ABCDEFGHIJKLM", "VWXYZNOPQRSTU"), "M": ("ABCDEFGHIJKLM", "UVWXYZNOPQRST"), "N": ("ABCDEFGHIJKLM", "UVWXYZNOPQRST"), "O": ("ABCDEFGHIJKLM", "TUVWXYZNOPQRS"), "P": ("ABCDEFGHIJKLM", "TUVWXYZNOPQRS"), "Q": ("ABCDEFGHIJKLM", "STUVWXYZNOPQR"), "R": ("ABCDEFGHIJKLM", "STUVWXYZNOPQR"), "S": ("ABCDEFGHIJKLM", "RSTUVWXYZNOPQ"), "T": ("ABCDEFGHIJKLM", "RSTUVWXYZNOPQ"), "U": ("ABCDEFGHIJKLM", "QRSTUVWXYZNOP"), "V": ("ABCDEFGHIJKLM", "QRSTUVWXYZNOP"), "W": ("ABCDEFGHIJKLM", "PQRSTUVWXYZNO"), "X": ("ABCDEFGHIJKLM", "PQRSTUVWXYZNO"), "Y": ("ABCDEFGHIJKLM", "OPQRSTUVWXYZN"), "Z": ("ABCDEFGHIJKLM", "OPQRSTUVWXYZN"), } def generate_table(key: str) -> [(str, str)]: """ >>> generate_table('marvin') # doctest: +NORMALIZE_WHITESPACE [('ABCDEFGHIJKLM', 'UVWXYZNOPQRST'), ('ABCDEFGHIJKLM', 'NOPQRSTUVWXYZ'), ('ABCDEFGHIJKLM', 'STUVWXYZNOPQR'), ('ABCDEFGHIJKLM', 'QRSTUVWXYZNOP'), ('ABCDEFGHIJKLM', 'WXYZNOPQRSTUV'), ('ABCDEFGHIJKLM', 'UVWXYZNOPQRST')] """ return [alphabet[char] for char in key.upper()] def encrypt(key: str, words: str) -> str: """ >>> encrypt('marvin', 'jessica') 'QRACRWU' """ cipher = "" count = 0 table = generate_table(key) for char in words.upper(): cipher += get_opponent(table[count], char) count = (count + 1) % len(table) return cipher def decrypt(key: str, words: str) -> str: """ >>> decrypt('marvin', 'QRACRWU') 'JESSICA' """ return encrypt(key, words) def get_position(table: [(str, str)], char: str) -> (int, int) or (None, None): """ >>> table = [ ... ('ABCDEFGHIJKLM', 'UVWXYZNOPQRST'), ('ABCDEFGHIJKLM', 'NOPQRSTUVWXYZ'), ... ('ABCDEFGHIJKLM', 'STUVWXYZNOPQR'), ('ABCDEFGHIJKLM', 'QRSTUVWXYZNOP'), ... ('ABCDEFGHIJKLM', 'WXYZNOPQRSTUV'), ('ABCDEFGHIJKLM', 'UVWXYZNOPQRST')] >>> get_position(table, 'A') (None, None) """ if char in table[0]: row = 0 else: row = 1 if char in table[1] else -1 return (None, None) if row == -1 else (row, table[row].index(char)) def get_opponent(table: [(str, str)], char: str) -> str: """ >>> table = [ ... ('ABCDEFGHIJKLM', 'UVWXYZNOPQRST'), ('ABCDEFGHIJKLM', 'NOPQRSTUVWXYZ'), ... ('ABCDEFGHIJKLM', 'STUVWXYZNOPQR'), ('ABCDEFGHIJKLM', 'QRSTUVWXYZNOP'), ... ('ABCDEFGHIJKLM', 'WXYZNOPQRSTUV'), ('ABCDEFGHIJKLM', 'UVWXYZNOPQRST')] >>> get_opponent(table, 'A') 'A' """ row, col = get_position(table, char.upper()) if row == 1: return table[0][col] else: return table[1][col] if row == 0 else char if __name__ == "__main__": import doctest doctest.testmod() # Fist ensure that all our tests are passing... """ ENTER KEY: marvin ENTER TEXT TO ENCRYPT: jessica ENCRYPTED: QRACRWU DECRYPTED WITH KEY: JESSICA """ key = input("ENTER KEY: ").strip() text = input("ENTER TEXT TO ENCRYPT: ").strip() cipher_text = encrypt(key, text) print(f"ENCRYPTED: {cipher_text}") print(f"DECRYPTED WITH KEY: {decrypt(key, cipher_text)}")
-1
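The record above stores a Porta cipher implementation with identical before and after content. A round-trip sketch using the values from the file's own doctests, assuming it is saved as porta_cipher.py (hypothetical name):

from porta_cipher import decrypt, encrypt  # hypothetical module name

cipher_text = encrypt("marvin", "jessica")
print(cipher_text)                     # QRACRWU, matching the file's doctest
print(decrypt("marvin", cipher_text))  # JESSICA, since the Porta cipher is its own inverse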
TheAlgorithms/Python
4,297
[mypy] Fix web_programming directory
A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
cclauss
"2021-03-28T01:58:51Z"
"2021-03-31T03:18:07Z"
c22c7d503be5f48ae257c648f7b83b8a80a02738
895bca36541598a04dba525568a20d2282e0ffd9
[mypy] Fix web_programming directory. A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
import cv2 import numpy as np """ Harris Corner Detector https://en.wikipedia.org/wiki/Harris_Corner_Detector """ class Harris_Corner: def __init__(self, k: float, window_size: int): """ k : is an empirically determined constant in [0.04,0.06] window_size : neighbourhoods considered """ if k in (0.04, 0.06): self.k = k self.window_size = window_size else: raise ValueError("invalid k value") def __str__(self): return f"Harris Corner detection with k : {self.k}" def detect(self, img_path: str): """ Returns the image with corners identified img_path : path of the image output : list of the corner positions, image """ img = cv2.imread(img_path, 0) h, w = img.shape corner_list = [] color_img = img.copy() color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB) dy, dx = np.gradient(img) ixx = dx ** 2 iyy = dy ** 2 ixy = dx * dy k = 0.04 offset = self.window_size // 2 for y in range(offset, h - offset): for x in range(offset, w - offset): wxx = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() wyy = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() wxy = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() det = (wxx * wyy) - (wxy ** 2) trace = wxx + wyy r = det - k * (trace ** 2) # Can change the value if r > 0.5: corner_list.append([x, y, r]) color_img.itemset((y, x, 0), 0) color_img.itemset((y, x, 1), 0) color_img.itemset((y, x, 2), 255) return color_img, corner_list if __name__ == "__main__": edge_detect = Harris_Corner(0.04, 3) color_img, _ = edge_detect.detect("path_to_image") cv2.imwrite("detect.png", color_img)
import cv2 import numpy as np """ Harris Corner Detector https://en.wikipedia.org/wiki/Harris_Corner_Detector """ class Harris_Corner: def __init__(self, k: float, window_size: int): """ k : is an empirically determined constant in [0.04,0.06] window_size : neighbourhoods considered """ if k in (0.04, 0.06): self.k = k self.window_size = window_size else: raise ValueError("invalid k value") def __str__(self): return f"Harris Corner detection with k : {self.k}" def detect(self, img_path: str): """ Returns the image with corners identified img_path : path of the image output : list of the corner positions, image """ img = cv2.imread(img_path, 0) h, w = img.shape corner_list = [] color_img = img.copy() color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB) dy, dx = np.gradient(img) ixx = dx ** 2 iyy = dy ** 2 ixy = dx * dy k = 0.04 offset = self.window_size // 2 for y in range(offset, h - offset): for x in range(offset, w - offset): wxx = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() wyy = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() wxy = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() det = (wxx * wyy) - (wxy ** 2) trace = wxx + wyy r = det - k * (trace ** 2) # Can change the value if r > 0.5: corner_list.append([x, y, r]) color_img.itemset((y, x, 0), 0) color_img.itemset((y, x, 1), 0) color_img.itemset((y, x, 2), 255) return color_img, corner_list if __name__ == "__main__": edge_detect = Harris_Corner(0.04, 3) color_img, _ = edge_detect.detect("path_to_image") cv2.imwrite("detect.png", color_img)
-1
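The record above stores a Harris corner detector with identical before and after content. A usage sketch mirroring the file's __main__, assuming it is saved as harris_corner.py (hypothetical name); it needs OpenCV and NumPy, the image path is a placeholder, and the constructor as written only accepts k equal to 0.04 or 0.06.

import cv2

from harris_corner import Harris_Corner  # hypothetical module name

detector = Harris_Corner(k=0.04, window_size=3)
color_img, corner_list = detector.detect("chessboard.png")  # placeholder image path
cv2.imwrite("corners.png", color_img)  # corners are marked in red in the output image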
TheAlgorithms/Python
4,297
[mypy] Fix web_programming directory
A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
cclauss
"2021-03-28T01:58:51Z"
"2021-03-31T03:18:07Z"
c22c7d503be5f48ae257c648f7b83b8a80a02738
895bca36541598a04dba525568a20d2282e0ffd9
[mypy] Fix web_programming directory. A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
-1
TheAlgorithms/Python
4,297
[mypy] Fix web_programming directory
A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
cclauss
"2021-03-28T01:58:51Z"
"2021-03-31T03:18:07Z"
c22c7d503be5f48ae257c648f7b83b8a80a02738
895bca36541598a04dba525568a20d2282e0ffd9
[mypy] Fix web_programming directory. A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
"""Absolute Value.""" def abs_val(num): """ Find the absolute value of a number. >>> abs_val(-5.1) 5.1 >>> abs_val(-5) == abs_val(5) True >>> abs_val(0) 0 """ return -num if num < 0 else num def test_abs_val(): """ >>> test_abs_val() """ assert 0 == abs_val(0) assert 34 == abs_val(34) assert 100000000000 == abs_val(-100000000000) if __name__ == "__main__": print(abs_val(-34)) # --> 34
"""Absolute Value.""" def abs_val(num): """ Find the absolute value of a number. >>> abs_val(-5.1) 5.1 >>> abs_val(-5) == abs_val(5) True >>> abs_val(0) 0 """ return -num if num < 0 else num def test_abs_val(): """ >>> test_abs_val() """ assert 0 == abs_val(0) assert 34 == abs_val(34) assert 100000000000 == abs_val(-100000000000) if __name__ == "__main__": print(abs_val(-34)) # --> 34
-1
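A quick cross-check of the conditional used above (-num if num < 0 else num) against Python's built-in abs on a few representative values; purely illustrative.

values = [-34, -5.1, 0, 7]
print(all((-v if v < 0 else v) == abs(v) for v in values))  # True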
TheAlgorithms/Python
4,297
[mypy] Fix web_programming directory
A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
cclauss
"2021-03-28T01:58:51Z"
"2021-03-31T03:18:07Z"
c22c7d503be5f48ae257c648f7b83b8a80a02738
895bca36541598a04dba525568a20d2282e0ffd9
[mypy] Fix web_programming directory. A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" Given a sorted array of integers, return indices of the two numbers such that they add up to a specific target using the two pointers technique. You may assume that each input would have exactly one solution, and you may not use the same element twice. This is an alternative solution of the two-sum problem, which uses a map to solve the problem. Hence can not solve the issue if there is a constraint not use the same index twice. [1] Example: Given nums = [2, 7, 11, 15], target = 9, Because nums[0] + nums[1] = 2 + 7 = 9, return [0, 1]. [1]: https://github.com/TheAlgorithms/Python/blob/master/other/two_sum.py """ from __future__ import annotations def two_pointer(nums: list[int], target: int) -> list[int]: """ >>> two_pointer([2, 7, 11, 15], 9) [0, 1] >>> two_pointer([2, 7, 11, 15], 17) [0, 3] >>> two_pointer([2, 7, 11, 15], 18) [1, 2] >>> two_pointer([2, 7, 11, 15], 26) [2, 3] >>> two_pointer([1, 3, 3], 6) [1, 2] >>> two_pointer([2, 7, 11, 15], 8) [] >>> two_pointer([3 * i for i in range(10)], 19) [] >>> two_pointer([1, 2, 3], 6) [] """ i = 0 j = len(nums) - 1 while i < j: if nums[i] + nums[j] == target: return [i, j] elif nums[i] + nums[j] < target: i = i + 1 else: j = j - 1 return [] if __name__ == "__main__": import doctest doctest.testmod() print(f"{two_pointer([2, 7, 11, 15], 9) = }")
""" Given a sorted array of integers, return indices of the two numbers such that they add up to a specific target using the two pointers technique. You may assume that each input would have exactly one solution, and you may not use the same element twice. This is an alternative solution of the two-sum problem, which uses a map to solve the problem. Hence can not solve the issue if there is a constraint not use the same index twice. [1] Example: Given nums = [2, 7, 11, 15], target = 9, Because nums[0] + nums[1] = 2 + 7 = 9, return [0, 1]. [1]: https://github.com/TheAlgorithms/Python/blob/master/other/two_sum.py """ from __future__ import annotations def two_pointer(nums: list[int], target: int) -> list[int]: """ >>> two_pointer([2, 7, 11, 15], 9) [0, 1] >>> two_pointer([2, 7, 11, 15], 17) [0, 3] >>> two_pointer([2, 7, 11, 15], 18) [1, 2] >>> two_pointer([2, 7, 11, 15], 26) [2, 3] >>> two_pointer([1, 3, 3], 6) [1, 2] >>> two_pointer([2, 7, 11, 15], 8) [] >>> two_pointer([3 * i for i in range(10)], 19) [] >>> two_pointer([1, 2, 3], 6) [] """ i = 0 j = len(nums) - 1 while i < j: if nums[i] + nums[j] == target: return [i, j] elif nums[i] + nums[j] < target: i = i + 1 else: j = j - 1 return [] if __name__ == "__main__": import doctest doctest.testmod() print(f"{two_pointer([2, 7, 11, 15], 9) = }")
-1
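The docstring above points to a map-based two-sum as the alternative technique; a rough self-contained sketch of that variant is shown below. Function and variable names are illustrative, and unlike the two-pointer version it does not require sorted input.

def two_sum_with_map(nums: list, target: int) -> list:
    seen = {}                         # value -> index where it was first seen
    for index, num in enumerate(nums):
        complement = target - num
        if complement in seen:
            return [seen[complement], index]
        seen[num] = index
    return []


print(two_sum_with_map([2, 7, 11, 15], 9))  # [0, 1]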
TheAlgorithms/Python
4,297
[mypy] Fix web_programming directory
A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
cclauss
"2021-03-28T01:58:51Z"
"2021-03-31T03:18:07Z"
c22c7d503be5f48ae257c648f7b83b8a80a02738
895bca36541598a04dba525568a20d2282e0ffd9
[mypy] Fix web_programming directory. A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" The number 3797 has an interesting property. Being prime itself, it is possible to continuously remove digits from left to right, and remain prime at each stage: 3797, 797, 97, and 7. Similarly we can work from right to left: 3797, 379, 37, and 3. Find the sum of the only eleven primes that are both truncatable from left to right and right to left. NOTE: 2, 3, 5, and 7 are not considered to be truncatable primes. """ from __future__ import annotations seive = [True] * 1000001 seive[1] = False i = 2 while i * i <= 1000000: if seive[i]: for j in range(i * i, 1000001, i): seive[j] = False i += 1 def is_prime(n: int) -> bool: """ Returns True if n is prime, False otherwise, for 1 <= n <= 1000000 >>> is_prime(87) False >>> is_prime(1) False >>> is_prime(25363) False """ return seive[n] def list_truncated_nums(n: int) -> list[int]: """ Returns a list of all left and right truncated numbers of n >>> list_truncated_nums(927628) [927628, 27628, 92762, 7628, 9276, 628, 927, 28, 92, 8, 9] >>> list_truncated_nums(467) [467, 67, 46, 7, 4] >>> list_truncated_nums(58) [58, 8, 5] """ str_num = str(n) list_nums = [n] for i in range(1, len(str_num)): list_nums.append(int(str_num[i:])) list_nums.append(int(str_num[:-i])) return list_nums def validate(n: int) -> bool: """ To optimize the approach, we will rule out the numbers above 1000, whose first or last three digits are not prime >>> validate(74679) False >>> validate(235693) False >>> validate(3797) True """ if len(str(n)) > 3: if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])): return False return True def compute_truncated_primes(count: int = 11) -> list[int]: """ Returns the list of truncated primes >>> compute_truncated_primes(11) [23, 37, 53, 73, 313, 317, 373, 797, 3137, 3797, 739397] """ list_truncated_primes = [] num = 13 while len(list_truncated_primes) != count: if validate(num): list_nums = list_truncated_nums(num) if all(is_prime(i) for i in list_nums): list_truncated_primes.append(num) num += 2 return list_truncated_primes def solution() -> int: """ Returns the sum of truncated primes """ return sum(compute_truncated_primes(11)) if __name__ == "__main__": print(f"{sum(compute_truncated_primes(11)) = }")
""" The number 3797 has an interesting property. Being prime itself, it is possible to continuously remove digits from left to right, and remain prime at each stage: 3797, 797, 97, and 7. Similarly we can work from right to left: 3797, 379, 37, and 3. Find the sum of the only eleven primes that are both truncatable from left to right and right to left. NOTE: 2, 3, 5, and 7 are not considered to be truncatable primes. """ from __future__ import annotations seive = [True] * 1000001 seive[1] = False i = 2 while i * i <= 1000000: if seive[i]: for j in range(i * i, 1000001, i): seive[j] = False i += 1 def is_prime(n: int) -> bool: """ Returns True if n is prime, False otherwise, for 1 <= n <= 1000000 >>> is_prime(87) False >>> is_prime(1) False >>> is_prime(25363) False """ return seive[n] def list_truncated_nums(n: int) -> list[int]: """ Returns a list of all left and right truncated numbers of n >>> list_truncated_nums(927628) [927628, 27628, 92762, 7628, 9276, 628, 927, 28, 92, 8, 9] >>> list_truncated_nums(467) [467, 67, 46, 7, 4] >>> list_truncated_nums(58) [58, 8, 5] """ str_num = str(n) list_nums = [n] for i in range(1, len(str_num)): list_nums.append(int(str_num[i:])) list_nums.append(int(str_num[:-i])) return list_nums def validate(n: int) -> bool: """ To optimize the approach, we will rule out the numbers above 1000, whose first or last three digits are not prime >>> validate(74679) False >>> validate(235693) False >>> validate(3797) True """ if len(str(n)) > 3: if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])): return False return True def compute_truncated_primes(count: int = 11) -> list[int]: """ Returns the list of truncated primes >>> compute_truncated_primes(11) [23, 37, 53, 73, 313, 317, 373, 797, 3137, 3797, 739397] """ list_truncated_primes = [] num = 13 while len(list_truncated_primes) != count: if validate(num): list_nums = list_truncated_nums(num) if all(is_prime(i) for i in list_nums): list_truncated_primes.append(num) num += 2 return list_truncated_primes def solution() -> int: """ Returns the sum of truncated primes """ return sum(compute_truncated_primes(11)) if __name__ == "__main__": print(f"{sum(compute_truncated_primes(11)) = }")
-1
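A small self-contained check of the truncation idea described above on the worked example 3797, using plain trial division instead of the precomputed sieve; the helper names are illustrative.

def is_prime_trial(n: int) -> bool:
    if n < 2:
        return False
    return all(n % d for d in range(2, int(n ** 0.5) + 1))


def truncations(n: int) -> list:
    s = str(n)
    left = [int(s[i:]) for i in range(len(s))]                # 3797, 797, 97, 7
    right = [int(s[: len(s) - i]) for i in range(1, len(s))]  # 379, 37, 3
    return left + right


print(all(is_prime_trial(t) for t in truncations(3797)))  # True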
TheAlgorithms/Python
4,297
[mypy] Fix web_programming directory
A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
cclauss
"2021-03-28T01:58:51Z"
"2021-03-31T03:18:07Z"
c22c7d503be5f48ae257c648f7b83b8a80a02738
895bca36541598a04dba525568a20d2282e0ffd9
[mypy] Fix web_programming directory. A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
-1
TheAlgorithms/Python
4,297
[mypy] Fix web_programming directory
A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
cclauss
"2021-03-28T01:58:51Z"
"2021-03-31T03:18:07Z"
c22c7d503be5f48ae257c648f7b83b8a80a02738
895bca36541598a04dba525568a20d2282e0ffd9
[mypy] Fix web_programming directory. A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
# Check whether Graph is Bipartite or Not using DFS # A Bipartite Graph is a graph whose vertices can be divided into two independent sets, # U and V such that every edge (u, v) either connects a vertex from U to V or a vertex # from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, # or u belongs to V and v to U. We can also say that there is no edge that connects # vertices of same set. def check_bipartite_dfs(graph): visited = [False] * len(graph) color = [-1] * len(graph) def dfs(v, c): visited[v] = True color[v] = c for u in graph[v]: if not visited[u]: dfs(u, 1 - c) for i in range(len(graph)): if not visited[i]: dfs(i, 0) for i in range(len(graph)): for j in graph[i]: if color[i] == color[j]: return False return True # Adjacency list of graph graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []} print(check_bipartite_dfs(graph))
# Check whether Graph is Bipartite or Not using DFS # A Bipartite Graph is a graph whose vertices can be divided into two independent sets, # U and V such that every edge (u, v) either connects a vertex from U to V or a vertex # from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, # or u belongs to V and v to U. We can also say that there is no edge that connects # vertices of same set. def check_bipartite_dfs(graph): visited = [False] * len(graph) color = [-1] * len(graph) def dfs(v, c): visited[v] = True color[v] = c for u in graph[v]: if not visited[u]: dfs(u, 1 - c) for i in range(len(graph)): if not visited[i]: dfs(i, 0) for i in range(len(graph)): for j in graph[i]: if color[i] == color[j]: return False return True # Adjacency list of graph graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []} print(check_bipartite_dfs(graph))
-1
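For comparison with the recursive DFS colouring above, an iterative breadth-first 2-colouring sketch over the same adjacency-list format; it returns the same verdict for the sample graph and avoids deep recursion (names are illustrative).

from collections import deque


def check_bipartite_bfs(graph: dict) -> bool:
    color = {node: -1 for node in graph}
    for start in graph:
        if color[start] != -1:
            continue
        color[start] = 0
        queue = deque([start])
        while queue:
            node = queue.popleft()
            for neighbour in graph[node]:
                if color[neighbour] == -1:
                    color[neighbour] = 1 - color[node]   # opposite colour to the current node
                    queue.append(neighbour)
                elif color[neighbour] == color[node]:
                    return False                         # adjacent nodes share a colour
    return True


print(check_bipartite_bfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}))  # True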
TheAlgorithms/Python
4,297
[mypy] Fix web_programming directory
A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
cclauss
"2021-03-28T01:58:51Z"
"2021-03-31T03:18:07Z"
c22c7d503be5f48ae257c648f7b83b8a80a02738
895bca36541598a04dba525568a20d2282e0ffd9
[mypy] Fix web_programming directory. A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
r""" Problem: The n queens problem is of placing N queens on a N * N chess board such that no queen can attack any other queens placed on that chess board. This means that one queen cannot have any other queen on its horizontal, vertical and diagonal lines. Solution: To solve this problem we will use simple math. First we know the queen can move in all the possible ways, we can simplify it in this: vertical, horizontal, diagonal left and diagonal right. We can visualize it like this: left diagonal = \ right diagonal = / On a chessboard vertical movement could be the rows and horizontal movement could be the columns. In programming we can use an array, and in this array each index could be the rows and each value in the array could be the column. For example: . Q . . We have this chessboard with one queen in each column and each queen . . . Q can't attack to each other. Q . . . The array for this example would look like this: [1, 3, 0, 2] . . Q . So if we use an array and we verify that each value in the array is different to each other we know that at least the queens can't attack each other in horizontal and vertical. At this point we have that halfway completed and we will treat the chessboard as a Cartesian plane. Hereinafter we are going to remember basic math, so in the school we learned this formula: Slope of a line: y2 - y1 m = ---------- x2 - x1 This formula allow us to get the slope. For the angles 45º (right diagonal) and 135º (left diagonal) this formula gives us m = 1, and m = -1 respectively. See:: https://www.enotes.com/homework-help/write-equation-line-that-hits-origin-45-degree-1474860 Then we have this another formula: Slope intercept: y = mx + b b is where the line crosses the Y axis (to get more information see: https://www.mathsisfun.com/y_intercept.html), if we change the formula to solve for b we would have: y - mx = b And like we already have the m values for the angles 45º and 135º, this formula would look like this: 45º: y - (1)x = b 45º: y - x = b 135º: y - (-1)x = b 135º: y + x = b y = row x = column Applying this two formulas we can check if a queen in some position is being attacked for another one or vice versa. """ from typing import List def depth_first_search( possible_board: List[int], diagonal_right_collisions: List[int], diagonal_left_collisions: List[int], boards: List[List[str]], n: int, ) -> None: """ >>> boards = [] >>> depth_first_search([], [], [], boards, 4) >>> for board in boards: ... print(board) ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '] ['. . Q . ', 'Q . . . ', '. . . Q ', '. Q . . '] """ # Get next row in the current board (possible_board) to fill it with a queen row = len(possible_board) # If row is equal to the size of the board it means there are a queen in each row in # the current board (possible_board) if row == n: # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '] boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board]) return # We iterate each column in the row to find all possible results in each row for col in range(n): # We apply that we learned previously. First we check that in the current board # (possible_board) there are not other same value because if there is it means # that there are a collision in vertical. Then we apply the two formulas we # learned before: # # 45º: y - x = b or 45: row - col = b # 135º: y + x = b or row + col = b. 
# # And we verify if the results of this two formulas not exist in their variables # respectively. (diagonal_right_collisions, diagonal_left_collisions) # # If any or these are True it means there is a collision so we continue to the # next value in the for loop. if ( col in possible_board or row - col in diagonal_right_collisions or row + col in diagonal_left_collisions ): continue # If it is False we call dfs function again and we update the inputs depth_first_search( possible_board + [col], diagonal_right_collisions + [row - col], diagonal_left_collisions + [row + col], boards, n, ) def n_queens_solution(n: int) -> None: boards: List[List[str]] = [] depth_first_search([], [], [], boards, n) # Print all the boards for board in boards: for column in board: print(column) print("") print(len(boards), "solutions were found.") if __name__ == "__main__": import doctest doctest.testmod() n_queens_solution(4)
r""" Problem: The n queens problem is of placing N queens on a N * N chess board such that no queen can attack any other queens placed on that chess board. This means that one queen cannot have any other queen on its horizontal, vertical and diagonal lines. Solution: To solve this problem we will use simple math. First we know the queen can move in all the possible ways, we can simplify it in this: vertical, horizontal, diagonal left and diagonal right. We can visualize it like this: left diagonal = \ right diagonal = / On a chessboard vertical movement could be the rows and horizontal movement could be the columns. In programming we can use an array, and in this array each index could be the rows and each value in the array could be the column. For example: . Q . . We have this chessboard with one queen in each column and each queen . . . Q can't attack to each other. Q . . . The array for this example would look like this: [1, 3, 0, 2] . . Q . So if we use an array and we verify that each value in the array is different to each other we know that at least the queens can't attack each other in horizontal and vertical. At this point we have that halfway completed and we will treat the chessboard as a Cartesian plane. Hereinafter we are going to remember basic math, so in the school we learned this formula: Slope of a line: y2 - y1 m = ---------- x2 - x1 This formula allow us to get the slope. For the angles 45º (right diagonal) and 135º (left diagonal) this formula gives us m = 1, and m = -1 respectively. See:: https://www.enotes.com/homework-help/write-equation-line-that-hits-origin-45-degree-1474860 Then we have this another formula: Slope intercept: y = mx + b b is where the line crosses the Y axis (to get more information see: https://www.mathsisfun.com/y_intercept.html), if we change the formula to solve for b we would have: y - mx = b And like we already have the m values for the angles 45º and 135º, this formula would look like this: 45º: y - (1)x = b 45º: y - x = b 135º: y - (-1)x = b 135º: y + x = b y = row x = column Applying this two formulas we can check if a queen in some position is being attacked for another one or vice versa. """ from typing import List def depth_first_search( possible_board: List[int], diagonal_right_collisions: List[int], diagonal_left_collisions: List[int], boards: List[List[str]], n: int, ) -> None: """ >>> boards = [] >>> depth_first_search([], [], [], boards, 4) >>> for board in boards: ... print(board) ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '] ['. . Q . ', 'Q . . . ', '. . . Q ', '. Q . . '] """ # Get next row in the current board (possible_board) to fill it with a queen row = len(possible_board) # If row is equal to the size of the board it means there are a queen in each row in # the current board (possible_board) if row == n: # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '] boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board]) return # We iterate each column in the row to find all possible results in each row for col in range(n): # We apply that we learned previously. First we check that in the current board # (possible_board) there are not other same value because if there is it means # that there are a collision in vertical. Then we apply the two formulas we # learned before: # # 45º: y - x = b or 45: row - col = b # 135º: y + x = b or row + col = b. 
# # And we verify if the results of this two formulas not exist in their variables # respectively. (diagonal_right_collisions, diagonal_left_collisions) # # If any or these are True it means there is a collision so we continue to the # next value in the for loop. if ( col in possible_board or row - col in diagonal_right_collisions or row + col in diagonal_left_collisions ): continue # If it is False we call dfs function again and we update the inputs depth_first_search( possible_board + [col], diagonal_right_collisions + [row - col], diagonal_left_collisions + [row + col], boards, n, ) def n_queens_solution(n: int) -> None: boards: List[List[str]] = [] depth_first_search([], [], [], boards, n) # Print all the boards for board in boards: for column in board: print(column) print("") print(len(boards), "solutions were found.") if __name__ == "__main__": import doctest doctest.testmod() n_queens_solution(4)
-1
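A small worked check of the collision formulas derived above (row - col for the 45-degree diagonal, row + col for the 135-degree diagonal) applied to the example board [1, 3, 0, 2]; the helper name is illustrative.

def attacks(row_a: int, col_a: int, row_b: int, col_b: int) -> bool:
    # same column, same 45-degree diagonal (row - col), or same 135-degree diagonal (row + col)
    return (
        col_a == col_b
        or row_a - col_a == row_b - col_b
        or row_a + col_a == row_b + col_b
    )


board = [1, 3, 0, 2]  # index = row, value = column
attacking_pairs = [
    (r1, r2)
    for r1 in range(len(board))
    for r2 in range(r1 + 1, len(board))
    if attacks(r1, board[r1], r2, board[r2])
]
print(attacking_pairs)  # [] -> no queen attacks another, so the board is a valid solution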
TheAlgorithms/Python
4,297
[mypy] Fix web_programming directory
A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
cclauss
"2021-03-28T01:58:51Z"
"2021-03-31T03:18:07Z"
c22c7d503be5f48ae257c648f7b83b8a80a02738
895bca36541598a04dba525568a20d2282e0ffd9
[mypy] Fix web_programming directory. A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
-1
TheAlgorithms/Python
4,297
[mypy] Fix web_programming directory
A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
cclauss
"2021-03-28T01:58:51Z"
"2021-03-31T03:18:07Z"
c22c7d503be5f48ae257c648f7b83b8a80a02738
895bca36541598a04dba525568a20d2282e0ffd9
[mypy] Fix web_programming directory. A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" The Reverse Polish Nation also known as Polish postfix notation or simply postfix notation. https://en.wikipedia.org/wiki/Reverse_Polish_notation Classic examples of simple stack implementations Valid operators are +, -, *, /. Each operand may be an integer or another expression. """ def evaluate_postfix(postfix_notation: list) -> int: """ >>> evaluate_postfix(["2", "1", "+", "3", "*"]) 9 >>> evaluate_postfix(["4", "13", "5", "/", "+"]) 6 >>> evaluate_postfix([]) 0 """ if not postfix_notation: return 0 operations = {"+", "-", "*", "/"} stack = [] for token in postfix_notation: if token in operations: b, a = stack.pop(), stack.pop() if token == "+": stack.append(a + b) elif token == "-": stack.append(a - b) elif token == "*": stack.append(a * b) else: if a * b < 0 and a % b != 0: stack.append(a // b + 1) else: stack.append(a // b) else: stack.append(int(token)) return stack.pop() if __name__ == "__main__": import doctest doctest.testmod()
""" The Reverse Polish Nation also known as Polish postfix notation or simply postfix notation. https://en.wikipedia.org/wiki/Reverse_Polish_notation Classic examples of simple stack implementations Valid operators are +, -, *, /. Each operand may be an integer or another expression. """ def evaluate_postfix(postfix_notation: list) -> int: """ >>> evaluate_postfix(["2", "1", "+", "3", "*"]) 9 >>> evaluate_postfix(["4", "13", "5", "/", "+"]) 6 >>> evaluate_postfix([]) 0 """ if not postfix_notation: return 0 operations = {"+", "-", "*", "/"} stack = [] for token in postfix_notation: if token in operations: b, a = stack.pop(), stack.pop() if token == "+": stack.append(a + b) elif token == "-": stack.append(a - b) elif token == "*": stack.append(a * b) else: if a * b < 0 and a % b != 0: stack.append(a // b + 1) else: stack.append(a // b) else: stack.append(int(token)) return stack.pop() if __name__ == "__main__": import doctest doctest.testmod()
-1
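The division branch above truncates the quotient toward zero rather than flooring it, which only differs for mixed-sign operands; a short sketch contrasting the two behaviours on the illustrative pair 13 and -5.

a, b = 13, -5
floored = a // b                                                  # -3 (Python's floor division)
truncated = a // b + 1 if a * b < 0 and a % b != 0 else a // b    # -2, matching the evaluator above
print(floored, truncated)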
TheAlgorithms/Python
4,297
[mypy] Fix web_programming directory
A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
cclauss
"2021-03-28T01:58:51Z"
"2021-03-31T03:18:07Z"
c22c7d503be5f48ae257c648f7b83b8a80a02738
895bca36541598a04dba525568a20d2282e0ffd9
[mypy] Fix web_programming directory. A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" Graph Coloring also called "m coloring problem" consists of coloring given graph with at most m colors such that no adjacent vertices are assigned same color Wikipedia: https://en.wikipedia.org/wiki/Graph_coloring """ from typing import List def valid_coloring( neighbours: List[int], colored_vertices: List[int], color: int ) -> bool: """ For each neighbour check if coloring constraint is satisfied If any of the neighbours fail the constraint return False If all neighbours validate constraint return True >>> neighbours = [0,1,0,1,0] >>> colored_vertices = [0, 2, 1, 2, 0] >>> color = 1 >>> valid_coloring(neighbours, colored_vertices, color) True >>> color = 2 >>> valid_coloring(neighbours, colored_vertices, color) False """ # Does any neighbour not satisfy the constraints return not any( neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(neighbours) ) def util_color( graph: List[List[int]], max_colors: int, colored_vertices: List[int], index: int ) -> bool: """ Pseudo-Code Base Case: 1. Check if coloring is complete 1.1 If complete return True (meaning that we successfully colored graph) Recursive Step: 2. Itterates over each color: Check if current coloring is valid: 2.1. Color given vertex 2.2. Do recursive call check if this coloring leads to solving problem 2.4. if current coloring leads to solution return 2.5. Uncolor given vertex >>> graph = [[0, 1, 0, 0, 0], ... [1, 0, 1, 0, 1], ... [0, 1, 0, 1, 0], ... [0, 1, 1, 0, 0], ... [0, 1, 0, 0, 0]] >>> max_colors = 3 >>> colored_vertices = [0, 1, 0, 0, 0] >>> index = 3 >>> util_color(graph, max_colors, colored_vertices, index) True >>> max_colors = 2 >>> util_color(graph, max_colors, colored_vertices, index) False """ # Base Case if index == len(graph): return True # Recursive Step for i in range(max_colors): if valid_coloring(graph[index], colored_vertices, i): # Color current vertex colored_vertices[index] = i # Validate coloring if util_color(graph, max_colors, colored_vertices, index + 1): return True # Backtrack colored_vertices[index] = -1 return False def color(graph: List[List[int]], max_colors: int) -> List[int]: """ Wrapper function to call subroutine called util_color which will either return True or False. If True is returned colored_vertices list is filled with correct colorings >>> graph = [[0, 1, 0, 0, 0], ... [1, 0, 1, 0, 1], ... [0, 1, 0, 1, 0], ... [0, 1, 1, 0, 0], ... [0, 1, 0, 0, 0]] >>> max_colors = 3 >>> color(graph, max_colors) [0, 1, 0, 2, 0] >>> max_colors = 2 >>> color(graph, max_colors) [] """ colored_vertices = [-1] * len(graph) if util_color(graph, max_colors, colored_vertices, 0): return colored_vertices return []
""" Graph Coloring also called "m coloring problem" consists of coloring given graph with at most m colors such that no adjacent vertices are assigned same color Wikipedia: https://en.wikipedia.org/wiki/Graph_coloring """ from typing import List def valid_coloring( neighbours: List[int], colored_vertices: List[int], color: int ) -> bool: """ For each neighbour check if coloring constraint is satisfied If any of the neighbours fail the constraint return False If all neighbours validate constraint return True >>> neighbours = [0,1,0,1,0] >>> colored_vertices = [0, 2, 1, 2, 0] >>> color = 1 >>> valid_coloring(neighbours, colored_vertices, color) True >>> color = 2 >>> valid_coloring(neighbours, colored_vertices, color) False """ # Does any neighbour not satisfy the constraints return not any( neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(neighbours) ) def util_color( graph: List[List[int]], max_colors: int, colored_vertices: List[int], index: int ) -> bool: """ Pseudo-Code Base Case: 1. Check if coloring is complete 1.1 If complete return True (meaning that we successfully colored graph) Recursive Step: 2. Itterates over each color: Check if current coloring is valid: 2.1. Color given vertex 2.2. Do recursive call check if this coloring leads to solving problem 2.4. if current coloring leads to solution return 2.5. Uncolor given vertex >>> graph = [[0, 1, 0, 0, 0], ... [1, 0, 1, 0, 1], ... [0, 1, 0, 1, 0], ... [0, 1, 1, 0, 0], ... [0, 1, 0, 0, 0]] >>> max_colors = 3 >>> colored_vertices = [0, 1, 0, 0, 0] >>> index = 3 >>> util_color(graph, max_colors, colored_vertices, index) True >>> max_colors = 2 >>> util_color(graph, max_colors, colored_vertices, index) False """ # Base Case if index == len(graph): return True # Recursive Step for i in range(max_colors): if valid_coloring(graph[index], colored_vertices, i): # Color current vertex colored_vertices[index] = i # Validate coloring if util_color(graph, max_colors, colored_vertices, index + 1): return True # Backtrack colored_vertices[index] = -1 return False def color(graph: List[List[int]], max_colors: int) -> List[int]: """ Wrapper function to call subroutine called util_color which will either return True or False. If True is returned colored_vertices list is filled with correct colorings >>> graph = [[0, 1, 0, 0, 0], ... [1, 0, 1, 0, 1], ... [0, 1, 0, 1, 0], ... [0, 1, 1, 0, 0], ... [0, 1, 0, 0, 0]] >>> max_colors = 3 >>> color(graph, max_colors) [0, 1, 0, 2, 0] >>> max_colors = 2 >>> color(graph, max_colors) [] """ colored_vertices = [-1] * len(graph) if util_color(graph, max_colors, colored_vertices, 0): return colored_vertices return []
-1
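For contrast with the exact backtracking search above, a greedy first-fit colouring sketch over the same adjacency-matrix format; greedy colouring never assigns adjacent vertices the same colour but may use more colours than the optimum (names are illustrative).

from typing import List


def greedy_color(graph: List[List[int]]) -> List[int]:
    colors = [-1] * len(graph)
    for vertex in range(len(graph)):
        used = {
            colors[neighbour]
            for neighbour, edge in enumerate(graph[vertex])
            if edge == 1 and colors[neighbour] != -1
        }
        color = 0
        while color in used:  # first colour not taken by an already-coloured neighbour
            color += 1
        colors[vertex] = color
    return colors


graph = [
    [0, 1, 0, 0, 0],
    [1, 0, 1, 0, 1],
    [0, 1, 0, 1, 0],
    [0, 1, 1, 0, 0],
    [0, 1, 0, 0, 0],
]
print(greedy_color(graph))  # [0, 1, 0, 2, 0] for this vertex ordering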
TheAlgorithms/Python
4,297
[mypy] Fix web_programming directory
A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
cclauss
"2021-03-28T01:58:51Z"
"2021-03-31T03:18:07Z"
c22c7d503be5f48ae257c648f7b83b8a80a02738
895bca36541598a04dba525568a20d2282e0ffd9
[mypy] Fix web_programming directory. A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
def max_difference(a: list[int]) -> tuple[int, int]: """ We are given an array A[1..n] of integers, n >= 1. We want to find a pair of indices (i, j) such that 1 <= i <= j <= n and A[j] - A[i] is as large as possible. Explanation: https://www.geeksforgeeks.org/maximum-difference-between-two-elements/ >>> max_difference([5, 11, 2, 1, 7, 9, 0, 7]) (1, 9) """ # base case if len(a) == 1: return a[0], a[0] else: # split A into half. first = a[: len(a) // 2] second = a[len(a) // 2 :] # 2 sub problems, 1/2 of original size. small1, big1 = max_difference(first) small2, big2 = max_difference(second) # get min of first and max of second # linear time min_first = min(first) max_second = max(second) # 3 cases, either (small1, big1), # (min_first, max_second), (small2, big2) # constant comparisons if big2 - small2 > max_second - min_first and big2 - small2 > big1 - small1: return small2, big2 elif big1 - small1 > max_second - min_first: return small1, big1 else: return min_first, max_second if __name__ == "__main__": import doctest doctest.testmod()
def max_difference(a: list[int]) -> tuple[int, int]: """ We are given an array A[1..n] of integers, n >= 1. We want to find a pair of indices (i, j) such that 1 <= i <= j <= n and A[j] - A[i] is as large as possible. Explanation: https://www.geeksforgeeks.org/maximum-difference-between-two-elements/ >>> max_difference([5, 11, 2, 1, 7, 9, 0, 7]) (1, 9) """ # base case if len(a) == 1: return a[0], a[0] else: # split A into half. first = a[: len(a) // 2] second = a[len(a) // 2 :] # 2 sub problems, 1/2 of original size. small1, big1 = max_difference(first) small2, big2 = max_difference(second) # get min of first and max of second # linear time min_first = min(first) max_second = max(second) # 3 cases, either (small1, big1), # (min_first, max_second), (small2, big2) # constant comparisons if big2 - small2 > max_second - min_first and big2 - small2 > big1 - small1: return small2, big2 elif big1 - small1 > max_second - min_first: return small1, big1 else: return min_first, max_second if __name__ == "__main__": import doctest doctest.testmod()
-1
TheAlgorithms/Python
4,297
[mypy] Fix web_programming directory
A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
cclauss
"2021-03-28T01:58:51Z"
"2021-03-31T03:18:07Z"
c22c7d503be5f48ae257c648f7b83b8a80a02738
895bca36541598a04dba525568a20d2282e0ffd9
[mypy] Fix web_programming directory. A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" This is a Python implementation of the levenshtein distance. Levenshtein distance is a string metric for measuring the difference between two sequences. For doctests run following command: python -m doctest -v levenshtein-distance.py or python3 -m doctest -v levenshtein-distance.py For manual testing run: python levenshtein-distance.py """ def levenshtein_distance(first_word: str, second_word: str) -> int: """Implementation of the levenshtein distance in Python. :param first_word: the first word to measure the difference. :param second_word: the second word to measure the difference. :return: the levenshtein distance between the two words. Examples: >>> levenshtein_distance("planet", "planetary") 3 >>> levenshtein_distance("", "test") 4 >>> levenshtein_distance("book", "back") 2 >>> levenshtein_distance("book", "book") 0 >>> levenshtein_distance("test", "") 4 >>> levenshtein_distance("", "") 0 >>> levenshtein_distance("orchestration", "container") 10 """ # The longer word should come first if len(first_word) < len(second_word): return levenshtein_distance(second_word, first_word) if len(second_word) == 0: return len(first_word) previous_row = range(len(second_word) + 1) for i, c1 in enumerate(first_word): current_row = [i + 1] for j, c2 in enumerate(second_word): # Calculate insertions, deletions and substitutions insertions = previous_row[j + 1] + 1 deletions = current_row[j] + 1 substitutions = previous_row[j] + (c1 != c2) # Get the minimum to append to the current row current_row.append(min(insertions, deletions, substitutions)) # Store the previous row previous_row = current_row # Returns the last element (distance) return previous_row[-1] if __name__ == "__main__": first_word = input("Enter the first word:\n").strip() second_word = input("Enter the second word:\n").strip() result = levenshtein_distance(first_word, second_word) print(f"Levenshtein distance between {first_word} and {second_word} is {result}")
""" This is a Python implementation of the levenshtein distance. Levenshtein distance is a string metric for measuring the difference between two sequences. For doctests run following command: python -m doctest -v levenshtein-distance.py or python3 -m doctest -v levenshtein-distance.py For manual testing run: python levenshtein-distance.py """ def levenshtein_distance(first_word: str, second_word: str) -> int: """Implementation of the levenshtein distance in Python. :param first_word: the first word to measure the difference. :param second_word: the second word to measure the difference. :return: the levenshtein distance between the two words. Examples: >>> levenshtein_distance("planet", "planetary") 3 >>> levenshtein_distance("", "test") 4 >>> levenshtein_distance("book", "back") 2 >>> levenshtein_distance("book", "book") 0 >>> levenshtein_distance("test", "") 4 >>> levenshtein_distance("", "") 0 >>> levenshtein_distance("orchestration", "container") 10 """ # The longer word should come first if len(first_word) < len(second_word): return levenshtein_distance(second_word, first_word) if len(second_word) == 0: return len(first_word) previous_row = range(len(second_word) + 1) for i, c1 in enumerate(first_word): current_row = [i + 1] for j, c2 in enumerate(second_word): # Calculate insertions, deletions and substitutions insertions = previous_row[j + 1] + 1 deletions = current_row[j] + 1 substitutions = previous_row[j] + (c1 != c2) # Get the minimum to append to the current row current_row.append(min(insertions, deletions, substitutions)) # Store the previous row previous_row = current_row # Returns the last element (distance) return previous_row[-1] if __name__ == "__main__": first_word = input("Enter the first word:\n").strip() second_word = input("Enter the second word:\n").strip() result = levenshtein_distance(first_word, second_word) print(f"Levenshtein distance between {first_word} and {second_word} is {result}")
-1
TheAlgorithms/Python
4,297
[mypy] Fix web_programming directory
A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
cclauss
"2021-03-28T01:58:51Z"
"2021-03-31T03:18:07Z"
c22c7d503be5f48ae257c648f7b83b8a80a02738
895bca36541598a04dba525568a20d2282e0ffd9
[mypy] Fix web_programming directory. A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" Project Euler Problem 3: https://projecteuler.net/problem=3 Largest prime factor The prime factors of 13195 are 5, 7, 13 and 29. What is the largest prime factor of the number 600851475143? References: - https://en.wikipedia.org/wiki/Prime_number#Unique_factorization """ import math def isprime(num: int) -> bool: """ Returns boolean representing primality of given number num. >>> isprime(2) True >>> isprime(3) True >>> isprime(27) False >>> isprime(2999) True >>> isprime(0) Traceback (most recent call last): ... ValueError: Parameter num must be greater than or equal to two. >>> isprime(1) Traceback (most recent call last): ... ValueError: Parameter num must be greater than or equal to two. """ if num <= 1: raise ValueError("Parameter num must be greater than or equal to two.") if num == 2: return True elif num % 2 == 0: return False for i in range(3, int(math.sqrt(num)) + 1, 2): if num % i == 0: return False return True def solution(n: int = 600851475143) -> int: """ Returns the largest prime factor of a given number n. >>> solution(13195) 29 >>> solution(10) 5 >>> solution(17) 17 >>> solution(3.4) 3 >>> solution(0) Traceback (most recent call last): ... ValueError: Parameter n must be greater than or equal to one. >>> solution(-17) Traceback (most recent call last): ... ValueError: Parameter n must be greater than or equal to one. >>> solution([]) Traceback (most recent call last): ... TypeError: Parameter n must be int or castable to int. >>> solution("asd") Traceback (most recent call last): ... TypeError: Parameter n must be int or castable to int. """ try: n = int(n) except (TypeError, ValueError): raise TypeError("Parameter n must be int or castable to int.") if n <= 0: raise ValueError("Parameter n must be greater than or equal to one.") max_number = 0 if isprime(n): return n while n % 2 == 0: n //= 2 if isprime(n): return n for i in range(3, int(math.sqrt(n)) + 1, 2): if n % i == 0: if isprime(n / i): max_number = n / i break elif isprime(i): max_number = i return max_number if __name__ == "__main__": print(f"{solution() = }")
""" Project Euler Problem 3: https://projecteuler.net/problem=3 Largest prime factor The prime factors of 13195 are 5, 7, 13 and 29. What is the largest prime factor of the number 600851475143? References: - https://en.wikipedia.org/wiki/Prime_number#Unique_factorization """ import math def isprime(num: int) -> bool: """ Returns boolean representing primality of given number num. >>> isprime(2) True >>> isprime(3) True >>> isprime(27) False >>> isprime(2999) True >>> isprime(0) Traceback (most recent call last): ... ValueError: Parameter num must be greater than or equal to two. >>> isprime(1) Traceback (most recent call last): ... ValueError: Parameter num must be greater than or equal to two. """ if num <= 1: raise ValueError("Parameter num must be greater than or equal to two.") if num == 2: return True elif num % 2 == 0: return False for i in range(3, int(math.sqrt(num)) + 1, 2): if num % i == 0: return False return True def solution(n: int = 600851475143) -> int: """ Returns the largest prime factor of a given number n. >>> solution(13195) 29 >>> solution(10) 5 >>> solution(17) 17 >>> solution(3.4) 3 >>> solution(0) Traceback (most recent call last): ... ValueError: Parameter n must be greater than or equal to one. >>> solution(-17) Traceback (most recent call last): ... ValueError: Parameter n must be greater than or equal to one. >>> solution([]) Traceback (most recent call last): ... TypeError: Parameter n must be int or castable to int. >>> solution("asd") Traceback (most recent call last): ... TypeError: Parameter n must be int or castable to int. """ try: n = int(n) except (TypeError, ValueError): raise TypeError("Parameter n must be int or castable to int.") if n <= 0: raise ValueError("Parameter n must be greater than or equal to one.") max_number = 0 if isprime(n): return n while n % 2 == 0: n //= 2 if isprime(n): return n for i in range(3, int(math.sqrt(n)) + 1, 2): if n % i == 0: if isprime(n / i): max_number = n / i break elif isprime(i): max_number = i return max_number if __name__ == "__main__": print(f"{solution() = }")
-1
TheAlgorithms/Python
4,297
[mypy] Fix web_programming directory
A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
cclauss
"2021-03-28T01:58:51Z"
"2021-03-31T03:18:07Z"
c22c7d503be5f48ae257c648f7b83b8a80a02738
895bca36541598a04dba525568a20d2282e0ffd9
[mypy] Fix web_programming directory. A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
import random import string class ShuffledShiftCipher: """ This algorithm uses the Caesar Cipher algorithm but removes the option to use brute force to decrypt the message. The passcode is a a random password from the selection buffer of 1. uppercase letters of the English alphabet 2. lowercase letters of the English alphabet 3. digits from 0 to 9 Using unique characters from the passcode, the normal list of characters, that can be allowed in the plaintext, is pivoted and shuffled. Refer to docstring of __make_key_list() to learn more about the shuffling. Then, using the passcode, a number is calculated which is used to encrypt the plaintext message with the normal shift cipher method, only in this case, the reference, to look back at while decrypting, is shuffled. Each cipher object can possess an optional argument as passcode, without which a new passcode is generated for that object automatically. cip1 = ShuffledShiftCipher('d4usr9TWxw9wMD') cip2 = ShuffledShiftCipher() """ def __init__(self, passcode: str = None): """ Initializes a cipher object with a passcode as it's entity Note: No new passcode is generated if user provides a passcode while creating the object """ self.__passcode = passcode or self.__passcode_creator() self.__key_list = self.__make_key_list() self.__shift_key = self.__make_shift_key() def __str__(self): """ :return: passcode of the cipher object """ return "Passcode is: " + "".join(self.__passcode) def __neg_pos(self, iterlist: list) -> list: """ Mutates the list by changing the sign of each alternate element :param iterlist: takes a list iterable :return: the mutated list """ for i in range(1, len(iterlist), 2): iterlist[i] *= -1 return iterlist def __passcode_creator(self) -> list: """ Creates a random password from the selection buffer of 1. uppercase letters of the English alphabet 2. lowercase letters of the English alphabet 3. digits from 0 to 9 :rtype: list :return: a password of a random length between 10 to 20 """ choices = string.ascii_letters + string.digits password = [random.choice(choices) for i in range(random.randint(10, 20))] return password def __make_key_list(self) -> list: """ Shuffles the ordered character choices by pivoting at breakpoints Breakpoints are the set of characters in the passcode eg: if, ABCDEFGHIJKLMNOPQRSTUVWXYZ are the possible characters and CAMERA is the passcode then, breakpoints = [A,C,E,M,R] # sorted set of characters from passcode shuffled parts: [A,CB,ED,MLKJIHGF,RQPON,ZYXWVUTS] shuffled __key_list : ACBEDMLKJIHGFRQPONZYXWVUTS Shuffling only 26 letters of the english alphabet can generate 26! combinations for the shuffled list. In the program we consider, a set of 97 characters (including letters, digits, punctuation and whitespaces), thereby creating a possibility of 97! combinations (which is a 152 digit number in itself), thus diminishing the possibility of a brute force approach. Moreover, shift keys even introduce a multiple of 26 for a brute force approach for each of the already 97! combinations. """ # key_list_options contain nearly all printable except few elements from # string.whitespace key_list_options = ( string.ascii_letters + string.digits + string.punctuation + " \t\n" ) keys_l = [] # creates points known as breakpoints to break the key_list_options at those # points and pivot each substring breakpoints = sorted(set(self.__passcode)) temp_list = [] # algorithm for creating a new shuffled list, keys_l, out of key_list_options for i in key_list_options: temp_list.extend(i) # checking breakpoints at which to pivot temporary sublist and add it into # keys_l if i in breakpoints or i == key_list_options[-1]: keys_l.extend(temp_list[::-1]) temp_list = [] # returning a shuffled keys_l to prevent brute force guessing of shift key return keys_l def __make_shift_key(self) -> int: """ sum() of the mutated list of ascii values of all characters where the mutated list is the one returned by __neg_pos() """ num = sum(self.__neg_pos([ord(x) for x in self.__passcode])) return num if num > 0 else len(self.__passcode) def decrypt(self, encoded_message: str) -> str: """ Performs shifting of the encoded_message w.r.t. the shuffled __key_list to create the decoded_message >>> ssc = ShuffledShiftCipher('4PYIXyqeQZr44') >>> ssc.decrypt("d>**-1z6&'5z'5z:z+-='$'>=zp:>5:#z<'.&>#") 'Hello, this is a modified Caesar cipher' """ decoded_message = "" # decoding shift like Caesar cipher algorithm implementing negative shift or # reverse shift or left shift for i in encoded_message: position = self.__key_list.index(i) decoded_message += self.__key_list[ (position - self.__shift_key) % -len(self.__key_list) ] return decoded_message def encrypt(self, plaintext: str) -> str: """ Performs shifting of the plaintext w.r.t. the shuffled __key_list to create the encoded_message >>> ssc = ShuffledShiftCipher('4PYIXyqeQZr44') >>> ssc.encrypt('Hello, this is a modified Caesar cipher') "d>**-1z6&'5z'5z:z+-='$'>=zp:>5:#z<'.&>#" """ encoded_message = "" # encoding shift like Caesar cipher algorithm implementing positive shift or # forward shift or right shift for i in plaintext: position = self.__key_list.index(i) encoded_message += self.__key_list[ (position + self.__shift_key) % len(self.__key_list) ] return encoded_message def test_end_to_end(msg: str = "Hello, this is a modified Caesar cipher"): """ >>> test_end_to_end() 'Hello, this is a modified Caesar cipher' """ cip1 = ShuffledShiftCipher() return cip1.decrypt(cip1.encrypt(msg)) if __name__ == "__main__": import doctest doctest.testmod()
import random import string class ShuffledShiftCipher: """ This algorithm uses the Caesar Cipher algorithm but removes the option to use brute force to decrypt the message. The passcode is a a random password from the selection buffer of 1. uppercase letters of the English alphabet 2. lowercase letters of the English alphabet 3. digits from 0 to 9 Using unique characters from the passcode, the normal list of characters, that can be allowed in the plaintext, is pivoted and shuffled. Refer to docstring of __make_key_list() to learn more about the shuffling. Then, using the passcode, a number is calculated which is used to encrypt the plaintext message with the normal shift cipher method, only in this case, the reference, to look back at while decrypting, is shuffled. Each cipher object can possess an optional argument as passcode, without which a new passcode is generated for that object automatically. cip1 = ShuffledShiftCipher('d4usr9TWxw9wMD') cip2 = ShuffledShiftCipher() """ def __init__(self, passcode: str = None): """ Initializes a cipher object with a passcode as it's entity Note: No new passcode is generated if user provides a passcode while creating the object """ self.__passcode = passcode or self.__passcode_creator() self.__key_list = self.__make_key_list() self.__shift_key = self.__make_shift_key() def __str__(self): """ :return: passcode of the cipher object """ return "Passcode is: " + "".join(self.__passcode) def __neg_pos(self, iterlist: list) -> list: """ Mutates the list by changing the sign of each alternate element :param iterlist: takes a list iterable :return: the mutated list """ for i in range(1, len(iterlist), 2): iterlist[i] *= -1 return iterlist def __passcode_creator(self) -> list: """ Creates a random password from the selection buffer of 1. uppercase letters of the English alphabet 2. lowercase letters of the English alphabet 3. digits from 0 to 9 :rtype: list :return: a password of a random length between 10 to 20 """ choices = string.ascii_letters + string.digits password = [random.choice(choices) for i in range(random.randint(10, 20))] return password def __make_key_list(self) -> list: """ Shuffles the ordered character choices by pivoting at breakpoints Breakpoints are the set of characters in the passcode eg: if, ABCDEFGHIJKLMNOPQRSTUVWXYZ are the possible characters and CAMERA is the passcode then, breakpoints = [A,C,E,M,R] # sorted set of characters from passcode shuffled parts: [A,CB,ED,MLKJIHGF,RQPON,ZYXWVUTS] shuffled __key_list : ACBEDMLKJIHGFRQPONZYXWVUTS Shuffling only 26 letters of the english alphabet can generate 26! combinations for the shuffled list. In the program we consider, a set of 97 characters (including letters, digits, punctuation and whitespaces), thereby creating a possibility of 97! combinations (which is a 152 digit number in itself), thus diminishing the possibility of a brute force approach. Moreover, shift keys even introduce a multiple of 26 for a brute force approach for each of the already 97! combinations. """ # key_list_options contain nearly all printable except few elements from # string.whitespace key_list_options = ( string.ascii_letters + string.digits + string.punctuation + " \t\n" ) keys_l = [] # creates points known as breakpoints to break the key_list_options at those # points and pivot each substring breakpoints = sorted(set(self.__passcode)) temp_list = [] # algorithm for creating a new shuffled list, keys_l, out of key_list_options for i in key_list_options: temp_list.extend(i) # checking breakpoints at which to pivot temporary sublist and add it into # keys_l if i in breakpoints or i == key_list_options[-1]: keys_l.extend(temp_list[::-1]) temp_list = [] # returning a shuffled keys_l to prevent brute force guessing of shift key return keys_l def __make_shift_key(self) -> int: """ sum() of the mutated list of ascii values of all characters where the mutated list is the one returned by __neg_pos() """ num = sum(self.__neg_pos([ord(x) for x in self.__passcode])) return num if num > 0 else len(self.__passcode) def decrypt(self, encoded_message: str) -> str: """ Performs shifting of the encoded_message w.r.t. the shuffled __key_list to create the decoded_message >>> ssc = ShuffledShiftCipher('4PYIXyqeQZr44') >>> ssc.decrypt("d>**-1z6&'5z'5z:z+-='$'>=zp:>5:#z<'.&>#") 'Hello, this is a modified Caesar cipher' """ decoded_message = "" # decoding shift like Caesar cipher algorithm implementing negative shift or # reverse shift or left shift for i in encoded_message: position = self.__key_list.index(i) decoded_message += self.__key_list[ (position - self.__shift_key) % -len(self.__key_list) ] return decoded_message def encrypt(self, plaintext: str) -> str: """ Performs shifting of the plaintext w.r.t. the shuffled __key_list to create the encoded_message >>> ssc = ShuffledShiftCipher('4PYIXyqeQZr44') >>> ssc.encrypt('Hello, this is a modified Caesar cipher') "d>**-1z6&'5z'5z:z+-='$'>=zp:>5:#z<'.&>#" """ encoded_message = "" # encoding shift like Caesar cipher algorithm implementing positive shift or # forward shift or right shift for i in plaintext: position = self.__key_list.index(i) encoded_message += self.__key_list[ (position + self.__shift_key) % len(self.__key_list) ] return encoded_message def test_end_to_end(msg: str = "Hello, this is a modified Caesar cipher"): """ >>> test_end_to_end() 'Hello, this is a modified Caesar cipher' """ cip1 = ShuffledShiftCipher() return cip1.decrypt(cip1.encrypt(msg)) if __name__ == "__main__": import doctest doctest.testmod()
-1
TheAlgorithms/Python
4,297
[mypy] Fix web_programming directory
A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
cclauss
"2021-03-28T01:58:51Z"
"2021-03-31T03:18:07Z"
c22c7d503be5f48ae257c648f7b83b8a80a02738
895bca36541598a04dba525568a20d2282e0ffd9
[mypy] Fix web_programming directory. A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset dataset = pd.read_csv( "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/" "position_salaries.csv" ) X = dataset.iloc[:, 1:2].values y = dataset.iloc[:, 2].values X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) poly_reg = PolynomialFeatures(degree=4) X_poly = poly_reg.fit_transform(X) pol_reg = LinearRegression() pol_reg.fit(X_poly, y) # Visualizing the Polymonial Regression results def viz_polymonial(): plt.scatter(X, y, color="red") plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue") plt.title("Truth or Bluff (Linear Regression)") plt.xlabel("Position level") plt.ylabel("Salary") plt.show() return if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset dataset = pd.read_csv( "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/" "position_salaries.csv" ) X = dataset.iloc[:, 1:2].values y = dataset.iloc[:, 2].values X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) poly_reg = PolynomialFeatures(degree=4) X_poly = poly_reg.fit_transform(X) pol_reg = LinearRegression() pol_reg.fit(X_poly, y) # Visualizing the Polymonial Regression results def viz_polymonial(): plt.scatter(X, y, color="red") plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue") plt.title("Truth or Bluff (Linear Regression)") plt.xlabel("Position level") plt.ylabel("Salary") plt.show() return if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
-1
TheAlgorithms/Python
4,297
[mypy] Fix web_programming directory
A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
cclauss
"2021-03-28T01:58:51Z"
"2021-03-31T03:18:07Z"
c22c7d503be5f48ae257c648f7b83b8a80a02738
895bca36541598a04dba525568a20d2282e0ffd9
[mypy] Fix web_programming directory. A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
from typing import Any class ContainsLoopError(Exception): pass class Node: def __init__(self, data: Any) -> None: self.data = data self.next_node = None def __iter__(self): node = self visited = [] while node: if node in visited: raise ContainsLoopError visited.append(node) yield node.data node = node.next_node @property def has_loop(self) -> bool: """ A loop is when the exact same Node appears more than once in a linked list. >>> root_node = Node(1) >>> root_node.next_node = Node(2) >>> root_node.next_node.next_node = Node(3) >>> root_node.next_node.next_node.next_node = Node(4) >>> root_node.has_loop False >>> root_node.next_node.next_node.next_node = root_node.next_node >>> root_node.has_loop True """ try: list(self) return False except ContainsLoopError: return True if __name__ == "__main__": root_node = Node(1) root_node.next_node = Node(2) root_node.next_node.next_node = Node(3) root_node.next_node.next_node.next_node = Node(4) print(root_node.has_loop) # False root_node.next_node.next_node.next_node = root_node.next_node print(root_node.has_loop) # True root_node = Node(5) root_node.next_node = Node(6) root_node.next_node.next_node = Node(5) root_node.next_node.next_node.next_node = Node(6) print(root_node.has_loop) # False root_node = Node(1) print(root_node.has_loop) # False
from typing import Any class ContainsLoopError(Exception): pass class Node: def __init__(self, data: Any) -> None: self.data = data self.next_node = None def __iter__(self): node = self visited = [] while node: if node in visited: raise ContainsLoopError visited.append(node) yield node.data node = node.next_node @property def has_loop(self) -> bool: """ A loop is when the exact same Node appears more than once in a linked list. >>> root_node = Node(1) >>> root_node.next_node = Node(2) >>> root_node.next_node.next_node = Node(3) >>> root_node.next_node.next_node.next_node = Node(4) >>> root_node.has_loop False >>> root_node.next_node.next_node.next_node = root_node.next_node >>> root_node.has_loop True """ try: list(self) return False except ContainsLoopError: return True if __name__ == "__main__": root_node = Node(1) root_node.next_node = Node(2) root_node.next_node.next_node = Node(3) root_node.next_node.next_node.next_node = Node(4) print(root_node.has_loop) # False root_node.next_node.next_node.next_node = root_node.next_node print(root_node.has_loop) # True root_node = Node(5) root_node.next_node = Node(6) root_node.next_node.next_node = Node(5) root_node.next_node.next_node.next_node = Node(6) print(root_node.has_loop) # False root_node = Node(1) print(root_node.has_loop) # False
-1
TheAlgorithms/Python
4,297
[mypy] Fix web_programming directory
A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
cclauss
"2021-03-28T01:58:51Z"
"2021-03-31T03:18:07Z"
c22c7d503be5f48ae257c648f7b83b8a80a02738
895bca36541598a04dba525568a20d2282e0ffd9
[mypy] Fix web_programming directory. A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
import os from itertools import chain from random import randrange, shuffle import pytest from .sol1 import PokerHand SORTED_HANDS = ( "4S 3H 2C 7S 5H", "9D 8H 2C 6S 7H", "2D 6D 9D TH 7D", "TC 8C 2S JH 6C", "JH 8S TH AH QH", "TS KS 5S 9S AC", "KD 6S 9D TH AD", "KS 8D 4D 9S 4S", # pair "8C 4S KH JS 4D", # pair "QH 8H KD JH 8S", # pair "KC 4H KS 2H 8D", # pair "KD 4S KC 3H 8S", # pair "AH 8S AS KC JH", # pair "3H 4C 4H 3S 2H", # 2 pairs "5S 5D 2C KH KH", # 2 pairs "3C KH 5D 5S KH", # 2 pairs "AS 3C KH AD KH", # 2 pairs "7C 7S 3S 7H 5S", # 3 of a kind "7C 7S KH 2H 7H", # 3 of a kind "AC KH QH AH AS", # 3 of a kind "2H 4D 3C AS 5S", # straight (low ace) "3C 5C 4C 2C 6H", # straight "6S 8S 7S 5H 9H", # straight "JS QS 9H TS KH", # straight "QC KH TS JS AH", # straight (high ace) "8C 9C 5C 3C TC", # flush "3S 8S 9S 5S KS", # flush "4C 5C 9C 8C KC", # flush "JH 8H AH KH QH", # flush "3D 2H 3H 2C 2D", # full house "2H 2C 3S 3H 3D", # full house "KH KC 3S 3H 3D", # full house "JC 6H JS JD JH", # 4 of a kind "JC 7H JS JD JH", # 4 of a kind "JC KH JS JD JH", # 4 of a kind "2S AS 4S 5S 3S", # straight flush (low ace) "2D 6D 3D 4D 5D", # straight flush "5C 6C 3C 7C 4C", # straight flush "JH 9H TH KH QH", # straight flush "JH AH TH KH QH", # royal flush (high ace straight flush) ) TEST_COMPARE = ( ("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"), ("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"), ("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"), ("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"), ("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"), ("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"), ("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"), ("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"), ("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"), ("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"), ("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"), ("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"), ("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"), ("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"), ("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"), ("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"), ("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"), ("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"), ("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"), ("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"), ("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"), ("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"), ("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"), ("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"), ("AH AD KS KC AC", "AH KD KH AC KC", "Win"), ("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"), ("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"), ("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"), ("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"), ("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"), ("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"), ("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"), ("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"), ) TEST_FLUSH = ( ("2H 3H 4H 5H 6H", True), ("AS AH 2H AD AC", False), ("2H 3H 5H 6H 7H", True), ("KS AS TS QS JS", True), ("8H 9H QS JS TH", False), ("AS 3S 4S 8S 2S", True), ) TEST_STRAIGHT = ( ("2H 3H 4H 5H 6H", True), ("AS AH 2H AD AC", False), ("2H 3H 5H 6H 7H", False), ("KS AS TS QS JS", True), ("8H 9H QS JS TH", True), ) TEST_FIVE_HIGH_STRAIGHT = ( ("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]), ("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]), ("JH QD KC AS TS", False, [14, 13, 12, 11, 10]), ("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]), ) TEST_KIND = ( ("JH AH TH KH QH", 0), ("JH 9H TH KH QH", 0), ("JC KH JS JD JH", 7), ("KH KC 3S 3H 3D", 6), ("8C 9C 5C 3C TC", 0), ("JS QS 9H TS KH", 0), ("7C 7S KH 2H 7H", 3), ("3C KH 5D 5S KH", 2), ("QH 8H KD JH 8S", 1), ("2D 6D 9D TH 7D", 0), ) TEST_TYPES = ( ("JH AH TH KH QH", 23), ("JH 9H TH KH QH", 22), ("JC KH JS JD JH", 21), ("KH KC 3S 3H 3D", 20), ("8C 9C 5C 3C TC", 19), ("JS QS 9H TS KH", 18), ("7C 7S KH 2H 7H", 17), ("3C KH 5D 5S KH", 16), ("QH 8H KD JH 8S", 15), ("2D 6D 9D TH 7D", 14), ) def generate_random_hand(): play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS)) expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)] hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def generate_random_hands(number_of_hands: int = 100): return (generate_random_hand() for _ in range(number_of_hands)) @pytest.mark.parametrize("hand, expected", TEST_FLUSH) def test_hand_is_flush(hand, expected): assert PokerHand(hand)._is_flush() == expected @pytest.mark.parametrize("hand, expected", TEST_STRAIGHT) def test_hand_is_straight(hand, expected): assert PokerHand(hand)._is_straight() == expected @pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT) def test_hand_is_five_high_straight(hand, expected, card_values): player = PokerHand(hand) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize("hand, expected", TEST_KIND) def test_hand_is_same_kind(hand, expected): assert PokerHand(hand)._is_same_kind() == expected @pytest.mark.parametrize("hand, expected", TEST_TYPES) def test_hand_values(hand, expected): assert PokerHand(hand)._hand_type == expected @pytest.mark.parametrize("hand, other, expected", TEST_COMPARE) def test_compare_simple(hand, other, expected): assert PokerHand(hand).compare_with(PokerHand(other)) == expected @pytest.mark.parametrize("hand, other, expected", generate_random_hands()) def test_compare_random(hand, other, expected): assert PokerHand(hand).compare_with(PokerHand(other)) == expected def test_hand_sorted(): POKER_HANDS = [PokerHand(hand) for hand in SORTED_HANDS] list_copy = POKER_HANDS.copy() shuffle(list_copy) user_sorted = chain(sorted(list_copy)) for index, hand in enumerate(user_sorted): assert hand == POKER_HANDS[index] def test_custom_sort_five_high_straight(): # Test that five high straights are compared correctly. pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")] pokerhands.sort(reverse=True) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def test_multiple_calls_five_high_straight(): # Multiple calls to five_high_straight function should still return True # and shouldn't mutate the list in every call other than the first. pokerhand = PokerHand("2C 4S AS 3D 5C") expected = True expected_card_values = [5, 4, 3, 2, 14] for _ in range(10): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def test_euler_project(): # Problem number 54 from Project Euler # Testing from poker_hands.txt file answer = 0 script_dir = os.path.abspath(os.path.dirname(__file__)) poker_hands = os.path.join(script_dir, "poker_hands.txt") with open(poker_hands) as file_hand: for line in file_hand: player_hand = line[:14].strip() opponent_hand = line[15:].strip() player, opponent = PokerHand(player_hand), PokerHand(opponent_hand) output = player.compare_with(opponent) if output == "Win": answer += 1 assert answer == 376
import os from itertools import chain from random import randrange, shuffle import pytest from .sol1 import PokerHand SORTED_HANDS = ( "4S 3H 2C 7S 5H", "9D 8H 2C 6S 7H", "2D 6D 9D TH 7D", "TC 8C 2S JH 6C", "JH 8S TH AH QH", "TS KS 5S 9S AC", "KD 6S 9D TH AD", "KS 8D 4D 9S 4S", # pair "8C 4S KH JS 4D", # pair "QH 8H KD JH 8S", # pair "KC 4H KS 2H 8D", # pair "KD 4S KC 3H 8S", # pair "AH 8S AS KC JH", # pair "3H 4C 4H 3S 2H", # 2 pairs "5S 5D 2C KH KH", # 2 pairs "3C KH 5D 5S KH", # 2 pairs "AS 3C KH AD KH", # 2 pairs "7C 7S 3S 7H 5S", # 3 of a kind "7C 7S KH 2H 7H", # 3 of a kind "AC KH QH AH AS", # 3 of a kind "2H 4D 3C AS 5S", # straight (low ace) "3C 5C 4C 2C 6H", # straight "6S 8S 7S 5H 9H", # straight "JS QS 9H TS KH", # straight "QC KH TS JS AH", # straight (high ace) "8C 9C 5C 3C TC", # flush "3S 8S 9S 5S KS", # flush "4C 5C 9C 8C KC", # flush "JH 8H AH KH QH", # flush "3D 2H 3H 2C 2D", # full house "2H 2C 3S 3H 3D", # full house "KH KC 3S 3H 3D", # full house "JC 6H JS JD JH", # 4 of a kind "JC 7H JS JD JH", # 4 of a kind "JC KH JS JD JH", # 4 of a kind "2S AS 4S 5S 3S", # straight flush (low ace) "2D 6D 3D 4D 5D", # straight flush "5C 6C 3C 7C 4C", # straight flush "JH 9H TH KH QH", # straight flush "JH AH TH KH QH", # royal flush (high ace straight flush) ) TEST_COMPARE = ( ("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"), ("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"), ("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"), ("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"), ("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"), ("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"), ("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"), ("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"), ("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"), ("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"), ("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"), ("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"), ("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"), ("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"), ("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"), ("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"), ("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"), ("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"), ("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"), ("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"), ("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"), ("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"), ("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"), ("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"), ("AH AD KS KC AC", "AH KD KH AC KC", "Win"), ("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"), ("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"), ("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"), ("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"), ("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"), ("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"), ("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"), ("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"), ) TEST_FLUSH = ( ("2H 3H 4H 5H 6H", True), ("AS AH 2H AD AC", False), ("2H 3H 5H 6H 7H", True), ("KS AS TS QS JS", True), ("8H 9H QS JS TH", False), ("AS 3S 4S 8S 2S", True), ) TEST_STRAIGHT = ( ("2H 3H 4H 5H 6H", True), ("AS AH 2H AD AC", False), ("2H 3H 5H 6H 7H", False), ("KS AS TS QS JS", True), ("8H 9H QS JS TH", True), ) TEST_FIVE_HIGH_STRAIGHT = ( ("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]), ("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]), ("JH QD KC AS TS", False, [14, 13, 12, 11, 10]), ("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]), ) TEST_KIND = ( ("JH AH TH KH QH", 0), ("JH 9H TH KH QH", 0), ("JC KH JS JD JH", 7), ("KH KC 3S 3H 3D", 6), ("8C 9C 5C 3C TC", 0), ("JS QS 9H TS KH", 0), ("7C 7S KH 2H 7H", 3), ("3C KH 5D 5S KH", 2), ("QH 8H KD JH 8S", 1), ("2D 6D 9D TH 7D", 0), ) TEST_TYPES = ( ("JH AH TH KH QH", 23), ("JH 9H TH KH QH", 22), ("JC KH JS JD JH", 21), ("KH KC 3S 3H 3D", 20), ("8C 9C 5C 3C TC", 19), ("JS QS 9H TS KH", 18), ("7C 7S KH 2H 7H", 17), ("3C KH 5D 5S KH", 16), ("QH 8H KD JH 8S", 15), ("2D 6D 9D TH 7D", 14), ) def generate_random_hand(): play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS)) expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)] hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def generate_random_hands(number_of_hands: int = 100): return (generate_random_hand() for _ in range(number_of_hands)) @pytest.mark.parametrize("hand, expected", TEST_FLUSH) def test_hand_is_flush(hand, expected): assert PokerHand(hand)._is_flush() == expected @pytest.mark.parametrize("hand, expected", TEST_STRAIGHT) def test_hand_is_straight(hand, expected): assert PokerHand(hand)._is_straight() == expected @pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT) def test_hand_is_five_high_straight(hand, expected, card_values): player = PokerHand(hand) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize("hand, expected", TEST_KIND) def test_hand_is_same_kind(hand, expected): assert PokerHand(hand)._is_same_kind() == expected @pytest.mark.parametrize("hand, expected", TEST_TYPES) def test_hand_values(hand, expected): assert PokerHand(hand)._hand_type == expected @pytest.mark.parametrize("hand, other, expected", TEST_COMPARE) def test_compare_simple(hand, other, expected): assert PokerHand(hand).compare_with(PokerHand(other)) == expected @pytest.mark.parametrize("hand, other, expected", generate_random_hands()) def test_compare_random(hand, other, expected): assert PokerHand(hand).compare_with(PokerHand(other)) == expected def test_hand_sorted(): POKER_HANDS = [PokerHand(hand) for hand in SORTED_HANDS] list_copy = POKER_HANDS.copy() shuffle(list_copy) user_sorted = chain(sorted(list_copy)) for index, hand in enumerate(user_sorted): assert hand == POKER_HANDS[index] def test_custom_sort_five_high_straight(): # Test that five high straights are compared correctly. pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")] pokerhands.sort(reverse=True) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def test_multiple_calls_five_high_straight(): # Multiple calls to five_high_straight function should still return True # and shouldn't mutate the list in every call other than the first. pokerhand = PokerHand("2C 4S AS 3D 5C") expected = True expected_card_values = [5, 4, 3, 2, 14] for _ in range(10): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def test_euler_project(): # Problem number 54 from Project Euler # Testing from poker_hands.txt file answer = 0 script_dir = os.path.abspath(os.path.dirname(__file__)) poker_hands = os.path.join(script_dir, "poker_hands.txt") with open(poker_hands) as file_hand: for line in file_hand: player_hand = line[:14].strip() opponent_hand = line[15:].strip() player, opponent = PokerHand(player_hand), PokerHand(opponent_hand) output = player.compare_with(opponent) if output == "Win": answer += 1 assert answer == 376
-1
TheAlgorithms/Python
4,297
[mypy] Fix web_programming directory
A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
cclauss
"2021-03-28T01:58:51Z"
"2021-03-31T03:18:07Z"
c22c7d503be5f48ae257c648f7b83b8a80a02738
895bca36541598a04dba525568a20d2282e0ffd9
[mypy] Fix web_programming directory. A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
# NguyenU def find_max(nums): """ >>> for nums in ([3, 2, 1], [-3, -2, -1], [3, -3, 0], [3.0, 3.1, 2.9]): ... find_max(nums) == max(nums) True True True True """ max_num = nums[0] for x in nums: if x > max_num: max_num = x return max_num def main(): print(find_max([2, 4, 9, 7, 19, 94, 5])) # 94 if __name__ == "__main__": main()
# NguyenU def find_max(nums): """ >>> for nums in ([3, 2, 1], [-3, -2, -1], [3, -3, 0], [3.0, 3.1, 2.9]): ... find_max(nums) == max(nums) True True True True """ max_num = nums[0] for x in nums: if x > max_num: max_num = x return max_num def main(): print(find_max([2, 4, 9, 7, 19, 94, 5])) # 94 if __name__ == "__main__": main()
-1
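The find_max snapshot above carries no type hints, which is the kind of gap the mypy-focused PRs in this dump target. As a hedged illustration only (this is not the change the PR actually made), the function could be annotated like this:

```python
from __future__ import annotations


def find_max(nums: list[float]) -> float:
    """Return the largest value of a non-empty list.

    >>> find_max([2, 4, 9, 7, 19, 94, 5])
    94
    """
    max_num = nums[0]
    for x in nums:
        if x > max_num:
            max_num = x
    return max_num
```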
TheAlgorithms/Python
4,297
[mypy] Fix web_programming directory
A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
cclauss
"2021-03-28T01:58:51Z"
"2021-03-31T03:18:07Z"
c22c7d503be5f48ae257c648f7b83b8a80a02738
895bca36541598a04dba525568a20d2282e0ffd9
[mypy] Fix web_programming directory. A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
# Numbers of alphabet which we call base alphabet_size = 256 # Modulus to hash a string modulus = 1000003 def rabin_karp(pattern: str, text: str) -> bool: """ The Rabin-Karp Algorithm for finding a pattern within a piece of text with complexity O(nm), most efficient when it is used with multiple patterns as it is able to check if any of a set of patterns match a section of text in o(1) given the precomputed hashes. This will be the simple version which only assumes one pattern is being searched for but it's not hard to modify 1) Calculate pattern hash 2) Step through the text one character at a time passing a window with the same length as the pattern calculating the hash of the text within the window compare it with the hash of the pattern. Only testing equality if the hashes match """ p_len = len(pattern) t_len = len(text) if p_len > t_len: return False p_hash = 0 text_hash = 0 modulus_power = 1 # Calculating the hash of pattern and substring of text for i in range(p_len): p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue modulus_power = (modulus_power * alphabet_size) % modulus for i in range(0, t_len - p_len + 1): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash text_hash = ( (text_hash - ord(text[i]) * modulus_power) * alphabet_size + ord(text[i + p_len]) ) % modulus return False def test_rabin_karp() -> None: """ >>> test_rabin_karp() Success. """ # Test 1) pattern = "abc1abc12" text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc" text2 = "alskfjaldsk23adsfabcabc" assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2) # Test 2) pattern = "ABABX" text = "ABABZABABYABABX" assert rabin_karp(pattern, text) # Test 3) pattern = "AAAB" text = "ABAAAAAB" assert rabin_karp(pattern, text) # Test 4) pattern = "abcdabcy" text = "abcxabcdabxabcdabcdabcy" assert rabin_karp(pattern, text) # Test 5) pattern = "Lü" text = "Lüsai" assert rabin_karp(pattern, text) pattern = "Lue" assert not rabin_karp(pattern, text) print("Success.") if __name__ == "__main__": test_rabin_karp()
# Numbers of alphabet which we call base alphabet_size = 256 # Modulus to hash a string modulus = 1000003 def rabin_karp(pattern: str, text: str) -> bool: """ The Rabin-Karp Algorithm for finding a pattern within a piece of text with complexity O(nm), most efficient when it is used with multiple patterns as it is able to check if any of a set of patterns match a section of text in o(1) given the precomputed hashes. This will be the simple version which only assumes one pattern is being searched for but it's not hard to modify 1) Calculate pattern hash 2) Step through the text one character at a time passing a window with the same length as the pattern calculating the hash of the text within the window compare it with the hash of the pattern. Only testing equality if the hashes match """ p_len = len(pattern) t_len = len(text) if p_len > t_len: return False p_hash = 0 text_hash = 0 modulus_power = 1 # Calculating the hash of pattern and substring of text for i in range(p_len): p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue modulus_power = (modulus_power * alphabet_size) % modulus for i in range(0, t_len - p_len + 1): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash text_hash = ( (text_hash - ord(text[i]) * modulus_power) * alphabet_size + ord(text[i + p_len]) ) % modulus return False def test_rabin_karp() -> None: """ >>> test_rabin_karp() Success. """ # Test 1) pattern = "abc1abc12" text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc" text2 = "alskfjaldsk23adsfabcabc" assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2) # Test 2) pattern = "ABABX" text = "ABABZABABYABABX" assert rabin_karp(pattern, text) # Test 3) pattern = "AAAB" text = "ABAAAAAB" assert rabin_karp(pattern, text) # Test 4) pattern = "abcdabcy" text = "abcxabcdabxabcdabcdabcy" assert rabin_karp(pattern, text) # Test 5) pattern = "Lü" text = "Lüsai" assert rabin_karp(pattern, text) pattern = "Lue" assert not rabin_karp(pattern, text) print("Success.") if __name__ == "__main__": test_rabin_karp()
-1
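The Rabin-Karp record above relies on the rolling-hash update described in its comments. A compact, self-contained sketch of just that update (the constants mirror the record; the helper names `initial_hash` and `roll` are mine):

```python
ALPHABET_SIZE = 256
MODULUS = 1000003


def initial_hash(window: str) -> int:
    """Polynomial hash of the first window of text, as in the record."""
    h = 0
    for ch in window:
        h = (ord(ch) + h * ALPHABET_SIZE) % MODULUS
    return h


def roll(h: int, old_char: str, new_char: str, power: int) -> int:
    """Slide the window one character: drop old_char on the left, add new_char.

    power is ALPHABET_SIZE ** (window_length - 1) % MODULUS.
    """
    return ((h - ord(old_char) * power) * ALPHABET_SIZE + ord(new_char)) % MODULUS


text, m = "abcd", 3
power = pow(ALPHABET_SIZE, m - 1, MODULUS)
h = initial_hash(text[:m])            # hash of "abc"
h = roll(h, text[0], text[m], power)  # now equals the hash of "bcd"
assert h == initial_hash("bcd")
```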
TheAlgorithms/Python
4,297
[mypy] Fix web_programming directory
A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
cclauss
"2021-03-28T01:58:51Z"
"2021-03-31T03:18:07Z"
c22c7d503be5f48ae257c648f7b83b8a80a02738
895bca36541598a04dba525568a20d2282e0ffd9
[mypy] Fix web_programming directory. A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" wiki: https://en.wikipedia.org/wiki/Anagram """ def check_anagrams(first_str: str, second_str: str) -> bool: """ Two strings are anagrams if they are made of the same letters arranged differently (ignoring the case). >>> check_anagrams('Silent', 'Listen') True >>> check_anagrams('This is a string', 'Is this a string') True >>> check_anagrams('This is a string', 'Is this a string') True >>> check_anagrams('There', 'Their') False """ return ( "".join(sorted(first_str.lower())).strip() == "".join(sorted(second_str.lower())).strip() ) if __name__ == "__main__": from doctest import testmod testmod() input_A = input("Enter the first string ").strip() input_B = input("Enter the second string ").strip() status = check_anagrams(input_A, input_B) print(f"{input_A} and {input_B} are {'' if status else 'not '}anagrams.")
""" wiki: https://en.wikipedia.org/wiki/Anagram """ def check_anagrams(first_str: str, second_str: str) -> bool: """ Two strings are anagrams if they are made of the same letters arranged differently (ignoring the case). >>> check_anagrams('Silent', 'Listen') True >>> check_anagrams('This is a string', 'Is this a string') True >>> check_anagrams('This is a string', 'Is this a string') True >>> check_anagrams('There', 'Their') False """ return ( "".join(sorted(first_str.lower())).strip() == "".join(sorted(second_str.lower())).strip() ) if __name__ == "__main__": from doctest import testmod testmod() input_A = input("Enter the first string ").strip() input_B = input("Enter the second string ").strip() status = check_anagrams(input_A, input_B) print(f"{input_A} and {input_B} are {'' if status else 'not '}anagrams.")
-1
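The anagram check above compares sorted, lower-cased strings with leading spaces stripped. A closely equivalent test can be written with collections.Counter; this is only an alternative sketch, not what the record's file does (the function name is mine):

```python
from collections import Counter


def check_anagrams_counter(first_str: str, second_str: str) -> bool:
    """Case-insensitive anagram test that ignores spaces, like the record above."""

    def letter_counts(text: str) -> Counter:
        return Counter(ch for ch in text.lower() if ch != " ")

    return letter_counts(first_str) == letter_counts(second_str)


assert check_anagrams_counter("Silent", "Listen")
assert check_anagrams_counter("This is a string", "Is this a string")
assert not check_anagrams_counter("There", "Their")
```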
TheAlgorithms/Python
4,297
[mypy] Fix web_programming directory
A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
cclauss
"2021-03-28T01:58:51Z"
"2021-03-31T03:18:07Z"
c22c7d503be5f48ae257c648f7b83b8a80a02738
895bca36541598a04dba525568a20d2282e0ffd9
[mypy] Fix web_programming directory. A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
#
#
-1

TheAlgorithms/Python
4,297
[mypy] Fix web_programming directory
A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
cclauss
"2021-03-28T01:58:51Z"
"2021-03-31T03:18:07Z"
c22c7d503be5f48ae257c648f7b83b8a80a02738
895bca36541598a04dba525568a20d2282e0ffd9
[mypy] Fix web_programming directory. A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" Test cases: Do you want to enter your denominations ? (Y/N) :N Enter the change you want to make in Indian Currency: 987 Following is minimal change for 987 : 500 100 100 100 100 50 20 10 5 2 Do you want to enter your denominations ? (Y/N) :Y Enter number of denomination:10 1 5 10 20 50 100 200 500 1000 2000 Enter the change you want to make: 18745 Following is minimal change for 18745 : 2000 2000 2000 2000 2000 2000 2000 2000 2000 500 200 20 20 5 Do you want to enter your denominations ? (Y/N) :N Enter the change you want to make: 0 The total value cannot be zero or negative. Do you want to enter your denominations ? (Y/N) :N Enter the change you want to make: -98 The total value cannot be zero or negative. Do you want to enter your denominations ? (Y/N) :Y Enter number of denomination:5 1 5 100 500 1000 Enter the change you want to make: 456 Following is minimal change for 456 : 100 100 100 100 5 5 5 5 5 5 5 5 5 5 5 1 """ def find_minimum_change(denominations: list[int], value: int) -> list[int]: """ Find the minimum change from the given denominations and value >>> find_minimum_change([1, 5, 10, 20, 50, 100, 200, 500, 1000,2000], 18745) [2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 500, 200, 20, 20, 5] >>> find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987) [500, 100, 100, 100, 100, 50, 20, 10, 5, 2] >>> find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 0) [] >>> find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], -98) [] >>> find_minimum_change([1, 5, 100, 500, 1000], 456) [100, 100, 100, 100, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1] """ total_value = int(value) # Initialize Result answer = [] # Traverse through all denomination for denomination in reversed(denominations): # Find denominations while int(total_value) >= int(denomination): total_value -= int(denomination) answer.append(denomination) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": denominations = list() value = 0 if ( input("Do you want to enter your denominations ? (yY/n): ").strip().lower() == "y" ): n = int(input("Enter the number of denominations you want to add: ").strip()) for i in range(0, n): denominations.append(int(input(f"Denomination {i}: ").strip())) value = input("Enter the change you want to make in Indian Currency: ").strip() else: # All denominations of Indian Currency if user does not enter denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000] value = input("Enter the change you want to make: ").strip() if int(value) == 0 or int(value) < 0: print("The total value cannot be zero or negative.") else: print(f"Following is minimal change for {value}: ") answer = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=" ")
""" Test cases: Do you want to enter your denominations ? (Y/N) :N Enter the change you want to make in Indian Currency: 987 Following is minimal change for 987 : 500 100 100 100 100 50 20 10 5 2 Do you want to enter your denominations ? (Y/N) :Y Enter number of denomination:10 1 5 10 20 50 100 200 500 1000 2000 Enter the change you want to make: 18745 Following is minimal change for 18745 : 2000 2000 2000 2000 2000 2000 2000 2000 2000 500 200 20 20 5 Do you want to enter your denominations ? (Y/N) :N Enter the change you want to make: 0 The total value cannot be zero or negative. Do you want to enter your denominations ? (Y/N) :N Enter the change you want to make: -98 The total value cannot be zero or negative. Do you want to enter your denominations ? (Y/N) :Y Enter number of denomination:5 1 5 100 500 1000 Enter the change you want to make: 456 Following is minimal change for 456 : 100 100 100 100 5 5 5 5 5 5 5 5 5 5 5 1 """ def find_minimum_change(denominations: list[int], value: int) -> list[int]: """ Find the minimum change from the given denominations and value >>> find_minimum_change([1, 5, 10, 20, 50, 100, 200, 500, 1000,2000], 18745) [2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 500, 200, 20, 20, 5] >>> find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987) [500, 100, 100, 100, 100, 50, 20, 10, 5, 2] >>> find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 0) [] >>> find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], -98) [] >>> find_minimum_change([1, 5, 100, 500, 1000], 456) [100, 100, 100, 100, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1] """ total_value = int(value) # Initialize Result answer = [] # Traverse through all denomination for denomination in reversed(denominations): # Find denominations while int(total_value) >= int(denomination): total_value -= int(denomination) answer.append(denomination) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": denominations = list() value = 0 if ( input("Do you want to enter your denominations ? (yY/n): ").strip().lower() == "y" ): n = int(input("Enter the number of denominations you want to add: ").strip()) for i in range(0, n): denominations.append(int(input(f"Denomination {i}: ").strip())) value = input("Enter the change you want to make in Indian Currency: ").strip() else: # All denominations of Indian Currency if user does not enter denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000] value = input("Enter the change you want to make: ").strip() if int(value) == 0 or int(value) < 0: print("The total value cannot be zero or negative.") else: print(f"Following is minimal change for {value}: ") answer = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=" ")
-1
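find_minimum_change above is a greedy algorithm: it repeatedly takes the largest denomination that still fits. A self-contained restatement of that idea, plus a caveat (mine, not the record's) that greedy change-making is only optimal for canonical coin systems such as the Indian denominations used here:

```python
def greedy_change(denominations: list[int], value: int) -> list[int]:
    """Greedy change-making, same idea as the record above."""
    remaining = int(value)
    answer: list[int] = []
    # Sorted here so the input order does not matter.
    for denomination in sorted(denominations, reverse=True):
        while remaining >= denomination:
            remaining -= denomination
            answer.append(denomination)
    return answer


assert greedy_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987) == [
    500, 100, 100, 100, 100, 50, 20, 10, 5, 2
]
# Caveat: greedy is only optimal for canonical coin systems. With denominations
# [1, 3, 4] and value 6 it returns [4, 1, 1] (3 coins) although [3, 3] is optimal.
assert greedy_change([1, 3, 4], 6) == [4, 1, 1]
```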
TheAlgorithms/Python
4,297
[mypy] Fix web_programming directory
A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
cclauss
"2021-03-28T01:58:51Z"
"2021-03-31T03:18:07Z"
c22c7d503be5f48ae257c648f7b83b8a80a02738
895bca36541598a04dba525568a20d2282e0ffd9
[mypy] Fix web_programming directory. A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
class Heap: """ A generic Heap class, can be used as min or max by passing the key function accordingly. """ def __init__(self, key=None): # Stores actual heap items. self.arr = list() # Stores indexes of each item for supporting updates and deletion. self.pos_map = {} # Stores current size of heap. self.size = 0 # Stores function used to evaluate the score of an item on which basis ordering # will be done. self.key = key or (lambda x: x) def _parent(self, i): """Returns parent index of given index if exists else None""" return int((i - 1) / 2) if i > 0 else None def _left(self, i): """Returns left-child-index of given index if exists else None""" left = int(2 * i + 1) return left if 0 < left < self.size else None def _right(self, i): """Returns right-child-index of given index if exists else None""" right = int(2 * i + 2) return right if 0 < right < self.size else None def _swap(self, i, j): """Performs changes required for swapping two elements in the heap""" # First update the indexes of the items in index map. self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = ( self.pos_map[self.arr[j][0]], self.pos_map[self.arr[i][0]], ) # Then swap the items in the list. self.arr[i], self.arr[j] = self.arr[j], self.arr[i] def _cmp(self, i, j): """Compares the two items using default comparison""" return self.arr[i][1] < self.arr[j][1] def _get_valid_parent(self, i): """ Returns index of valid parent as per desired ordering among given index and both it's children """ left = self._left(i) right = self._right(i) valid_parent = i if left is not None and not self._cmp(left, valid_parent): valid_parent = left if right is not None and not self._cmp(right, valid_parent): valid_parent = right return valid_parent def _heapify_up(self, index): """Fixes the heap in upward direction of given index""" parent = self._parent(index) while parent is not None and not self._cmp(index, parent): self._swap(index, parent) index, parent = parent, self._parent(parent) def _heapify_down(self, index): """Fixes the heap in downward direction of given index""" valid_parent = self._get_valid_parent(index) while valid_parent != index: self._swap(index, valid_parent) index, valid_parent = valid_parent, self._get_valid_parent(valid_parent) def update_item(self, item, item_value): """Updates given item value in heap if present""" if item not in self.pos_map: return index = self.pos_map[item] self.arr[index] = [item, self.key(item_value)] # Make sure heap is right in both up and down direction. # Ideally only one of them will make any change. self._heapify_up(index) self._heapify_down(index) def delete_item(self, item): """Deletes given item from heap if present""" if item not in self.pos_map: return index = self.pos_map[item] del self.pos_map[item] self.arr[index] = self.arr[self.size - 1] self.pos_map[self.arr[self.size - 1][0]] = index self.size -= 1 # Make sure heap is right in both up and down direction. Ideally only one # of them will make any change- so no performance loss in calling both. 
if self.size > index: self._heapify_up(index) self._heapify_down(index) def insert_item(self, item, item_value): """Inserts given item with given value in heap""" arr_len = len(self.arr) if arr_len == self.size: self.arr.append([item, self.key(item_value)]) else: self.arr[self.size] = [item, self.key(item_value)] self.pos_map[item] = self.size self.size += 1 self._heapify_up(self.size - 1) def get_top(self): """Returns top item tuple (Calculated value, item) from heap if present""" return self.arr[0] if self.size else None def extract_top(self): """ Return top item tuple (Calculated value, item) from heap and removes it as well if present """ top_item_tuple = self.get_top() if top_item_tuple: self.delete_item(top_item_tuple[0]) return top_item_tuple def test_heap() -> None: """ >>> h = Heap() # Max-heap >>> h.insert_item(5, 34) >>> h.insert_item(6, 31) >>> h.insert_item(7, 37) >>> h.get_top() [7, 37] >>> h.extract_top() [7, 37] >>> h.extract_top() [5, 34] >>> h.extract_top() [6, 31] >>> h = Heap(key=lambda x: -x) # Min heap >>> h.insert_item(5, 34) >>> h.insert_item(6, 31) >>> h.insert_item(7, 37) >>> h.get_top() [6, -31] >>> h.extract_top() [6, -31] >>> h.extract_top() [5, -34] >>> h.extract_top() [7, -37] >>> h.insert_item(8, 45) >>> h.insert_item(9, 40) >>> h.insert_item(10, 50) >>> h.get_top() [9, -40] >>> h.update_item(10, 30) >>> h.get_top() [10, -30] >>> h.delete_item(10) >>> h.get_top() [9, -40] """ pass if __name__ == "__main__": import doctest doctest.testmod()
class Heap: """ A generic Heap class, can be used as min or max by passing the key function accordingly. """ def __init__(self, key=None): # Stores actual heap items. self.arr = list() # Stores indexes of each item for supporting updates and deletion. self.pos_map = {} # Stores current size of heap. self.size = 0 # Stores function used to evaluate the score of an item on which basis ordering # will be done. self.key = key or (lambda x: x) def _parent(self, i): """Returns parent index of given index if exists else None""" return int((i - 1) / 2) if i > 0 else None def _left(self, i): """Returns left-child-index of given index if exists else None""" left = int(2 * i + 1) return left if 0 < left < self.size else None def _right(self, i): """Returns right-child-index of given index if exists else None""" right = int(2 * i + 2) return right if 0 < right < self.size else None def _swap(self, i, j): """Performs changes required for swapping two elements in the heap""" # First update the indexes of the items in index map. self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = ( self.pos_map[self.arr[j][0]], self.pos_map[self.arr[i][0]], ) # Then swap the items in the list. self.arr[i], self.arr[j] = self.arr[j], self.arr[i] def _cmp(self, i, j): """Compares the two items using default comparison""" return self.arr[i][1] < self.arr[j][1] def _get_valid_parent(self, i): """ Returns index of valid parent as per desired ordering among given index and both it's children """ left = self._left(i) right = self._right(i) valid_parent = i if left is not None and not self._cmp(left, valid_parent): valid_parent = left if right is not None and not self._cmp(right, valid_parent): valid_parent = right return valid_parent def _heapify_up(self, index): """Fixes the heap in upward direction of given index""" parent = self._parent(index) while parent is not None and not self._cmp(index, parent): self._swap(index, parent) index, parent = parent, self._parent(parent) def _heapify_down(self, index): """Fixes the heap in downward direction of given index""" valid_parent = self._get_valid_parent(index) while valid_parent != index: self._swap(index, valid_parent) index, valid_parent = valid_parent, self._get_valid_parent(valid_parent) def update_item(self, item, item_value): """Updates given item value in heap if present""" if item not in self.pos_map: return index = self.pos_map[item] self.arr[index] = [item, self.key(item_value)] # Make sure heap is right in both up and down direction. # Ideally only one of them will make any change. self._heapify_up(index) self._heapify_down(index) def delete_item(self, item): """Deletes given item from heap if present""" if item not in self.pos_map: return index = self.pos_map[item] del self.pos_map[item] self.arr[index] = self.arr[self.size - 1] self.pos_map[self.arr[self.size - 1][0]] = index self.size -= 1 # Make sure heap is right in both up and down direction. Ideally only one # of them will make any change- so no performance loss in calling both. 
if self.size > index: self._heapify_up(index) self._heapify_down(index) def insert_item(self, item, item_value): """Inserts given item with given value in heap""" arr_len = len(self.arr) if arr_len == self.size: self.arr.append([item, self.key(item_value)]) else: self.arr[self.size] = [item, self.key(item_value)] self.pos_map[item] = self.size self.size += 1 self._heapify_up(self.size - 1) def get_top(self): """Returns top item tuple (Calculated value, item) from heap if present""" return self.arr[0] if self.size else None def extract_top(self): """ Return top item tuple (Calculated value, item) from heap and removes it as well if present """ top_item_tuple = self.get_top() if top_item_tuple: self.delete_item(top_item_tuple[0]) return top_item_tuple def test_heap() -> None: """ >>> h = Heap() # Max-heap >>> h.insert_item(5, 34) >>> h.insert_item(6, 31) >>> h.insert_item(7, 37) >>> h.get_top() [7, 37] >>> h.extract_top() [7, 37] >>> h.extract_top() [5, 34] >>> h.extract_top() [6, 31] >>> h = Heap(key=lambda x: -x) # Min heap >>> h.insert_item(5, 34) >>> h.insert_item(6, 31) >>> h.insert_item(7, 37) >>> h.get_top() [6, -31] >>> h.extract_top() [6, -31] >>> h.extract_top() [5, -34] >>> h.extract_top() [7, -37] >>> h.insert_item(8, 45) >>> h.insert_item(9, 40) >>> h.insert_item(10, 50) >>> h.get_top() [9, -40] >>> h.update_item(10, 30) >>> h.get_top() [10, -30] >>> h.delete_item(10) >>> h.get_top() [9, -40] """ pass if __name__ == "__main__": import doctest doctest.testmod()
-1
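The Heap record above switches between min- and max-behaviour through a key function. The standard library's heapq gives the same effect by storing a transformed key; a small self-contained sketch of that idea (heapq is a min-heap, so negating the stored value flips it into a max-heap, the same key-flipping trick the record's key argument enables):

```python
import heapq

items = [(5, 34), (6, 31), (7, 37)]  # (item, value) pairs, as in the record's doctest

# Max-heap: heapq is a min-heap, so store the negated value as the sort key.
max_heap = [(-value, item) for item, value in items]
heapq.heapify(max_heap)
neg_value, item = max_heap[0]
assert (item, -neg_value) == (7, 37)  # largest value is on top

# Min-heap: store the value as-is.
min_heap = [(value, item) for item, value in items]
heapq.heapify(min_heap)
value, item = min_heap[0]
assert (item, value) == (6, 31)  # smallest value is on top
```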
TheAlgorithms/Python
4,297
[mypy] Fix web_programming directory
A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
cclauss
"2021-03-28T01:58:51Z"
"2021-03-31T03:18:07Z"
c22c7d503be5f48ae257c648f7b83b8a80a02738
895bca36541598a04dba525568a20d2282e0ffd9
[mypy] Fix web_programming directory. A subtask of #4052 ### **Describe your change:** * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [x] All new Python files are placed inside an existing directory. * [x] All filenames are in all lowercase characters with no spaces or dashes. * [x] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" Problem 46: https://projecteuler.net/problem=46 It was proposed by Christian Goldbach that every odd composite number can be written as the sum of a prime and twice a square. 9 = 7 + 2 × 12 15 = 7 + 2 × 22 21 = 3 + 2 × 32 25 = 7 + 2 × 32 27 = 19 + 2 × 22 33 = 31 + 2 × 12 It turns out that the conjecture was false. What is the smallest odd composite that cannot be written as the sum of a prime and twice a square? """ from __future__ import annotations seive = [True] * 100001 i = 2 while i * i <= 100000: if seive[i]: for j in range(i * i, 100001, i): seive[j] = False i += 1 def is_prime(n: int) -> bool: """ Returns True if n is prime, False otherwise, for 2 <= n <= 100000 >>> is_prime(87) False >>> is_prime(23) True >>> is_prime(25363) False """ return seive[n] odd_composites = [num for num in range(3, len(seive), 2) if not is_prime(num)] def compute_nums(n: int) -> list[int]: """ Returns a list of first n odd composite numbers which do not follow the conjecture. >>> compute_nums(1) [5777] >>> compute_nums(2) [5777, 5993] >>> compute_nums(0) Traceback (most recent call last): ... ValueError: n must be >= 0 >>> compute_nums("a") Traceback (most recent call last): ... ValueError: n must be an integer >>> compute_nums(1.1) Traceback (most recent call last): ... ValueError: n must be an integer """ if not isinstance(n, int): raise ValueError("n must be an integer") if n <= 0: raise ValueError("n must be >= 0") list_nums = [] for num in range(len(odd_composites)): i = 0 while 2 * i * i <= odd_composites[num]: rem = odd_composites[num] - 2 * i * i if is_prime(rem): break i += 1 else: list_nums.append(odd_composites[num]) if len(list_nums) == n: return list_nums def solution() -> int: """Return the solution to the problem""" return compute_nums(1)[0] if __name__ == "__main__": print(f"{solution() = }")
""" Problem 46: https://projecteuler.net/problem=46 It was proposed by Christian Goldbach that every odd composite number can be written as the sum of a prime and twice a square. 9 = 7 + 2 × 12 15 = 7 + 2 × 22 21 = 3 + 2 × 32 25 = 7 + 2 × 32 27 = 19 + 2 × 22 33 = 31 + 2 × 12 It turns out that the conjecture was false. What is the smallest odd composite that cannot be written as the sum of a prime and twice a square? """ from __future__ import annotations seive = [True] * 100001 i = 2 while i * i <= 100000: if seive[i]: for j in range(i * i, 100001, i): seive[j] = False i += 1 def is_prime(n: int) -> bool: """ Returns True if n is prime, False otherwise, for 2 <= n <= 100000 >>> is_prime(87) False >>> is_prime(23) True >>> is_prime(25363) False """ return seive[n] odd_composites = [num for num in range(3, len(seive), 2) if not is_prime(num)] def compute_nums(n: int) -> list[int]: """ Returns a list of first n odd composite numbers which do not follow the conjecture. >>> compute_nums(1) [5777] >>> compute_nums(2) [5777, 5993] >>> compute_nums(0) Traceback (most recent call last): ... ValueError: n must be >= 0 >>> compute_nums("a") Traceback (most recent call last): ... ValueError: n must be an integer >>> compute_nums(1.1) Traceback (most recent call last): ... ValueError: n must be an integer """ if not isinstance(n, int): raise ValueError("n must be an integer") if n <= 0: raise ValueError("n must be >= 0") list_nums = [] for num in range(len(odd_composites)): i = 0 while 2 * i * i <= odd_composites[num]: rem = odd_composites[num] - 2 * i * i if is_prime(rem): break i += 1 else: list_nums.append(odd_composites[num]) if len(list_nums) == n: return list_nums def solution() -> int: """Return the solution to the problem""" return compute_nums(1)[0] if __name__ == "__main__": print(f"{solution() = }")
-1
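The record above reports 5777 as the answer to Problem 46. A short, independent brute-force check of that single value (trial-division primality, no sieve), offered only as a sanity-check sketch; the helper names are mine:

```python
def is_prime(n: int) -> bool:
    """Trial-division primality test (fine for small n)."""
    if n < 2:
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True


def violates_conjecture(n: int) -> bool:
    """True if the odd composite n is NOT a prime plus twice a square."""
    if n % 2 == 0 or is_prime(n):
        return False
    i = 1
    while 2 * i * i < n:
        if is_prime(n - 2 * i * i):
            return False
        i += 1
    return True


assert violates_conjecture(5777)      # the published answer to Problem 46
assert not violates_conjecture(33)    # 33 = 31 + 2 * 1**2
```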
TheAlgorithms/Python
4,293
[mypy] fix small folders 2
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-03-26T10:13:28Z"
"2021-03-26T11:21:17Z"
959507901ac8f10cd605c51c305d13b27d105536
9b60be67afca18f0d5e50e532096a68605d61b81
[mypy] fix small folders 2. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
name: "build" on: pull_request: schedule: - cron: "0 0 * * *" # Run everyday jobs: build: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 with: python-version: "3.9" - uses: actions/cache@v2 with: path: ~/.cache/pip key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }} - name: Install dependencies run: | python -m pip install --upgrade pip setuptools six wheel python -m pip install mypy pytest-cov -r requirements.txt # FIXME: #4052 fix mypy errors in other directories and add them here - run: mypy --ignore-missing-imports backtracking bit_manipulation blockchain boolean_algebra cellular_automata compression computer_vision divide_and_conquer electronics file_transfer fractals fuzzy_logic genetic_algorithm geodesy knapsack networking_flow quantum scheduling sorts - name: Run tests run: pytest --doctest-modules --ignore=project_euler/ --ignore=scripts/ --cov-report=term-missing:skip-covered --cov=. . - if: ${{ success() }} run: scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md
name: "build" on: pull_request: schedule: - cron: "0 0 * * *" # Run everyday jobs: build: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 with: python-version: "3.9" - uses: actions/cache@v2 with: path: ~/.cache/pip key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }} - name: Install dependencies run: | python -m pip install --upgrade pip setuptools six wheel python -m pip install mypy pytest-cov -r requirements.txt # FIXME: #4052 fix mypy errors in other directories and add them here - run: mypy --ignore-missing-imports backtracking bit_manipulation blockchain boolean_algebra cellular_automata compression computer_vision divide_and_conquer electronics file_transfer fractals fuzzy_logic genetic_algorithm geodesy knapsack machine_learning networking_flow neural_network quantum scheduling sorts traversals - name: Run tests run: pytest --doctest-modules --ignore=project_euler/ --ignore=scripts/ --cov-report=term-missing:skip-covered --cov=. . - if: ${{ success() }} run: scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md
1
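The workflow in the record above runs mypy over an allow-list of directories. The same check can be reproduced outside CI through mypy's documented Python API; this is a sketch under the assumption that mypy is installed and the script is run from a checkout of the repository (only a few of the listed directories are shown):

```python
# Re-run the CI's mypy step locally via mypy's Python API (pip install mypy).
# The directory names mirror the workflow above; trim or extend the list as needed.
from mypy import api

DIRECTORIES = [
    "backtracking",
    "bit_manipulation",
    "machine_learning",
    "sorts",
]

stdout, stderr, exit_status = api.run(["--ignore-missing-imports", *DIRECTORIES])
print(stdout or stderr)
print(f"mypy exited with status {exit_status}")
```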
TheAlgorithms/Python
4,293
[mypy] fix small folders 2
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-03-26T10:13:28Z"
"2021-03-26T11:21:17Z"
959507901ac8f10cd605c51c305d13b27d105536
9b60be67afca18f0d5e50e532096a68605d61b81
[mypy] fix small folders 2. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" this is code for forecasting but i modified it and used it for safety checker of data for ex: you have a online shop and for some reason some data are missing (the amount of data that u expected are not supposed to be) then we can use it *ps : 1. ofc we can use normal statistic method but in this case the data is quite absurd and only a little^^ 2. ofc u can use this and modified it for forecasting purpose for the next 3 months sales or something, u can just adjust it for ur own purpose """ import numpy as np import pandas as pd from sklearn.preprocessing import Normalizer from sklearn.svm import SVR from statsmodels.tsa.statespace.sarimax import SARIMAX def linear_regression_prediction( train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list ) -> float: """ First method: linear regression input : training data (date, total_user, total_event) in list of float output : list of total user prediction in float >>> n = linear_regression_prediction([2,3,4,5], [5,3,4,6], [3,1,2,4], [2,1], [2,2]) >>> abs(n - 5.0) < 1e-6 # Checking precision because of floating point errors True """ x = [[1, item, train_mtch[i]] for i, item in enumerate(train_dt)] x = np.array(x) y = np.array(train_usr) beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y) return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2]) def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float: """ second method: Sarimax sarimax is a statistic method which using previous input and learn its pattern to predict future data input : training data (total_user, with exog data = total_event) in list of float output : list of total user prediction in float >>> sarimax_predictor([4,2,6,8], [3,1,2,4], [2]) 6.6666671111109626 """ order = (1, 2, 1) seasonal_order = (1, 1, 0, 7) model = SARIMAX( train_user, exog=train_match, order=order, seasonal_order=seasonal_order ) model_fit = model.fit(disp=False, maxiter=600, method="nm") result = model_fit.predict(1, len(test_match), exog=[test_match]) return result[0] def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float: """ Third method: Support vector regressor svr is quite the same with svm(support vector machine) it uses the same principles as the SVM for classification, with only a few minor differences and the only different is that it suits better for regression purpose input : training data (date, total_user, total_event) in list of float where x = list of set (date and total event) output : list of total user prediction in float >>> support_vector_regressor([[5,2],[1,5],[6,2]], [[3,2]], [2,1,4]) 1.634932078116079 """ regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1) regressor.fit(x_train, train_user) y_pred = regressor.predict(x_test) return y_pred[0] def interquartile_range_checker(train_user: list) -> float: """ Optional method: interquatile range input : list of total user in float output : low limit of input in float this method can be used to check whether some data is outlier or not >>> interquartile_range_checker([1,2,3,4,5,6,7,8,9,10]) 2.8 """ train_user.sort() q1 = np.percentile(train_user, 25) q3 = np.percentile(train_user, 75) iqr = q3 - q1 low_lim = q1 - (iqr * 0.1) return low_lim def data_safety_checker(list_vote: list, actual_result: float) -> None: """ Used to review all the votes (list result prediction) and compare it to the actual result. 
input : list of predictions output : print whether it's safe or not >>> data_safety_checker([2,3,4],5.0) Today's data is not safe. """ safe = 0 not_safe = 0 for i in list_vote: if i > actual_result: safe = not_safe + 1 else: if abs(abs(i) - abs(actual_result)) <= 0.1: safe = safe + 1 else: not_safe = not_safe + 1 print(f"Today's data is {'not ' if safe <= not_safe else ''}safe.") # data_input_df = pd.read_csv("ex_data.csv", header=None) data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]] data_input_df = pd.DataFrame(data_input, columns=["total_user", "total_even", "days"]) """ data column = total user in a day, how much online event held in one day, what day is that(sunday-saturday) """ # start normalization normalize_df = Normalizer().fit_transform(data_input_df.values) # split data total_date = normalize_df[:, 2].tolist() total_user = normalize_df[:, 0].tolist() total_match = normalize_df[:, 1].tolist() # for svr (input variable = total date and total match) x = normalize_df[:, [1, 2]].tolist() x_train = x[: len(x) - 1] x_test = x[len(x) - 1 :] # for linear reression & sarimax trn_date = total_date[: len(total_date) - 1] trn_user = total_user[: len(total_user) - 1] trn_match = total_match[: len(total_match) - 1] tst_date = total_date[len(total_date) - 1 :] tst_user = total_user[len(total_user) - 1 :] tst_match = total_match[len(total_match) - 1 :] # voting system with forecasting res_vote = [] res_vote.append( linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match) ) res_vote.append(sarimax_predictor(trn_user, trn_match, tst_match)) res_vote.append(support_vector_regressor(x_train, x_test, trn_user)) # check the safety of todays'data^^ data_safety_checker(res_vote, tst_user)
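The `linear_regression_prediction` helper in the snapshot above fits its coefficients with the normal equation, beta = (XᵀX)⁻¹ Xᵀ y, and then takes a dot product with the test features. The sketch below restates just that step on made-up numbers; the function and variable names are illustrative, not the repository's.

```python
import numpy as np


def fit_normal_equation(features: np.ndarray, targets: np.ndarray) -> np.ndarray:
    """Least-squares coefficients beta = (X^T X)^-1 X^T y."""
    return np.linalg.inv(features.T @ features) @ features.T @ targets


if __name__ == "__main__":
    # three training rows of [bias, date, total_event] and their total_user targets
    x_train = np.array([[1.0, 2.0, 3.0], [1.0, 3.0, 1.0], [1.0, 4.0, 2.0]])
    y_train = np.array([5.0, 3.0, 4.0])
    beta = fit_normal_equation(x_train, y_train)
    x_test = np.array([1.0, 5.0, 4.0])
    print(float(beta @ x_test))  # prediction for the held-out row
```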
""" this is code for forecasting but i modified it and used it for safety checker of data for ex: you have a online shop and for some reason some data are missing (the amount of data that u expected are not supposed to be) then we can use it *ps : 1. ofc we can use normal statistic method but in this case the data is quite absurd and only a little^^ 2. ofc u can use this and modified it for forecasting purpose for the next 3 months sales or something, u can just adjust it for ur own purpose """ import numpy as np import pandas as pd from sklearn.preprocessing import Normalizer from sklearn.svm import SVR from statsmodels.tsa.statespace.sarimax import SARIMAX def linear_regression_prediction( train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list ) -> float: """ First method: linear regression input : training data (date, total_user, total_event) in list of float output : list of total user prediction in float >>> n = linear_regression_prediction([2,3,4,5], [5,3,4,6], [3,1,2,4], [2,1], [2,2]) >>> abs(n - 5.0) < 1e-6 # Checking precision because of floating point errors True """ x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)]) y = np.array(train_usr) beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y) return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2]) def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float: """ second method: Sarimax sarimax is a statistic method which using previous input and learn its pattern to predict future data input : training data (total_user, with exog data = total_event) in list of float output : list of total user prediction in float >>> sarimax_predictor([4,2,6,8], [3,1,2,4], [2]) 6.6666671111109626 """ order = (1, 2, 1) seasonal_order = (1, 1, 0, 7) model = SARIMAX( train_user, exog=train_match, order=order, seasonal_order=seasonal_order ) model_fit = model.fit(disp=False, maxiter=600, method="nm") result = model_fit.predict(1, len(test_match), exog=[test_match]) return result[0] def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float: """ Third method: Support vector regressor svr is quite the same with svm(support vector machine) it uses the same principles as the SVM for classification, with only a few minor differences and the only different is that it suits better for regression purpose input : training data (date, total_user, total_event) in list of float where x = list of set (date and total event) output : list of total user prediction in float >>> support_vector_regressor([[5,2],[1,5],[6,2]], [[3,2]], [2,1,4]) 1.634932078116079 """ regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1) regressor.fit(x_train, train_user) y_pred = regressor.predict(x_test) return y_pred[0] def interquartile_range_checker(train_user: list) -> float: """ Optional method: interquatile range input : list of total user in float output : low limit of input in float this method can be used to check whether some data is outlier or not >>> interquartile_range_checker([1,2,3,4,5,6,7,8,9,10]) 2.8 """ train_user.sort() q1 = np.percentile(train_user, 25) q3 = np.percentile(train_user, 75) iqr = q3 - q1 low_lim = q1 - (iqr * 0.1) return low_lim def data_safety_checker(list_vote: list, actual_result: float) -> None: """ Used to review all the votes (list result prediction) and compare it to the actual result. 
input : list of predictions output : print whether it's safe or not >>> data_safety_checker([2,3,4],5.0) Today's data is not safe. """ safe = 0 not_safe = 0 for i in list_vote: if i > actual_result: safe = not_safe + 1 else: if abs(abs(i) - abs(actual_result)) <= 0.1: safe = safe + 1 else: not_safe = not_safe + 1 print(f"Today's data is {'not ' if safe <= not_safe else ''}safe.") # data_input_df = pd.read_csv("ex_data.csv", header=None) data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]] data_input_df = pd.DataFrame(data_input, columns=["total_user", "total_even", "days"]) """ data column = total user in a day, how much online event held in one day, what day is that(sunday-saturday) """ # start normalization normalize_df = Normalizer().fit_transform(data_input_df.values) # split data total_date = normalize_df[:, 2].tolist() total_user = normalize_df[:, 0].tolist() total_match = normalize_df[:, 1].tolist() # for svr (input variable = total date and total match) x = normalize_df[:, [1, 2]].tolist() x_train = x[: len(x) - 1] x_test = x[len(x) - 1 :] # for linear reression & sarimax trn_date = total_date[: len(total_date) - 1] trn_user = total_user[: len(total_user) - 1] trn_match = total_match[: len(total_match) - 1] tst_date = total_date[len(total_date) - 1 :] tst_user = total_user[len(total_user) - 1 :] tst_match = total_match[len(total_match) - 1 :] # voting system with forecasting res_vote = [] res_vote.append( linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match) ) res_vote.append(sarimax_predictor(trn_user, trn_match, tst_match)) res_vote.append(support_vector_regressor(x_train, x_test, trn_user)) # check the safety of todays'data^^ data_safety_checker(res_vote, tst_user)
1
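Both snapshots in this record end by handing three model predictions to `data_safety_checker`, which prints whether today's figure looks safe. The sketch below is a minimal restatement of that voting idea only, not the repository function itself (which prints rather than returns and counts over-predictions separately); the 0.1 tolerance is an assumption carried over from the snapshot.

```python
from __future__ import annotations


def majority_vote_is_safe(votes: list[float], actual: float, tol: float = 0.1) -> bool:
    """True when strictly more predictions fall within ``tol`` of the actual value than outside it."""
    close = sum(abs(vote - actual) <= tol for vote in votes)
    return close > len(votes) - close


if __name__ == "__main__":
    print(majority_vote_is_safe([2.0, 3.0, 4.0], 5.0))    # False: no prediction is close enough
    print(majority_vote_is_safe([4.95, 5.05, 7.0], 5.0))  # True: two of the three votes agree
```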
TheAlgorithms/Python
4,293
[mypy] fix small folders 2
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-03-26T10:13:28Z"
"2021-03-26T11:21:17Z"
959507901ac8f10cd605c51c305d13b27d105536
9b60be67afca18f0d5e50e532096a68605d61b81
[mypy] fix small folders 2. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
"""README, Author - Anurag Kumar(mailto:[email protected]) Requirements: - sklearn - numpy - matplotlib Python: - 3.5 Inputs: - X , a 2D numpy array of features. - k , number of clusters to create. - initial_centroids , initial centroid values generated by utility function(mentioned in usage). - maxiter , maximum number of iterations to process. - heterogeneity , empty list that will be filled with hetrogeneity values if passed to kmeans func. Usage: 1. define 'k' value, 'X' features array and 'hetrogeneity' empty list 2. create initial_centroids, initial_centroids = get_initial_centroids( X, k, seed=0 # seed value for initial centroid generation, # None for randomness(default=None) ) 3. find centroids and clusters using kmeans function. centroids, cluster_assignment = kmeans( X, k, initial_centroids, maxiter=400, record_heterogeneity=heterogeneity, verbose=True # whether to print logs in console or not.(default=False) ) 4. Plot the loss function, hetrogeneity values for every iteration saved in hetrogeneity list. plot_heterogeneity( heterogeneity, k ) 5. Transfers Dataframe into excel format it must have feature called 'Clust' with k means clustering numbers in it. """ import warnings import numpy as np import pandas as pd from matplotlib import pyplot as plt from sklearn.metrics import pairwise_distances warnings.filterwarnings("ignore") TAG = "K-MEANS-CLUST/ " def get_initial_centroids(data, k, seed=None): """Randomly choose k data points as initial centroids""" if seed is not None: # useful for obtaining consistent results np.random.seed(seed) n = data.shape[0] # number of data points # Pick K indices from range [0, N). rand_indices = np.random.randint(0, n, k) # Keep centroids as dense format, as many entries will be nonzero due to averaging. # As long as at least one document in a cluster contains a word, # it will carry a nonzero weight in the TF-IDF vector of the centroid. centroids = data[rand_indices, :] return centroids def centroid_pairwise_dist(X, centroids): return pairwise_distances(X, centroids, metric="euclidean") def assign_clusters(data, centroids): # Compute distances between each data point and the set of centroids: # Fill in the blank (RHS only) distances_from_centroids = centroid_pairwise_dist(data, centroids) # Compute cluster assignments for each data point: # Fill in the blank (RHS only) cluster_assignment = np.argmin(distances_from_centroids, axis=1) return cluster_assignment def revise_centroids(data, k, cluster_assignment): new_centroids = [] for i in range(k): # Select all data points that belong to cluster i. Fill in the blank (RHS only) member_data_points = data[cluster_assignment == i] # Compute the mean of the data points. Fill in the blank (RHS only) centroid = member_data_points.mean(axis=0) new_centroids.append(centroid) new_centroids = np.array(new_centroids) return new_centroids def compute_heterogeneity(data, k, centroids, cluster_assignment): heterogeneity = 0.0 for i in range(k): # Select all data points that belong to cluster i. 
Fill in the blank (RHS only) member_data_points = data[cluster_assignment == i, :] if member_data_points.shape[0] > 0: # check if i-th cluster is non-empty # Compute distances from centroid to data points (RHS only) distances = pairwise_distances( member_data_points, [centroids[i]], metric="euclidean" ) squared_distances = distances ** 2 heterogeneity += np.sum(squared_distances) return heterogeneity def plot_heterogeneity(heterogeneity, k): plt.figure(figsize=(7, 4)) plt.plot(heterogeneity, linewidth=4) plt.xlabel("# Iterations") plt.ylabel("Heterogeneity") plt.title(f"Heterogeneity of clustering over time, K={k:d}") plt.rcParams.update({"font.size": 16}) plt.show() def kmeans( data, k, initial_centroids, maxiter=500, record_heterogeneity=None, verbose=False ): """This function runs k-means on given data and initial set of centroids. maxiter: maximum number of iterations to run.(default=500) record_heterogeneity: (optional) a list, to store the history of heterogeneity as function of iterations if None, do not store the history. verbose: if True, print how many data points changed their cluster labels in each iteration""" centroids = initial_centroids[:] prev_cluster_assignment = None for itr in range(maxiter): if verbose: print(itr, end="") # 1. Make cluster assignments using nearest centroids cluster_assignment = assign_clusters(data, centroids) # 2. Compute a new centroid for each of the k clusters, averaging all data # points assigned to that cluster. centroids = revise_centroids(data, k, cluster_assignment) # Check for convergence: if none of the assignments changed, stop if ( prev_cluster_assignment is not None and (prev_cluster_assignment == cluster_assignment).all() ): break # Print number of new assignments if prev_cluster_assignment is not None: num_changed = np.sum(prev_cluster_assignment != cluster_assignment) if verbose: print( " {:5d} elements changed their cluster assignment.".format( num_changed ) ) # Record heterogeneity convergence metric if record_heterogeneity is not None: # YOUR CODE HERE score = compute_heterogeneity(data, k, centroids, cluster_assignment) record_heterogeneity.append(score) prev_cluster_assignment = cluster_assignment[:] return centroids, cluster_assignment # Mock test below if False: # change to true to run this test case. from sklearn import datasets as ds dataset = ds.load_iris() k = 3 heterogeneity = [] initial_centroids = get_initial_centroids(dataset["data"], k, seed=0) centroids, cluster_assignment = kmeans( dataset["data"], k, initial_centroids, maxiter=400, record_heterogeneity=heterogeneity, verbose=True, ) plot_heterogeneity(heterogeneity, k) def ReportGenerator( df: pd.DataFrame, ClusteringVariables: np.array, FillMissingReport=None ) -> pd.DataFrame: """ Function generates easy-erading clustering report. 
It takes 2 arguments as an input: DataFrame - dataframe with predicted cluester column; FillMissingReport - dictionary of rules how we are going to fill missing values of for final report generate (not included in modeling); in order to run the function following libraries must be imported: import pandas as pd import numpy as np >>> data = pd.DataFrame() >>> data['numbers'] = [1, 2, 3] >>> data['col1'] = [0.5, 2.5, 4.5] >>> data['col2'] = [100, 200, 300] >>> data['col3'] = [10, 20, 30] >>> data['Cluster'] = [1, 1, 2] >>> ReportGenerator(data, ['col1', 'col2'], 0) Features Type Mark 1 2 0 # of Customers ClusterSize False 2.000000 1.000000 1 % of Customers ClusterProportion False 0.666667 0.333333 2 col1 mean_with_zeros True 1.500000 4.500000 3 col2 mean_with_zeros True 150.000000 300.000000 4 numbers mean_with_zeros False 1.500000 3.000000 .. ... ... ... ... ... 99 dummy 5% False 1.000000 1.000000 100 dummy 95% False 1.000000 1.000000 101 dummy stdev False 0.000000 NaN 102 dummy mode False 1.000000 1.000000 103 dummy median False 1.000000 1.000000 <BLANKLINE> [104 rows x 5 columns] """ # Fill missing values with given rules if FillMissingReport: df.fillna(value=FillMissingReport, inplace=True) df["dummy"] = 1 numeric_cols = df.select_dtypes(np.number).columns report = ( df.groupby(["Cluster"])[ # construct report dataframe numeric_cols ] # group by cluster number .agg( [ ("sum", np.sum), ("mean_with_zeros", lambda x: np.mean(np.nan_to_num(x))), ("mean_without_zeros", lambda x: x.replace(0, np.NaN).mean()), ( "mean_25-75", lambda x: np.mean( np.nan_to_num( sorted(x)[ round(len(x) * 25 / 100) : round(len(x) * 75 / 100) ] ) ), ), ("mean_with_na", np.mean), ("min", lambda x: x.min()), ("5%", lambda x: x.quantile(0.05)), ("25%", lambda x: x.quantile(0.25)), ("50%", lambda x: x.quantile(0.50)), ("75%", lambda x: x.quantile(0.75)), ("95%", lambda x: x.quantile(0.95)), ("max", lambda x: x.max()), ("count", lambda x: x.count()), ("stdev", lambda x: x.std()), ("mode", lambda x: x.mode()[0]), ("median", lambda x: x.median()), ("# > 0", lambda x: (x > 0).sum()), ] ) .T.reset_index() .rename(index=str, columns={"level_0": "Features", "level_1": "Type"}) ) # rename columns # calculate the size of cluster(count of clientID's) clustersize = report[ (report["Features"] == "dummy") & (report["Type"] == "count") ].copy() # avoid SettingWithCopyWarning clustersize.Type = ( "ClusterSize" # rename created cluster df to match report column names ) clustersize.Features = "# of Customers" clusterproportion = pd.DataFrame( clustersize.iloc[:, 2:].values / clustersize.iloc[:, 2:].values.sum() # calculating the proportion of cluster ) clusterproportion[ "Type" ] = "% of Customers" # rename created cluster df to match report column names clusterproportion["Features"] = "ClusterProportion" cols = clusterproportion.columns.tolist() cols = cols[-2:] + cols[:-2] clusterproportion = clusterproportion[cols] # rearrange columns to match report clusterproportion.columns = report.columns a = pd.DataFrame( abs( report[report["Type"] == "count"].iloc[:, 2:].values - clustersize.iloc[:, 2:].values ) ) # generating df with count of nan values a["Features"] = 0 a["Type"] = "# of nan" a.Features = report[ report["Type"] == "count" ].Features.tolist() # filling values in order to match report cols = a.columns.tolist() cols = cols[-2:] + cols[:-2] a = a[cols] # rearrange columns to match report a.columns = report.columns # rename columns to match report report = report.drop( report[report.Type == "count"].index ) # drop count values 
except cluster size report = pd.concat( [report, a, clustersize, clusterproportion], axis=0 ) # concat report with clustert size and nan values report["Mark"] = report["Features"].isin(ClusteringVariables) cols = report.columns.tolist() cols = cols[0:2] + cols[-1:] + cols[2:-1] report = report[cols] sorter1 = { "ClusterSize": 9, "ClusterProportion": 8, "mean_with_zeros": 7, "mean_with_na": 6, "max": 5, "50%": 4, "min": 3, "25%": 2, "75%": 1, "# of nan": 0, "# > 0": -1, "sum_with_na": -2, } report = ( report.assign( Sorter1=lambda x: x.Type.map(sorter1), Sorter2=lambda x: list(reversed(range(len(x)))), ) .sort_values(["Sorter1", "Mark", "Sorter2"], ascending=False) .drop(["Sorter1", "Sorter2"], axis=1) ) report.columns.name = "" report = report.reset_index() report.drop(columns=["index"], inplace=True) return report if __name__ == "__main__": import doctest doctest.testmod()
"""README, Author - Anurag Kumar(mailto:[email protected]) Requirements: - sklearn - numpy - matplotlib Python: - 3.5 Inputs: - X , a 2D numpy array of features. - k , number of clusters to create. - initial_centroids , initial centroid values generated by utility function(mentioned in usage). - maxiter , maximum number of iterations to process. - heterogeneity , empty list that will be filled with hetrogeneity values if passed to kmeans func. Usage: 1. define 'k' value, 'X' features array and 'hetrogeneity' empty list 2. create initial_centroids, initial_centroids = get_initial_centroids( X, k, seed=0 # seed value for initial centroid generation, # None for randomness(default=None) ) 3. find centroids and clusters using kmeans function. centroids, cluster_assignment = kmeans( X, k, initial_centroids, maxiter=400, record_heterogeneity=heterogeneity, verbose=True # whether to print logs in console or not.(default=False) ) 4. Plot the loss function, hetrogeneity values for every iteration saved in hetrogeneity list. plot_heterogeneity( heterogeneity, k ) 5. Transfers Dataframe into excel format it must have feature called 'Clust' with k means clustering numbers in it. """ import warnings import numpy as np import pandas as pd from matplotlib import pyplot as plt from sklearn.metrics import pairwise_distances warnings.filterwarnings("ignore") TAG = "K-MEANS-CLUST/ " def get_initial_centroids(data, k, seed=None): """Randomly choose k data points as initial centroids""" if seed is not None: # useful for obtaining consistent results np.random.seed(seed) n = data.shape[0] # number of data points # Pick K indices from range [0, N). rand_indices = np.random.randint(0, n, k) # Keep centroids as dense format, as many entries will be nonzero due to averaging. # As long as at least one document in a cluster contains a word, # it will carry a nonzero weight in the TF-IDF vector of the centroid. centroids = data[rand_indices, :] return centroids def centroid_pairwise_dist(X, centroids): return pairwise_distances(X, centroids, metric="euclidean") def assign_clusters(data, centroids): # Compute distances between each data point and the set of centroids: # Fill in the blank (RHS only) distances_from_centroids = centroid_pairwise_dist(data, centroids) # Compute cluster assignments for each data point: # Fill in the blank (RHS only) cluster_assignment = np.argmin(distances_from_centroids, axis=1) return cluster_assignment def revise_centroids(data, k, cluster_assignment): new_centroids = [] for i in range(k): # Select all data points that belong to cluster i. Fill in the blank (RHS only) member_data_points = data[cluster_assignment == i] # Compute the mean of the data points. Fill in the blank (RHS only) centroid = member_data_points.mean(axis=0) new_centroids.append(centroid) new_centroids = np.array(new_centroids) return new_centroids def compute_heterogeneity(data, k, centroids, cluster_assignment): heterogeneity = 0.0 for i in range(k): # Select all data points that belong to cluster i. 
Fill in the blank (RHS only) member_data_points = data[cluster_assignment == i, :] if member_data_points.shape[0] > 0: # check if i-th cluster is non-empty # Compute distances from centroid to data points (RHS only) distances = pairwise_distances( member_data_points, [centroids[i]], metric="euclidean" ) squared_distances = distances ** 2 heterogeneity += np.sum(squared_distances) return heterogeneity def plot_heterogeneity(heterogeneity, k): plt.figure(figsize=(7, 4)) plt.plot(heterogeneity, linewidth=4) plt.xlabel("# Iterations") plt.ylabel("Heterogeneity") plt.title(f"Heterogeneity of clustering over time, K={k:d}") plt.rcParams.update({"font.size": 16}) plt.show() def kmeans( data, k, initial_centroids, maxiter=500, record_heterogeneity=None, verbose=False ): """This function runs k-means on given data and initial set of centroids. maxiter: maximum number of iterations to run.(default=500) record_heterogeneity: (optional) a list, to store the history of heterogeneity as function of iterations if None, do not store the history. verbose: if True, print how many data points changed their cluster labels in each iteration""" centroids = initial_centroids[:] prev_cluster_assignment = None for itr in range(maxiter): if verbose: print(itr, end="") # 1. Make cluster assignments using nearest centroids cluster_assignment = assign_clusters(data, centroids) # 2. Compute a new centroid for each of the k clusters, averaging all data # points assigned to that cluster. centroids = revise_centroids(data, k, cluster_assignment) # Check for convergence: if none of the assignments changed, stop if ( prev_cluster_assignment is not None and (prev_cluster_assignment == cluster_assignment).all() ): break # Print number of new assignments if prev_cluster_assignment is not None: num_changed = np.sum(prev_cluster_assignment != cluster_assignment) if verbose: print( " {:5d} elements changed their cluster assignment.".format( num_changed ) ) # Record heterogeneity convergence metric if record_heterogeneity is not None: # YOUR CODE HERE score = compute_heterogeneity(data, k, centroids, cluster_assignment) record_heterogeneity.append(score) prev_cluster_assignment = cluster_assignment[:] return centroids, cluster_assignment # Mock test below if False: # change to true to run this test case. from sklearn import datasets as ds dataset = ds.load_iris() k = 3 heterogeneity = [] initial_centroids = get_initial_centroids(dataset["data"], k, seed=0) centroids, cluster_assignment = kmeans( dataset["data"], k, initial_centroids, maxiter=400, record_heterogeneity=heterogeneity, verbose=True, ) plot_heterogeneity(heterogeneity, k) def ReportGenerator( df: pd.DataFrame, ClusteringVariables: np.ndarray, FillMissingReport=None ) -> pd.DataFrame: """ Function generates easy-erading clustering report. 
It takes 2 arguments as an input: DataFrame - dataframe with predicted cluester column; FillMissingReport - dictionary of rules how we are going to fill missing values of for final report generate (not included in modeling); in order to run the function following libraries must be imported: import pandas as pd import numpy as np >>> data = pd.DataFrame() >>> data['numbers'] = [1, 2, 3] >>> data['col1'] = [0.5, 2.5, 4.5] >>> data['col2'] = [100, 200, 300] >>> data['col3'] = [10, 20, 30] >>> data['Cluster'] = [1, 1, 2] >>> ReportGenerator(data, ['col1', 'col2'], 0) Features Type Mark 1 2 0 # of Customers ClusterSize False 2.000000 1.000000 1 % of Customers ClusterProportion False 0.666667 0.333333 2 col1 mean_with_zeros True 1.500000 4.500000 3 col2 mean_with_zeros True 150.000000 300.000000 4 numbers mean_with_zeros False 1.500000 3.000000 .. ... ... ... ... ... 99 dummy 5% False 1.000000 1.000000 100 dummy 95% False 1.000000 1.000000 101 dummy stdev False 0.000000 NaN 102 dummy mode False 1.000000 1.000000 103 dummy median False 1.000000 1.000000 <BLANKLINE> [104 rows x 5 columns] """ # Fill missing values with given rules if FillMissingReport: df.fillna(value=FillMissingReport, inplace=True) df["dummy"] = 1 numeric_cols = df.select_dtypes(np.number).columns report = ( df.groupby(["Cluster"])[ # construct report dataframe numeric_cols ] # group by cluster number .agg( [ ("sum", np.sum), ("mean_with_zeros", lambda x: np.mean(np.nan_to_num(x))), ("mean_without_zeros", lambda x: x.replace(0, np.NaN).mean()), ( "mean_25-75", lambda x: np.mean( np.nan_to_num( sorted(x)[ round(len(x) * 25 / 100) : round(len(x) * 75 / 100) ] ) ), ), ("mean_with_na", np.mean), ("min", lambda x: x.min()), ("5%", lambda x: x.quantile(0.05)), ("25%", lambda x: x.quantile(0.25)), ("50%", lambda x: x.quantile(0.50)), ("75%", lambda x: x.quantile(0.75)), ("95%", lambda x: x.quantile(0.95)), ("max", lambda x: x.max()), ("count", lambda x: x.count()), ("stdev", lambda x: x.std()), ("mode", lambda x: x.mode()[0]), ("median", lambda x: x.median()), ("# > 0", lambda x: (x > 0).sum()), ] ) .T.reset_index() .rename(index=str, columns={"level_0": "Features", "level_1": "Type"}) ) # rename columns # calculate the size of cluster(count of clientID's) clustersize = report[ (report["Features"] == "dummy") & (report["Type"] == "count") ].copy() # avoid SettingWithCopyWarning clustersize.Type = ( "ClusterSize" # rename created cluster df to match report column names ) clustersize.Features = "# of Customers" clusterproportion = pd.DataFrame( clustersize.iloc[:, 2:].values / clustersize.iloc[:, 2:].values.sum() # calculating the proportion of cluster ) clusterproportion[ "Type" ] = "% of Customers" # rename created cluster df to match report column names clusterproportion["Features"] = "ClusterProportion" cols = clusterproportion.columns.tolist() cols = cols[-2:] + cols[:-2] clusterproportion = clusterproportion[cols] # rearrange columns to match report clusterproportion.columns = report.columns a = pd.DataFrame( abs( report[report["Type"] == "count"].iloc[:, 2:].values - clustersize.iloc[:, 2:].values ) ) # generating df with count of nan values a["Features"] = 0 a["Type"] = "# of nan" a.Features = report[ report["Type"] == "count" ].Features.tolist() # filling values in order to match report cols = a.columns.tolist() cols = cols[-2:] + cols[:-2] a = a[cols] # rearrange columns to match report a.columns = report.columns # rename columns to match report report = report.drop( report[report.Type == "count"].index ) # drop count values 
except cluster size report = pd.concat( [report, a, clustersize, clusterproportion], axis=0 ) # concat report with clustert size and nan values report["Mark"] = report["Features"].isin(ClusteringVariables) cols = report.columns.tolist() cols = cols[0:2] + cols[-1:] + cols[2:-1] report = report[cols] sorter1 = { "ClusterSize": 9, "ClusterProportion": 8, "mean_with_zeros": 7, "mean_with_na": 6, "max": 5, "50%": 4, "min": 3, "25%": 2, "75%": 1, "# of nan": 0, "# > 0": -1, "sum_with_na": -2, } report = ( report.assign( Sorter1=lambda x: x.Type.map(sorter1), Sorter2=lambda x: list(reversed(range(len(x)))), ) .sort_values(["Sorter1", "Mark", "Sorter2"], ascending=False) .drop(["Sorter1", "Sorter2"], axis=1) ) report.columns.name = "" report = report.reset_index() report.drop(columns=["index"], inplace=True) return report if __name__ == "__main__": import doctest doctest.testmod()
1
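The `kmeans` function in this record alternates two steps until the assignments stop changing: assign every point to its nearest centroid, then recompute each centroid as the mean of its members. Below is a self-contained sketch of that loop using NumPy only; the toy blobs and seed are made up, and the repository version additionally records heterogeneity and uses scikit-learn's `pairwise_distances`.

```python
import numpy as np


def assign_clusters(data: np.ndarray, centroids: np.ndarray) -> np.ndarray:
    """Index of the nearest centroid (Euclidean) for every row of ``data``."""
    distances = np.linalg.norm(data[:, None, :] - centroids[None, :, :], axis=2)
    return distances.argmin(axis=1)


def revise_centroids(data: np.ndarray, k: int, assignment: np.ndarray) -> np.ndarray:
    """Mean of the points currently assigned to each of the k clusters."""
    return np.array([data[assignment == i].mean(axis=0) for i in range(k)])


if __name__ == "__main__":
    rng = np.random.default_rng(0)
    points = np.vstack([rng.normal(0, 0.1, (5, 2)), rng.normal(3, 0.1, (5, 2))])
    centroids = points[[0, -1]]          # two seed points, k = 2
    for _ in range(5):                   # a few alternating steps suffice on this toy data
        labels = assign_clusters(points, centroids)
        centroids = revise_centroids(points, 2, labels)
    print(labels)                        # first five points in cluster 0, last five in cluster 1
```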
TheAlgorithms/Python
4,293
[mypy] fix small folders 2
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-03-26T10:13:28Z"
"2021-03-26T11:21:17Z"
959507901ac8f10cd605c51c305d13b27d105536
9b60be67afca18f0d5e50e532096a68605d61b81
[mypy] fix small folders 2. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
import string from math import log10 """ tf-idf Wikipedia: https://en.wikipedia.org/wiki/Tf%E2%80%93idf tf-idf and other word frequency algorithms are often used as a weighting factor in information retrieval and text mining. 83% of text-based recommender systems use tf-idf for term weighting. In Layman's terms, tf-idf is a statistic intended to reflect how important a word is to a document in a corpus (a collection of documents) Here I've implemented several word frequency algorithms that are commonly used in information retrieval: Term Frequency, Document Frequency, and TF-IDF (Term-Frequency*Inverse-Document-Frequency) are included. Term Frequency is a statistical function that returns a number representing how frequently an expression occurs in a document. This indicates how significant a particular term is in a given document. Document Frequency is a statistical function that returns an integer representing the number of documents in a corpus that a term occurs in (where the max number returned would be the number of documents in the corpus). Inverse Document Frequency is mathematically written as log10(N/df), where N is the number of documents in your corpus and df is the Document Frequency. If df is 0, a ZeroDivisionError will be thrown. Term-Frequency*Inverse-Document-Frequency is a measure of the originality of a term. It is mathematically written as tf*log10(N/df). It compares the number of times a term appears in a document with the number of documents the term appears in. If df is 0, a ZeroDivisionError will be thrown. """ def term_frequency(term: str, document: str) -> int: """ Return the number of times a term occurs within a given document. @params: term, the term to search a document for, and document, the document to search within @returns: an integer representing the number of times a term is found within the document @examples: >>> term_frequency("to", "To be, or not to be") 2 """ # strip all punctuation and newlines and replace it with '' document_without_punctuation = document.translate( str.maketrans("", "", string.punctuation) ).replace("\n", "") tokenize_document = document_without_punctuation.split(" ") # word tokenization return len([word for word in tokenize_document if word.lower() == term.lower()]) def document_frequency(term: str, corpus: str) -> int: """ Calculate the number of documents in a corpus that contain a given term @params : term, the term to search each document for, and corpus, a collection of documents. Each document should be separated by a newline. @returns : the number of documents in the corpus that contain the term you are searching for and the number of documents in the corpus @examples : >>> document_frequency("first", "This is the first document in the corpus.\\nThIs\ is the second document in the corpus.\\nTHIS is \ the third document in the corpus.") (1, 3) """ corpus_without_punctuation = corpus.lower().translate( str.maketrans("", "", string.punctuation) ) # strip all punctuation and replace it with '' docs = corpus_without_punctuation.split("\n") term = term.lower() return (len([doc for doc in docs if term in doc]), len(docs)) def inverse_document_frequency(df: int, N: int, smoothing=False) -> float: """ Return an integer denoting the importance of a word. This measure of importance is calculated by log10(N/df), where N is the number of documents and df is the Document Frequency. 
@params : df, the Document Frequency, N, the number of documents in the corpus and smoothing, if True return the idf-smooth @returns : log10(N/df) or 1+log10(N/1+df) @examples : >>> inverse_document_frequency(3, 0) Traceback (most recent call last): ... ValueError: log10(0) is undefined. >>> inverse_document_frequency(1, 3) 0.477 >>> inverse_document_frequency(0, 3) Traceback (most recent call last): ... ZeroDivisionError: df must be > 0 >>> inverse_document_frequency(0, 3,True) 1.477 """ if smoothing: if N == 0: raise ValueError("log10(0) is undefined.") return round(1 + log10(N / (1 + df)), 3) if df == 0: raise ZeroDivisionError("df must be > 0") elif N == 0: raise ValueError("log10(0) is undefined.") return round(log10(N / df), 3) def tf_idf(tf: int, idf: int) -> float: """ Combine the term frequency and inverse document frequency functions to calculate the originality of a term. This 'originality' is calculated by multiplying the term frequency and the inverse document frequency : tf-idf = TF * IDF @params : tf, the term frequency, and idf, the inverse document frequency @examples : >>> tf_idf(2, 0.477) 0.954 """ return round(tf * idf, 3)
import string from math import log10 """ tf-idf Wikipedia: https://en.wikipedia.org/wiki/Tf%E2%80%93idf tf-idf and other word frequency algorithms are often used as a weighting factor in information retrieval and text mining. 83% of text-based recommender systems use tf-idf for term weighting. In Layman's terms, tf-idf is a statistic intended to reflect how important a word is to a document in a corpus (a collection of documents) Here I've implemented several word frequency algorithms that are commonly used in information retrieval: Term Frequency, Document Frequency, and TF-IDF (Term-Frequency*Inverse-Document-Frequency) are included. Term Frequency is a statistical function that returns a number representing how frequently an expression occurs in a document. This indicates how significant a particular term is in a given document. Document Frequency is a statistical function that returns an integer representing the number of documents in a corpus that a term occurs in (where the max number returned would be the number of documents in the corpus). Inverse Document Frequency is mathematically written as log10(N/df), where N is the number of documents in your corpus and df is the Document Frequency. If df is 0, a ZeroDivisionError will be thrown. Term-Frequency*Inverse-Document-Frequency is a measure of the originality of a term. It is mathematically written as tf*log10(N/df). It compares the number of times a term appears in a document with the number of documents the term appears in. If df is 0, a ZeroDivisionError will be thrown. """ def term_frequency(term: str, document: str) -> int: """ Return the number of times a term occurs within a given document. @params: term, the term to search a document for, and document, the document to search within @returns: an integer representing the number of times a term is found within the document @examples: >>> term_frequency("to", "To be, or not to be") 2 """ # strip all punctuation and newlines and replace it with '' document_without_punctuation = document.translate( str.maketrans("", "", string.punctuation) ).replace("\n", "") tokenize_document = document_without_punctuation.split(" ") # word tokenization return len([word for word in tokenize_document if word.lower() == term.lower()]) def document_frequency(term: str, corpus: str) -> tuple[int, int]: """ Calculate the number of documents in a corpus that contain a given term @params : term, the term to search each document for, and corpus, a collection of documents. Each document should be separated by a newline. @returns : the number of documents in the corpus that contain the term you are searching for and the number of documents in the corpus @examples : >>> document_frequency("first", "This is the first document in the corpus.\\nThIs\ is the second document in the corpus.\\nTHIS is \ the third document in the corpus.") (1, 3) """ corpus_without_punctuation = corpus.lower().translate( str.maketrans("", "", string.punctuation) ) # strip all punctuation and replace it with '' docs = corpus_without_punctuation.split("\n") term = term.lower() return (len([doc for doc in docs if term in doc]), len(docs)) def inverse_document_frequency(df: int, N: int, smoothing=False) -> float: """ Return an integer denoting the importance of a word. This measure of importance is calculated by log10(N/df), where N is the number of documents and df is the Document Frequency. 
@params : df, the Document Frequency, N, the number of documents in the corpus and smoothing, if True return the idf-smooth @returns : log10(N/df) or 1+log10(N/1+df) @examples : >>> inverse_document_frequency(3, 0) Traceback (most recent call last): ... ValueError: log10(0) is undefined. >>> inverse_document_frequency(1, 3) 0.477 >>> inverse_document_frequency(0, 3) Traceback (most recent call last): ... ZeroDivisionError: df must be > 0 >>> inverse_document_frequency(0, 3,True) 1.477 """ if smoothing: if N == 0: raise ValueError("log10(0) is undefined.") return round(1 + log10(N / (1 + df)), 3) if df == 0: raise ZeroDivisionError("df must be > 0") elif N == 0: raise ValueError("log10(0) is undefined.") return round(log10(N / df), 3) def tf_idf(tf: int, idf: int) -> float: """ Combine the term frequency and inverse document frequency functions to calculate the originality of a term. This 'originality' is calculated by multiplying the term frequency and the inverse document frequency : tf-idf = TF * IDF @params : tf, the term frequency, and idf, the inverse document frequency @examples : >>> tf_idf(2, 0.477) 0.954 """ return round(tf * idf, 3)
1
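Putting the three statistics from this record together on a toy corpus: term frequency counts occurrences of a term in one document, document frequency counts how many documents contain the term, and tf-idf weights the term by tf · log10(N/df). The corpus below is invented purely for illustration.

```python
from math import log10

corpus = [
    "this is the first document in the corpus",
    "this is the second document in the corpus",
    "a third text about something else entirely",
]
term = "document"

tf = corpus[0].split().count(term)               # occurrences of the term in document 0
df = sum(term in doc.split() for doc in corpus)  # number of documents containing the term
idf = log10(len(corpus) / df)                    # inverse document frequency, log10(N / df)
print(tf, df, round(idf, 3), round(tf * idf, 3))  # 1 2 0.176 0.176
```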
TheAlgorithms/Python
4,293
[mypy] fix small folders 2
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-03-26T10:13:28Z"
"2021-03-26T11:21:17Z"
959507901ac8f10cd605c51c305d13b27d105536
9b60be67afca18f0d5e50e532096a68605d61b81
[mypy] fix small folders 2. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" Perceptron w = w + N * (d(k) - y) * x(k) Using perceptron network for oil analysis, with Measuring of 3 parameters that represent chemical characteristics we can classify the oil, in p1 or p2 p1 = -1 p2 = 1 """ import random class Perceptron: def __init__(self, sample, target, learning_rate=0.01, epoch_number=1000, bias=-1): """ Initializes a Perceptron network for oil analysis :param sample: sample dataset of 3 parameters with shape [30,3] :param target: variable for classification with two possible states -1 or 1 :param learning_rate: learning rate used in optimizing. :param epoch_number: number of epochs to train network on. :param bias: bias value for the network. >>> p = Perceptron([], (0, 1, 2)) Traceback (most recent call last): ... ValueError: Sample data can not be empty >>> p = Perceptron(([0], 1, 2), []) Traceback (most recent call last): ... ValueError: Target data can not be empty >>> p = Perceptron(([0], 1, 2), (0, 1)) Traceback (most recent call last): ... ValueError: Sample data and Target data do not have matching lengths """ self.sample = sample if len(self.sample) == 0: raise ValueError("Sample data can not be empty") self.target = target if len(self.target) == 0: raise ValueError("Target data can not be empty") if len(self.sample) != len(self.target): raise ValueError("Sample data and Target data do not have matching lengths") self.learning_rate = learning_rate self.epoch_number = epoch_number self.bias = bias self.number_sample = len(sample) self.col_sample = len(sample[0]) # number of columns in dataset self.weight = [] def training(self) -> None: """ Trains perceptron for epochs <= given number of epochs :return: None >>> data = [[2.0149, 0.6192, 10.9263]] >>> targets = [-1] >>> perceptron = Perceptron(data,targets) >>> perceptron.training() # doctest: +ELLIPSIS ('\\nEpoch:\\n', ...) ... """ for sample in self.sample: sample.insert(0, self.bias) for i in range(self.col_sample): self.weight.append(random.random()) self.weight.insert(0, self.bias) epoch_count = 0 while True: has_misclassified = False for i in range(self.number_sample): u = 0 for j in range(self.col_sample + 1): u = u + self.weight[j] * self.sample[i][j] y = self.sign(u) if y != self.target[i]: for j in range(self.col_sample + 1): self.weight[j] = ( self.weight[j] + self.learning_rate * (self.target[i] - y) * self.sample[i][j] ) has_misclassified = True # print('Epoch: \n',epoch_count) epoch_count = epoch_count + 1 # if you want control the epoch or just by error if not has_misclassified: print(("\nEpoch:\n", epoch_count)) print("------------------------\n") # if epoch_count > self.epoch_number or not error: break def sort(self, sample) -> None: """ :param sample: example row to classify as P1 or P2 :return: None >>> data = [[2.0149, 0.6192, 10.9263]] >>> targets = [-1] >>> perceptron = Perceptron(data,targets) >>> perceptron.training() # doctest: +ELLIPSIS ('\\nEpoch:\\n', ...) ... >>> perceptron.sort([-0.6508, 0.1097, 4.0009]) # doctest: +ELLIPSIS ('Sample: ', ...) classification: P... 
""" if len(self.sample) == 0: raise ValueError("Sample data can not be empty") sample.insert(0, self.bias) u = 0 for i in range(self.col_sample + 1): u = u + self.weight[i] * sample[i] y = self.sign(u) if y == -1: print(("Sample: ", sample)) print("classification: P1") else: print(("Sample: ", sample)) print("classification: P2") def sign(self, u: float) -> int: """ threshold function for classification :param u: input number :return: 1 if the input is greater than 0, otherwise -1 >>> data = [[0],[-0.5],[0.5]] >>> targets = [1,-1,1] >>> perceptron = Perceptron(data,targets) >>> perceptron.sign(0) 1 >>> perceptron.sign(-0.5) -1 >>> perceptron.sign(0.5) 1 """ return 1 if u >= 0 else -1 samples = [ [-0.6508, 0.1097, 4.0009], [-1.4492, 0.8896, 4.4005], [2.0850, 0.6876, 12.0710], [0.2626, 1.1476, 7.7985], [0.6418, 1.0234, 7.0427], [0.2569, 0.6730, 8.3265], [1.1155, 0.6043, 7.4446], [0.0914, 0.3399, 7.0677], [0.0121, 0.5256, 4.6316], [-0.0429, 0.4660, 5.4323], [0.4340, 0.6870, 8.2287], [0.2735, 1.0287, 7.1934], [0.4839, 0.4851, 7.4850], [0.4089, -0.1267, 5.5019], [1.4391, 0.1614, 8.5843], [-0.9115, -0.1973, 2.1962], [0.3654, 1.0475, 7.4858], [0.2144, 0.7515, 7.1699], [0.2013, 1.0014, 6.5489], [0.6483, 0.2183, 5.8991], [-0.1147, 0.2242, 7.2435], [-0.7970, 0.8795, 3.8762], [-1.0625, 0.6366, 2.4707], [0.5307, 0.1285, 5.6883], [-1.2200, 0.7777, 1.7252], [0.3957, 0.1076, 5.6623], [-0.1013, 0.5989, 7.1812], [2.4482, 0.9455, 11.2095], [2.0149, 0.6192, 10.9263], [0.2012, 0.2611, 5.4631], ] exit = [ -1, -1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, ] if __name__ == "__main__": import doctest doctest.testmod() network = Perceptron( sample=samples, target=exit, learning_rate=0.01, epoch_number=1000, bias=-1 ) network.training() print("Finished training perceptron") print("Enter values to predict or q to exit") while True: sample = [] for i in range(len(samples[0])): observation = input("value: ").strip() if observation == "q": break observation = float(observation) sample.insert(i, observation) network.sort(sample)
""" Perceptron w = w + N * (d(k) - y) * x(k) Using perceptron network for oil analysis, with Measuring of 3 parameters that represent chemical characteristics we can classify the oil, in p1 or p2 p1 = -1 p2 = 1 """ import random class Perceptron: def __init__( self, sample: list[list[float]], target: list[int], learning_rate: float = 0.01, epoch_number: int = 1000, bias: float = -1, ) -> None: """ Initializes a Perceptron network for oil analysis :param sample: sample dataset of 3 parameters with shape [30,3] :param target: variable for classification with two possible states -1 or 1 :param learning_rate: learning rate used in optimizing. :param epoch_number: number of epochs to train network on. :param bias: bias value for the network. >>> p = Perceptron([], (0, 1, 2)) Traceback (most recent call last): ... ValueError: Sample data can not be empty >>> p = Perceptron(([0], 1, 2), []) Traceback (most recent call last): ... ValueError: Target data can not be empty >>> p = Perceptron(([0], 1, 2), (0, 1)) Traceback (most recent call last): ... ValueError: Sample data and Target data do not have matching lengths """ self.sample = sample if len(self.sample) == 0: raise ValueError("Sample data can not be empty") self.target = target if len(self.target) == 0: raise ValueError("Target data can not be empty") if len(self.sample) != len(self.target): raise ValueError("Sample data and Target data do not have matching lengths") self.learning_rate = learning_rate self.epoch_number = epoch_number self.bias = bias self.number_sample = len(sample) self.col_sample = len(sample[0]) # number of columns in dataset self.weight: list = [] def training(self) -> None: """ Trains perceptron for epochs <= given number of epochs :return: None >>> data = [[2.0149, 0.6192, 10.9263]] >>> targets = [-1] >>> perceptron = Perceptron(data,targets) >>> perceptron.training() # doctest: +ELLIPSIS ('\\nEpoch:\\n', ...) ... """ for sample in self.sample: sample.insert(0, self.bias) for i in range(self.col_sample): self.weight.append(random.random()) self.weight.insert(0, self.bias) epoch_count = 0 while True: has_misclassified = False for i in range(self.number_sample): u = 0 for j in range(self.col_sample + 1): u = u + self.weight[j] * self.sample[i][j] y = self.sign(u) if y != self.target[i]: for j in range(self.col_sample + 1): self.weight[j] = ( self.weight[j] + self.learning_rate * (self.target[i] - y) * self.sample[i][j] ) has_misclassified = True # print('Epoch: \n',epoch_count) epoch_count = epoch_count + 1 # if you want control the epoch or just by error if not has_misclassified: print(("\nEpoch:\n", epoch_count)) print("------------------------\n") # if epoch_count > self.epoch_number or not error: break def sort(self, sample: list[float]) -> None: """ :param sample: example row to classify as P1 or P2 :return: None >>> data = [[2.0149, 0.6192, 10.9263]] >>> targets = [-1] >>> perceptron = Perceptron(data,targets) >>> perceptron.training() # doctest: +ELLIPSIS ('\\nEpoch:\\n', ...) ... >>> perceptron.sort([-0.6508, 0.1097, 4.0009]) # doctest: +ELLIPSIS ('Sample: ', ...) classification: P... 
""" if len(self.sample) == 0: raise ValueError("Sample data can not be empty") sample.insert(0, self.bias) u = 0 for i in range(self.col_sample + 1): u = u + self.weight[i] * sample[i] y = self.sign(u) if y == -1: print(("Sample: ", sample)) print("classification: P1") else: print(("Sample: ", sample)) print("classification: P2") def sign(self, u: float) -> int: """ threshold function for classification :param u: input number :return: 1 if the input is greater than 0, otherwise -1 >>> data = [[0],[-0.5],[0.5]] >>> targets = [1,-1,1] >>> perceptron = Perceptron(data,targets) >>> perceptron.sign(0) 1 >>> perceptron.sign(-0.5) -1 >>> perceptron.sign(0.5) 1 """ return 1 if u >= 0 else -1 samples = [ [-0.6508, 0.1097, 4.0009], [-1.4492, 0.8896, 4.4005], [2.0850, 0.6876, 12.0710], [0.2626, 1.1476, 7.7985], [0.6418, 1.0234, 7.0427], [0.2569, 0.6730, 8.3265], [1.1155, 0.6043, 7.4446], [0.0914, 0.3399, 7.0677], [0.0121, 0.5256, 4.6316], [-0.0429, 0.4660, 5.4323], [0.4340, 0.6870, 8.2287], [0.2735, 1.0287, 7.1934], [0.4839, 0.4851, 7.4850], [0.4089, -0.1267, 5.5019], [1.4391, 0.1614, 8.5843], [-0.9115, -0.1973, 2.1962], [0.3654, 1.0475, 7.4858], [0.2144, 0.7515, 7.1699], [0.2013, 1.0014, 6.5489], [0.6483, 0.2183, 5.8991], [-0.1147, 0.2242, 7.2435], [-0.7970, 0.8795, 3.8762], [-1.0625, 0.6366, 2.4707], [0.5307, 0.1285, 5.6883], [-1.2200, 0.7777, 1.7252], [0.3957, 0.1076, 5.6623], [-0.1013, 0.5989, 7.1812], [2.4482, 0.9455, 11.2095], [2.0149, 0.6192, 10.9263], [0.2012, 0.2611, 5.4631], ] exit = [ -1, -1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, ] if __name__ == "__main__": import doctest doctest.testmod() network = Perceptron( sample=samples, target=exit, learning_rate=0.01, epoch_number=1000, bias=-1 ) network.training() print("Finished training perceptron") print("Enter values to predict or q to exit") while True: sample: list = [] for i in range(len(samples[0])): user_input = input("value: ").strip() if user_input == "q": break observation = float(user_input) sample.insert(i, observation) network.sort(sample)
1
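The update rule quoted at the top of this record, w = w + N * (d(k) - y) * x(k), is the whole training loop: nudge the weights whenever the signed prediction y disagrees with the target d(k). Here is a minimal sketch on a made-up, linearly separable toy set, with the bias prepended as a constant -1 input to mirror the record's convention; it is an illustration, not the repository class.

```python
samples = [[-1.0, 2.0, 1.0], [-1.0, 1.5, 2.0], [-1.0, -2.0, -1.0], [-1.0, -1.0, -2.0]]
targets = [1, 1, -1, -1]          # two linearly separable classes
weights = [0.0, 0.0, 0.0]         # [bias weight, w1, w2]
learning_rate = 0.1

for _ in range(100):              # upper bound on epochs; this toy set converges in two
    misclassified = False
    for x, d in zip(samples, targets):
        u = sum(w * xi for w, xi in zip(weights, x))
        y = 1 if u >= 0 else -1
        if y != d:                # w = w + N * (d - y) * x
            weights = [w + learning_rate * (d - y) * xi for w, xi in zip(weights, x)]
            misclassified = True
    if not misclassified:
        break

print([1 if sum(w * xi for w, xi in zip(weights, x)) >= 0 else -1 for x in samples])
```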
TheAlgorithms/Python
4,293
[mypy] fix small folders 2
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-03-26T10:13:28Z"
"2021-03-26T11:21:17Z"
959507901ac8f10cd605c51c305d13b27d105536
9b60be67afca18f0d5e50e532096a68605d61b81
[mypy] fix small folders 2. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
# flake8: noqa """ This is pure Python implementation of tree traversal algorithms """ from __future__ import annotations import queue class TreeNode: def __init__(self, data): self.data = data self.right = None self.left = None def build_tree(): print("\n********Press N to stop entering at any point of time********\n") check = input("Enter the value of the root node: ").strip().lower() or "n" if check == "n": return None q: queue.Queue = queue.Queue() tree_node = TreeNode(int(check)) q.put(tree_node) while not q.empty(): node_found = q.get() msg = "Enter the left node of %s: " % node_found.data check = input(msg).strip().lower() or "n" if check == "n": return tree_node left_node = TreeNode(int(check)) node_found.left = left_node q.put(left_node) msg = "Enter the right node of %s: " % node_found.data check = input(msg).strip().lower() or "n" if check == "n": return tree_node right_node = TreeNode(int(check)) node_found.right = right_node q.put(right_node) def pre_order(node: TreeNode) -> None: """ >>> root = TreeNode(1) >>> tree_node2 = TreeNode(2) >>> tree_node3 = TreeNode(3) >>> tree_node4 = TreeNode(4) >>> tree_node5 = TreeNode(5) >>> tree_node6 = TreeNode(6) >>> tree_node7 = TreeNode(7) >>> root.left, root.right = tree_node2, tree_node3 >>> tree_node2.left, tree_node2.right = tree_node4 , tree_node5 >>> tree_node3.left, tree_node3.right = tree_node6 , tree_node7 >>> pre_order(root) 1,2,4,5,3,6,7, """ if not isinstance(node, TreeNode) or not node: return print(node.data, end=",") pre_order(node.left) pre_order(node.right) def in_order(node: TreeNode) -> None: """ >>> root = TreeNode(1) >>> tree_node2 = TreeNode(2) >>> tree_node3 = TreeNode(3) >>> tree_node4 = TreeNode(4) >>> tree_node5 = TreeNode(5) >>> tree_node6 = TreeNode(6) >>> tree_node7 = TreeNode(7) >>> root.left, root.right = tree_node2, tree_node3 >>> tree_node2.left, tree_node2.right = tree_node4 , tree_node5 >>> tree_node3.left, tree_node3.right = tree_node6 , tree_node7 >>> in_order(root) 4,2,5,1,6,3,7, """ if not isinstance(node, TreeNode) or not node: return in_order(node.left) print(node.data, end=",") in_order(node.right) def post_order(node: TreeNode) -> None: """ >>> root = TreeNode(1) >>> tree_node2 = TreeNode(2) >>> tree_node3 = TreeNode(3) >>> tree_node4 = TreeNode(4) >>> tree_node5 = TreeNode(5) >>> tree_node6 = TreeNode(6) >>> tree_node7 = TreeNode(7) >>> root.left, root.right = tree_node2, tree_node3 >>> tree_node2.left, tree_node2.right = tree_node4 , tree_node5 >>> tree_node3.left, tree_node3.right = tree_node6 , tree_node7 >>> post_order(root) 4,5,2,6,7,3,1, """ if not isinstance(node, TreeNode) or not node: return post_order(node.left) post_order(node.right) print(node.data, end=",") def level_order(node: TreeNode) -> None: """ >>> root = TreeNode(1) >>> tree_node2 = TreeNode(2) >>> tree_node3 = TreeNode(3) >>> tree_node4 = TreeNode(4) >>> tree_node5 = TreeNode(5) >>> tree_node6 = TreeNode(6) >>> tree_node7 = TreeNode(7) >>> root.left, root.right = tree_node2, tree_node3 >>> tree_node2.left, tree_node2.right = tree_node4 , tree_node5 >>> tree_node3.left, tree_node3.right = tree_node6 , tree_node7 >>> level_order(root) 1,2,3,4,5,6,7, """ if not isinstance(node, TreeNode) or not node: return q: queue.Queue = queue.Queue() q.put(node) while not q.empty(): node_dequeued = q.get() print(node_dequeued.data, end=",") if node_dequeued.left: q.put(node_dequeued.left) if node_dequeued.right: q.put(node_dequeued.right) def level_order_actual(node: TreeNode) -> None: """ >>> root = TreeNode(1) >>> tree_node2 = 
TreeNode(2) >>> tree_node3 = TreeNode(3) >>> tree_node4 = TreeNode(4) >>> tree_node5 = TreeNode(5) >>> tree_node6 = TreeNode(6) >>> tree_node7 = TreeNode(7) >>> root.left, root.right = tree_node2, tree_node3 >>> tree_node2.left, tree_node2.right = tree_node4 , tree_node5 >>> tree_node3.left, tree_node3.right = tree_node6 , tree_node7 >>> level_order_actual(root) 1, 2,3, 4,5,6,7, """ if not isinstance(node, TreeNode) or not node: return q: queue.Queue = queue.Queue() q.put(node) while not q.empty(): list = [] while not q.empty(): node_dequeued = q.get() print(node_dequeued.data, end=",") if node_dequeued.left: list.append(node_dequeued.left) if node_dequeued.right: list.append(node_dequeued.right) print() for node in list: q.put(node) # iteration version def pre_order_iter(node: TreeNode) -> None: """ >>> root = TreeNode(1) >>> tree_node2 = TreeNode(2) >>> tree_node3 = TreeNode(3) >>> tree_node4 = TreeNode(4) >>> tree_node5 = TreeNode(5) >>> tree_node6 = TreeNode(6) >>> tree_node7 = TreeNode(7) >>> root.left, root.right = tree_node2, tree_node3 >>> tree_node2.left, tree_node2.right = tree_node4 , tree_node5 >>> tree_node3.left, tree_node3.right = tree_node6 , tree_node7 >>> pre_order_iter(root) 1,2,4,5,3,6,7, """ if not isinstance(node, TreeNode) or not node: return stack: List[TreeNode] = [] n = node while n or stack: while n: # start from root node, find its left child print(n.data, end=",") stack.append(n) n = n.left # end of while means current node doesn't have left child n = stack.pop() # start to traverse its right child n = n.right def in_order_iter(node: TreeNode) -> None: """ >>> root = TreeNode(1) >>> tree_node2 = TreeNode(2) >>> tree_node3 = TreeNode(3) >>> tree_node4 = TreeNode(4) >>> tree_node5 = TreeNode(5) >>> tree_node6 = TreeNode(6) >>> tree_node7 = TreeNode(7) >>> root.left, root.right = tree_node2, tree_node3 >>> tree_node2.left, tree_node2.right = tree_node4 , tree_node5 >>> tree_node3.left, tree_node3.right = tree_node6 , tree_node7 >>> in_order_iter(root) 4,2,5,1,6,3,7, """ if not isinstance(node, TreeNode) or not node: return stack: List[TreeNode] = [] n = node while n or stack: while n: stack.append(n) n = n.left n = stack.pop() print(n.data, end=",") n = n.right def post_order_iter(node: TreeNode) -> None: """ >>> root = TreeNode(1) >>> tree_node2 = TreeNode(2) >>> tree_node3 = TreeNode(3) >>> tree_node4 = TreeNode(4) >>> tree_node5 = TreeNode(5) >>> tree_node6 = TreeNode(6) >>> tree_node7 = TreeNode(7) >>> root.left, root.right = tree_node2, tree_node3 >>> tree_node2.left, tree_node2.right = tree_node4 , tree_node5 >>> tree_node3.left, tree_node3.right = tree_node6 , tree_node7 >>> post_order_iter(root) 4,5,2,6,7,3,1, """ if not isinstance(node, TreeNode) or not node: return stack1, stack2 = [], [] n = node stack1.append(n) while stack1: # to find the reversed order of post order, store it in stack2 n = stack1.pop() if n.left: stack1.append(n.left) if n.right: stack1.append(n.right) stack2.append(n) while stack2: # pop up from stack2 will be the post order print(stack2.pop().data, end=",") def prompt(s: str = "", width=50, char="*") -> str: if not s: return "\n" + width * char left, extra = divmod(width - len(s) - 2, 2) return f"{left * char} {s} {(left + extra) * char}" if __name__ == "__main__": import doctest doctest.testmod() print(prompt("Binary Tree Traversals")) node = build_tree() print(prompt("Pre Order Traversal")) pre_order(node) print(prompt() + "\n") print(prompt("In Order Traversal")) in_order(node) print(prompt() + "\n") print(prompt("Post Order 
Traversal")) post_order(node) print(prompt() + "\n") print(prompt("Level Order Traversal")) level_order(node) print(prompt() + "\n") print(prompt("Actual Level Order Traversal")) level_order_actual(node) print("*" * 50 + "\n") print(prompt("Pre Order Traversal - Iteration Version")) pre_order_iter(node) print(prompt() + "\n") print(prompt("In Order Traversal - Iteration Version")) in_order_iter(node) print(prompt() + "\n") print(prompt("Post Order Traversal - Iteration Version")) post_order_iter(node) print(prompt())
# flake8: noqa """ This is pure Python implementation of tree traversal algorithms """ from __future__ import annotations import queue class TreeNode: def __init__(self, data): self.data = data self.right = None self.left = None def build_tree(): print("\n********Press N to stop entering at any point of time********\n") check = input("Enter the value of the root node: ").strip().lower() or "n" if check == "n": return None q: queue.Queue = queue.Queue() tree_node = TreeNode(int(check)) q.put(tree_node) while not q.empty(): node_found = q.get() msg = "Enter the left node of %s: " % node_found.data check = input(msg).strip().lower() or "n" if check == "n": return tree_node left_node = TreeNode(int(check)) node_found.left = left_node q.put(left_node) msg = "Enter the right node of %s: " % node_found.data check = input(msg).strip().lower() or "n" if check == "n": return tree_node right_node = TreeNode(int(check)) node_found.right = right_node q.put(right_node) def pre_order(node: TreeNode) -> None: """ >>> root = TreeNode(1) >>> tree_node2 = TreeNode(2) >>> tree_node3 = TreeNode(3) >>> tree_node4 = TreeNode(4) >>> tree_node5 = TreeNode(5) >>> tree_node6 = TreeNode(6) >>> tree_node7 = TreeNode(7) >>> root.left, root.right = tree_node2, tree_node3 >>> tree_node2.left, tree_node2.right = tree_node4 , tree_node5 >>> tree_node3.left, tree_node3.right = tree_node6 , tree_node7 >>> pre_order(root) 1,2,4,5,3,6,7, """ if not isinstance(node, TreeNode) or not node: return print(node.data, end=",") pre_order(node.left) pre_order(node.right) def in_order(node: TreeNode) -> None: """ >>> root = TreeNode(1) >>> tree_node2 = TreeNode(2) >>> tree_node3 = TreeNode(3) >>> tree_node4 = TreeNode(4) >>> tree_node5 = TreeNode(5) >>> tree_node6 = TreeNode(6) >>> tree_node7 = TreeNode(7) >>> root.left, root.right = tree_node2, tree_node3 >>> tree_node2.left, tree_node2.right = tree_node4 , tree_node5 >>> tree_node3.left, tree_node3.right = tree_node6 , tree_node7 >>> in_order(root) 4,2,5,1,6,3,7, """ if not isinstance(node, TreeNode) or not node: return in_order(node.left) print(node.data, end=",") in_order(node.right) def post_order(node: TreeNode) -> None: """ >>> root = TreeNode(1) >>> tree_node2 = TreeNode(2) >>> tree_node3 = TreeNode(3) >>> tree_node4 = TreeNode(4) >>> tree_node5 = TreeNode(5) >>> tree_node6 = TreeNode(6) >>> tree_node7 = TreeNode(7) >>> root.left, root.right = tree_node2, tree_node3 >>> tree_node2.left, tree_node2.right = tree_node4 , tree_node5 >>> tree_node3.left, tree_node3.right = tree_node6 , tree_node7 >>> post_order(root) 4,5,2,6,7,3,1, """ if not isinstance(node, TreeNode) or not node: return post_order(node.left) post_order(node.right) print(node.data, end=",") def level_order(node: TreeNode) -> None: """ >>> root = TreeNode(1) >>> tree_node2 = TreeNode(2) >>> tree_node3 = TreeNode(3) >>> tree_node4 = TreeNode(4) >>> tree_node5 = TreeNode(5) >>> tree_node6 = TreeNode(6) >>> tree_node7 = TreeNode(7) >>> root.left, root.right = tree_node2, tree_node3 >>> tree_node2.left, tree_node2.right = tree_node4 , tree_node5 >>> tree_node3.left, tree_node3.right = tree_node6 , tree_node7 >>> level_order(root) 1,2,3,4,5,6,7, """ if not isinstance(node, TreeNode) or not node: return q: queue.Queue = queue.Queue() q.put(node) while not q.empty(): node_dequeued = q.get() print(node_dequeued.data, end=",") if node_dequeued.left: q.put(node_dequeued.left) if node_dequeued.right: q.put(node_dequeued.right) def level_order_actual(node: TreeNode) -> None: """ >>> root = TreeNode(1) >>> tree_node2 = 
TreeNode(2) >>> tree_node3 = TreeNode(3) >>> tree_node4 = TreeNode(4) >>> tree_node5 = TreeNode(5) >>> tree_node6 = TreeNode(6) >>> tree_node7 = TreeNode(7) >>> root.left, root.right = tree_node2, tree_node3 >>> tree_node2.left, tree_node2.right = tree_node4 , tree_node5 >>> tree_node3.left, tree_node3.right = tree_node6 , tree_node7 >>> level_order_actual(root) 1, 2,3, 4,5,6,7, """ if not isinstance(node, TreeNode) or not node: return q: queue.Queue = queue.Queue() q.put(node) while not q.empty(): list = [] while not q.empty(): node_dequeued = q.get() print(node_dequeued.data, end=",") if node_dequeued.left: list.append(node_dequeued.left) if node_dequeued.right: list.append(node_dequeued.right) print() for node in list: q.put(node) # iteration version def pre_order_iter(node: TreeNode) -> None: """ >>> root = TreeNode(1) >>> tree_node2 = TreeNode(2) >>> tree_node3 = TreeNode(3) >>> tree_node4 = TreeNode(4) >>> tree_node5 = TreeNode(5) >>> tree_node6 = TreeNode(6) >>> tree_node7 = TreeNode(7) >>> root.left, root.right = tree_node2, tree_node3 >>> tree_node2.left, tree_node2.right = tree_node4 , tree_node5 >>> tree_node3.left, tree_node3.right = tree_node6 , tree_node7 >>> pre_order_iter(root) 1,2,4,5,3,6,7, """ if not isinstance(node, TreeNode) or not node: return stack: list[TreeNode] = [] n = node while n or stack: while n: # start from root node, find its left child print(n.data, end=",") stack.append(n) n = n.left # end of while means current node doesn't have left child n = stack.pop() # start to traverse its right child n = n.right def in_order_iter(node: TreeNode) -> None: """ >>> root = TreeNode(1) >>> tree_node2 = TreeNode(2) >>> tree_node3 = TreeNode(3) >>> tree_node4 = TreeNode(4) >>> tree_node5 = TreeNode(5) >>> tree_node6 = TreeNode(6) >>> tree_node7 = TreeNode(7) >>> root.left, root.right = tree_node2, tree_node3 >>> tree_node2.left, tree_node2.right = tree_node4 , tree_node5 >>> tree_node3.left, tree_node3.right = tree_node6 , tree_node7 >>> in_order_iter(root) 4,2,5,1,6,3,7, """ if not isinstance(node, TreeNode) or not node: return stack: list[TreeNode] = [] n = node while n or stack: while n: stack.append(n) n = n.left n = stack.pop() print(n.data, end=",") n = n.right def post_order_iter(node: TreeNode) -> None: """ >>> root = TreeNode(1) >>> tree_node2 = TreeNode(2) >>> tree_node3 = TreeNode(3) >>> tree_node4 = TreeNode(4) >>> tree_node5 = TreeNode(5) >>> tree_node6 = TreeNode(6) >>> tree_node7 = TreeNode(7) >>> root.left, root.right = tree_node2, tree_node3 >>> tree_node2.left, tree_node2.right = tree_node4 , tree_node5 >>> tree_node3.left, tree_node3.right = tree_node6 , tree_node7 >>> post_order_iter(root) 4,5,2,6,7,3,1, """ if not isinstance(node, TreeNode) or not node: return stack1, stack2 = [], [] n = node stack1.append(n) while stack1: # to find the reversed order of post order, store it in stack2 n = stack1.pop() if n.left: stack1.append(n.left) if n.right: stack1.append(n.right) stack2.append(n) while stack2: # pop up from stack2 will be the post order print(stack2.pop().data, end=",") def prompt(s: str = "", width=50, char="*") -> str: if not s: return "\n" + width * char left, extra = divmod(width - len(s) - 2, 2) return f"{left * char} {s} {(left + extra) * char}" if __name__ == "__main__": import doctest doctest.testmod() print(prompt("Binary Tree Traversals")) node = build_tree() print(prompt("Pre Order Traversal")) pre_order(node) print(prompt() + "\n") print(prompt("In Order Traversal")) in_order(node) print(prompt() + "\n") print(prompt("Post Order 
Traversal")) post_order(node) print(prompt() + "\n") print(prompt("Level Order Traversal")) level_order(node) print(prompt() + "\n") print(prompt("Actual Level Order Traversal")) level_order_actual(node) print("*" * 50 + "\n") print(prompt("Pre Order Traversal - Iteration Version")) pre_order_iter(node) print(prompt() + "\n") print(prompt("In Order Traversal - Iteration Version")) in_order_iter(node) print(prompt() + "\n") print(prompt("Post Order Traversal - Iteration Version")) post_order_iter(node) print(prompt())
1
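The visible difference between this record's before_content and after_content is the annotation change from typing-style `List[TreeNode]` to the built-in `list[TreeNode]`, which the file's existing `from __future__ import annotations` makes safe on older interpreters. A small sketch of that annotation style applied to an illustrative helper of my own (the `| None` unions are my addition, not part of the PR):

```python
from __future__ import annotations  # annotations stay unevaluated, so list[...] works on 3.7+


class TreeNode:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: TreeNode | None = None
        self.right: TreeNode | None = None


def collect_preorder(node: TreeNode | None) -> list[int]:
    """Return values in pre-order as a list instead of printing (illustrative helper)."""
    if node is None:
        return []
    return [node.data] + collect_preorder(node.left) + collect_preorder(node.right)
```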
TheAlgorithms/Python
4,293
[mypy] fix small folders 2
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-03-26T10:13:28Z"
"2021-03-26T11:21:17Z"
959507901ac8f10cd605c51c305d13b27d105536
9b60be67afca18f0d5e50e532096a68605d61b81
[mypy] fix small folders 2. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
# https://en.wikipedia.org/wiki/Tree_traversal class Node: """ A Node has data variable and pointers to its left and right nodes. """ def __init__(self, data): self.left = None self.right = None self.data = data def make_tree() -> Node: root = Node(1) root.left = Node(2) root.right = Node(3) root.left.left = Node(4) root.left.right = Node(5) return root def preorder(root: Node): """ Pre-order traversal visits root node, left subtree, right subtree. >>> preorder(make_tree()) [1, 2, 4, 5, 3] """ return [root.data] + preorder(root.left) + preorder(root.right) if root else [] def postorder(root: Node): """ Post-order traversal visits left subtree, right subtree, root node. >>> postorder(make_tree()) [4, 5, 2, 3, 1] """ return postorder(root.left) + postorder(root.right) + [root.data] if root else [] def inorder(root: Node): """ In-order traversal visits left subtree, root node, right subtree. >>> inorder(make_tree()) [4, 2, 5, 1, 3] """ return inorder(root.left) + [root.data] + inorder(root.right) if root else [] def height(root: Node): """ Recursive function for calculating the height of the binary tree. >>> height(None) 0 >>> height(make_tree()) 3 """ return (max(height(root.left), height(root.right)) + 1) if root else 0 def level_order_1(root: Node): """ Print whole binary tree in Level Order Traverse. Level Order traverse: Visit nodes of the tree level-by-level. """ if not root: return temp = root que = [temp] while len(que) > 0: print(que[0].data, end=" ") temp = que.pop(0) if temp.left: que.append(temp.left) if temp.right: que.append(temp.right) return que def level_order_2(root: Node, level: int): """ Level-wise traversal: Print all nodes present at the given level of the binary tree """ if not root: return root if level == 1: print(root.data, end=" ") elif level > 1: level_order_2(root.left, level - 1) level_order_2(root.right, level - 1) def print_left_to_right(root: Node, level: int): """ Print elements on particular level from left to right direction of the binary tree. """ if not root: return if level == 1: print(root.data, end=" ") elif level > 1: print_left_to_right(root.left, level - 1) print_left_to_right(root.right, level - 1) def print_right_to_left(root: Node, level: int): """ Print elements on particular level from right to left direction of the binary tree. """ if not root: return if level == 1: print(root.data, end=" ") elif level > 1: print_right_to_left(root.right, level - 1) print_right_to_left(root.left, level - 1) def zigzag(root: Node): """ ZigZag traverse: Print node left to right and right to left, alternatively. """ flag = 0 height_tree = height(root) for h in range(1, height_tree + 1): if flag == 0: print_left_to_right(root, h) flag = 1 else: print_right_to_left(root, h) flag = 0 def main(): # Main function for testing. """ Create binary tree. """ root = make_tree() """ All Traversals of the binary are as follows: """ print(f" In-order Traversal is {inorder(root)}") print(f" Pre-order Traversal is {preorder(root)}") print(f"Post-order Traversal is {postorder(root)}") print(f"Height of Tree is {height(root)}") print("Complete Level Order Traversal is : ") level_order_1(root) print("\nLevel-wise order Traversal is : ") for h in range(1, height(root) + 1): level_order_2(root, h) print("\nZigZag order Traversal is : ") zigzag(root) print() if __name__ == "__main__": import doctest doctest.testmod() main()
# https://en.wikipedia.org/wiki/Tree_traversal class Node: """ A Node has data variable and pointers to its left and right nodes. """ def __init__(self, data): self.left = None self.right = None self.data = data def make_tree() -> Node: root = Node(1) root.left = Node(2) root.right = Node(3) root.left.left = Node(4) root.left.right = Node(5) return root def preorder(root: Node): """ Pre-order traversal visits root node, left subtree, right subtree. >>> preorder(make_tree()) [1, 2, 4, 5, 3] """ return [root.data] + preorder(root.left) + preorder(root.right) if root else [] def postorder(root: Node): """ Post-order traversal visits left subtree, right subtree, root node. >>> postorder(make_tree()) [4, 5, 2, 3, 1] """ return postorder(root.left) + postorder(root.right) + [root.data] if root else [] def inorder(root: Node): """ In-order traversal visits left subtree, root node, right subtree. >>> inorder(make_tree()) [4, 2, 5, 1, 3] """ return inorder(root.left) + [root.data] + inorder(root.right) if root else [] def height(root: Node): """ Recursive function for calculating the height of the binary tree. >>> height(None) 0 >>> height(make_tree()) 3 """ return (max(height(root.left), height(root.right)) + 1) if root else 0 def level_order_1(root: Node): """ Print whole binary tree in Level Order Traverse. Level Order traverse: Visit nodes of the tree level-by-level. """ if not root: return temp = root que = [temp] while len(que) > 0: print(que[0].data, end=" ") temp = que.pop(0) if temp.left: que.append(temp.left) if temp.right: que.append(temp.right) return que def level_order_2(root: Node, level: int): """ Level-wise traversal: Print all nodes present at the given level of the binary tree """ if not root: return root if level == 1: print(root.data, end=" ") elif level > 1: level_order_2(root.left, level - 1) level_order_2(root.right, level - 1) def print_left_to_right(root: Node, level: int): """ Print elements on particular level from left to right direction of the binary tree. """ if not root: return if level == 1: print(root.data, end=" ") elif level > 1: print_left_to_right(root.left, level - 1) print_left_to_right(root.right, level - 1) def print_right_to_left(root: Node, level: int): """ Print elements on particular level from right to left direction of the binary tree. """ if not root: return if level == 1: print(root.data, end=" ") elif level > 1: print_right_to_left(root.right, level - 1) print_right_to_left(root.left, level - 1) def zigzag(root: Node): """ ZigZag traverse: Print node left to right and right to left, alternatively. """ flag = 0 height_tree = height(root) for h in range(1, height_tree + 1): if flag == 0: print_left_to_right(root, h) flag = 1 else: print_right_to_left(root, h) flag = 0 def main(): # Main function for testing. """ Create binary tree. """ root = make_tree() """ All Traversals of the binary are as follows: """ print(f" In-order Traversal is {inorder(root)}") print(f" Pre-order Traversal is {preorder(root)}") print(f"Post-order Traversal is {postorder(root)}") print(f"Height of Tree is {height(root)}") print("Complete Level Order Traversal is : ") level_order_1(root) print("\nLevel-wise order Traversal is : ") for h in range(1, height(root) + 1): level_order_2(root, h) print("\nZigZag order Traversal is : ") zigzag(root) print() if __name__ == "__main__": import doctest doctest.testmod() main()
-1
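The traversals in this record are written as single conditional expressions over the left and right subtrees. A hedged sketch of the same one-line recursive pattern applied to a different task, node counting, which is not part of the file:

```python
class Node:
    def __init__(self, data: int) -> None:
        self.left = None
        self.right = None
        self.data = data


def count_nodes(root) -> int:
    """Count nodes using the same one-line recursive style as the traversals."""
    return (1 + count_nodes(root.left) + count_nodes(root.right)) if root else 0


root = Node(1)
root.left, root.right = Node(2), Node(3)
assert count_nodes(root) == 3
```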
TheAlgorithms/Python
4,293
[mypy] fix small folders 2
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-03-26T10:13:28Z"
"2021-03-26T11:21:17Z"
959507901ac8f10cd605c51c305d13b27d105536
9b60be67afca18f0d5e50e532096a68605d61b81
[mypy] fix small folders 2. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" The Fibonacci sequence is defined by the recurrence relation: Fn = Fn−1 + Fn−2, where F1 = 1 and F2 = 1. Hence the first 12 terms will be: F1 = 1 F2 = 1 F3 = 2 F4 = 3 F5 = 5 F6 = 8 F7 = 13 F8 = 21 F9 = 34 F10 = 55 F11 = 89 F12 = 144 The 12th term, F12, is the first term to contain three digits. What is the index of the first term in the Fibonacci sequence to contain 1000 digits? """ def solution(n: int = 1000) -> int: """Returns the index of the first term in the Fibonacci sequence to contain n digits. >>> solution(1000) 4782 >>> solution(100) 476 >>> solution(50) 237 >>> solution(3) 12 """ f1, f2 = 1, 1 index = 2 while True: i = 0 f = f1 + f2 f1, f2 = f2, f index += 1 for j in str(f): i += 1 if i == n: break return index if __name__ == "__main__": print(solution(int(str(input()).strip())))
""" The Fibonacci sequence is defined by the recurrence relation: Fn = Fn−1 + Fn−2, where F1 = 1 and F2 = 1. Hence the first 12 terms will be: F1 = 1 F2 = 1 F3 = 2 F4 = 3 F5 = 5 F6 = 8 F7 = 13 F8 = 21 F9 = 34 F10 = 55 F11 = 89 F12 = 144 The 12th term, F12, is the first term to contain three digits. What is the index of the first term in the Fibonacci sequence to contain 1000 digits? """ def solution(n: int = 1000) -> int: """Returns the index of the first term in the Fibonacci sequence to contain n digits. >>> solution(1000) 4782 >>> solution(100) 476 >>> solution(50) 237 >>> solution(3) 12 """ f1, f2 = 1, 1 index = 2 while True: i = 0 f = f1 + f2 f1, f2 = f2, f index += 1 for j in str(f): i += 1 if i == n: break return index if __name__ == "__main__": print(solution(int(str(input()).strip())))
-1
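The record's solution counts digits by iterating over `str(f)` character by character. An equivalent sketch using `len(str(...))` directly, written as an alternative phrasing rather than the repository's code; it reproduces the record's doctest value for n=3:

```python
def first_fib_index_with_n_digits(n: int = 1000) -> int:
    """Index of the first Fibonacci term with at least n digits (F1 = F2 = 1)."""
    f1, f2, index = 1, 1, 2
    while len(str(f2)) < n:
        f1, f2 = f2, f1 + f2
        index += 1
    return index


assert first_fib_index_with_n_digits(3) == 12
```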
TheAlgorithms/Python
4,293
[mypy] fix small folders 2
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-03-26T10:13:28Z"
"2021-03-26T11:21:17Z"
959507901ac8f10cd605c51c305d13b27d105536
9b60be67afca18f0d5e50e532096a68605d61b81
[mypy] fix small folders 2. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" Ordered fractions Problem 71 https://projecteuler.net/problem=71 Consider the fraction n/d, where n and d are positive integers. If n<d and HCF(n,d)=1, it is called a reduced proper fraction. If we list the set of reduced proper fractions for d ≤ 8 in ascending order of size, we get: 1/8, 1/7, 1/6, 1/5, 1/4, 2/7, 1/3, 3/8, 2/5, 3/7, 1/2, 4/7, 3/5, 5/8, 2/3, 5/7, 3/4, 4/5, 5/6, 6/7, 7/8 It can be seen that 2/5 is the fraction immediately to the left of 3/7. By listing the set of reduced proper fractions for d ≤ 1,000,000 in ascending order of size, find the numerator of the fraction immediately to the left of 3/7. """ def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int: """ Returns the closest numerator of the fraction immediately to the left of given fraction (numerator/denominator) from a list of reduced proper fractions. >>> solution() 428570 >>> solution(3, 7, 8) 2 >>> solution(6, 7, 60) 47 """ max_numerator = 0 max_denominator = 1 for current_denominator in range(1, limit + 1): current_numerator = current_denominator * numerator // denominator if current_denominator % denominator == 0: current_numerator -= 1 if current_numerator * max_denominator > current_denominator * max_numerator: max_numerator = current_numerator max_denominator = current_denominator return max_numerator if __name__ == "__main__": print(solution(numerator=3, denominator=7, limit=1000000))
""" Ordered fractions Problem 71 https://projecteuler.net/problem=71 Consider the fraction n/d, where n and d are positive integers. If n<d and HCF(n,d)=1, it is called a reduced proper fraction. If we list the set of reduced proper fractions for d ≤ 8 in ascending order of size, we get: 1/8, 1/7, 1/6, 1/5, 1/4, 2/7, 1/3, 3/8, 2/5, 3/7, 1/2, 4/7, 3/5, 5/8, 2/3, 5/7, 3/4, 4/5, 5/6, 6/7, 7/8 It can be seen that 2/5 is the fraction immediately to the left of 3/7. By listing the set of reduced proper fractions for d ≤ 1,000,000 in ascending order of size, find the numerator of the fraction immediately to the left of 3/7. """ def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int: """ Returns the closest numerator of the fraction immediately to the left of given fraction (numerator/denominator) from a list of reduced proper fractions. >>> solution() 428570 >>> solution(3, 7, 8) 2 >>> solution(6, 7, 60) 47 """ max_numerator = 0 max_denominator = 1 for current_denominator in range(1, limit + 1): current_numerator = current_denominator * numerator // denominator if current_denominator % denominator == 0: current_numerator -= 1 if current_numerator * max_denominator > current_denominator * max_numerator: max_numerator = current_numerator max_denominator = current_denominator return max_numerator if __name__ == "__main__": print(solution(numerator=3, denominator=7, limit=1000000))
-1
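The search in this record scans every denominator up to the limit and keeps the largest fraction strictly below numerator/denominator. A brute-force cross-check of that comparison step using `fractions.Fraction`, offered as an illustrative alternative rather than the repository's approach:

```python
from fractions import Fraction


def closest_left_numerator(numerator: int = 3, denominator: int = 7, limit: int = 8) -> int:
    """Largest fraction strictly below numerator/denominator over all d <= limit."""
    target = Fraction(numerator, denominator)
    best = Fraction(0, 1)
    for d in range(1, limit + 1):
        candidate = Fraction((d * numerator) // denominator, d)
        if best < candidate < target:
            best = candidate
    return best.numerator


assert closest_left_numerator(3, 7, 8) == 2  # matches the record's doctest for d <= 8
```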
TheAlgorithms/Python
4,293
[mypy] fix small folders 2
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-03-26T10:13:28Z"
"2021-03-26T11:21:17Z"
959507901ac8f10cd605c51c305d13b27d105536
9b60be67afca18f0d5e50e532096a68605d61b81
[mypy] fix small folders 2. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" Convert a string of characters to a sequence of numbers corresponding to the character's position in the alphabet. https://www.dcode.fr/letter-number-cipher http://bestcodes.weebly.com/a1z26.html """ def encode(plain: str) -> list[int]: """ >>> encode("myname") [13, 25, 14, 1, 13, 5] """ return [ord(elem) - 96 for elem in plain] def decode(encoded: list[int]) -> str: """ >>> decode([13, 25, 14, 1, 13, 5]) 'myname' """ return "".join(chr(elem + 96) for elem in encoded) def main() -> None: encoded = encode(input("-> ").strip().lower()) print("Encoded: ", encoded) print("Decoded:", decode(encoded)) if __name__ == "__main__": main()
""" Convert a string of characters to a sequence of numbers corresponding to the character's position in the alphabet. https://www.dcode.fr/letter-number-cipher http://bestcodes.weebly.com/a1z26.html """ def encode(plain: str) -> list[int]: """ >>> encode("myname") [13, 25, 14, 1, 13, 5] """ return [ord(elem) - 96 for elem in plain] def decode(encoded: list[int]) -> str: """ >>> decode([13, 25, 14, 1, 13, 5]) 'myname' """ return "".join(chr(elem + 96) for elem in encoded) def main() -> None: encoded = encode(input("-> ").strip().lower()) print("Encoded: ", encoded) print("Decoded:", decode(encoded)) if __name__ == "__main__": main()
-1
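The A1Z26 cipher in this record maps 'a'..'z' to 1..26 via `ord(ch) - 96`. A short worked example of that mapping, reusing the record's own encode/decode idea:

```python
# a=1, b=2, ..., z=26: ord("a") == 97, so ord(ch) - 96 gives the alphabet position.
encoded = [ord(ch) - 96 for ch in "myname"]
assert encoded == [13, 25, 14, 1, 13, 5]
decoded = "".join(chr(n + 96) for n in encoded)
assert decoded == "myname"
```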
TheAlgorithms/Python
4,293
[mypy] fix small folders 2
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-03-26T10:13:28Z"
"2021-03-26T11:21:17Z"
959507901ac8f10cd605c51c305d13b27d105536
9b60be67afca18f0d5e50e532096a68605d61b81
[mypy] fix small folders 2. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" Problem 20: https://projecteuler.net/problem=20 n! means n × (n − 1) × ... × 3 × 2 × 1 For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800, and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27. Find the sum of the digits in the number 100! """ def solution(num: int = 100) -> int: """Returns the sum of the digits in the factorial of num >>> solution(100) 648 >>> solution(50) 216 >>> solution(10) 27 >>> solution(5) 3 >>> solution(3) 6 >>> solution(2) 2 >>> solution(1) 1 """ fact = 1 result = 0 for i in range(1, num + 1): fact *= i for j in str(fact): result += int(j) return result if __name__ == "__main__": print(solution(int(input("Enter the Number: ").strip())))
""" Problem 20: https://projecteuler.net/problem=20 n! means n × (n − 1) × ... × 3 × 2 × 1 For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800, and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27. Find the sum of the digits in the number 100! """ def solution(num: int = 100) -> int: """Returns the sum of the digits in the factorial of num >>> solution(100) 648 >>> solution(50) 216 >>> solution(10) 27 >>> solution(5) 3 >>> solution(3) 6 >>> solution(2) 2 >>> solution(1) 1 """ fact = 1 result = 0 for i in range(1, num + 1): fact *= i for j in str(fact): result += int(j) return result if __name__ == "__main__": print(solution(int(input("Enter the Number: ").strip())))
-1
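The record computes 100! with an explicit loop and then sums its digits. The same computation via the standard library, as a quick cross-check of the doctest value:

```python
import math

# Digit sum of 100!; 648 matches the record's solution(100) doctest.
digit_sum = sum(int(d) for d in str(math.factorial(100)))
assert digit_sum == 648
```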
TheAlgorithms/Python
4,293
[mypy] fix small folders 2
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-03-26T10:13:28Z"
"2021-03-26T11:21:17Z"
959507901ac8f10cd605c51c305d13b27d105536
9b60be67afca18f0d5e50e532096a68605d61b81
[mypy] fix small folders 2. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" This is a pure Python implementation of the bogosort algorithm, also known as permutation sort, stupid sort, slowsort, shotgun sort, or monkey sort. Bogosort generates random permutations until it guesses the correct one. More info on: https://en.wikipedia.org/wiki/Bogosort For doctests run following command: python -m doctest -v bogo_sort.py or python3 -m doctest -v bogo_sort.py For manual testing run: python bogo_sort.py """ import random def bogo_sort(collection): """Pure implementation of the bogosort algorithm in Python :param collection: some mutable ordered collection with heterogeneous comparable items inside :return: the same collection ordered by ascending Examples: >>> bogo_sort([0, 5, 3, 2, 2]) [0, 2, 2, 3, 5] >>> bogo_sort([]) [] >>> bogo_sort([-2, -5, -45]) [-45, -5, -2] """ def is_sorted(collection): if len(collection) < 2: return True for i in range(len(collection) - 1): if collection[i] > collection[i + 1]: return False return True while not is_sorted(collection): random.shuffle(collection) return collection if __name__ == "__main__": user_input = input("Enter numbers separated by a comma:\n").strip() unsorted = [int(item) for item in user_input.split(",")] print(bogo_sort(unsorted))
""" This is a pure Python implementation of the bogosort algorithm, also known as permutation sort, stupid sort, slowsort, shotgun sort, or monkey sort. Bogosort generates random permutations until it guesses the correct one. More info on: https://en.wikipedia.org/wiki/Bogosort For doctests run following command: python -m doctest -v bogo_sort.py or python3 -m doctest -v bogo_sort.py For manual testing run: python bogo_sort.py """ import random def bogo_sort(collection): """Pure implementation of the bogosort algorithm in Python :param collection: some mutable ordered collection with heterogeneous comparable items inside :return: the same collection ordered by ascending Examples: >>> bogo_sort([0, 5, 3, 2, 2]) [0, 2, 2, 3, 5] >>> bogo_sort([]) [] >>> bogo_sort([-2, -5, -45]) [-45, -5, -2] """ def is_sorted(collection): if len(collection) < 2: return True for i in range(len(collection) - 1): if collection[i] > collection[i + 1]: return False return True while not is_sorted(collection): random.shuffle(collection) return collection if __name__ == "__main__": user_input = input("Enter numbers separated by a comma:\n").strip() unsorted = [int(item) for item in user_input.split(",")] print(bogo_sort(unsorted))
-1
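Bogosort's inner `is_sorted` check walks adjacent pairs; the sketch below rewrites that check with `all()` and seeds the random generator so the tiny demonstration is reproducible. This is an illustrative rewrite, not the repository's code:

```python
import random


def is_sorted(seq: list) -> bool:
    """Pairwise check written with all(); equivalent to the record's loop."""
    return all(seq[i] <= seq[i + 1] for i in range(len(seq) - 1))


random.seed(0)  # keeps the shuffle sequence deterministic for the demo
data = [3, 1, 2]
while not is_sorted(data):
    random.shuffle(data)
assert data == [1, 2, 3]
```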
TheAlgorithms/Python
4,293
[mypy] fix small folders 2
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-03-26T10:13:28Z"
"2021-03-26T11:21:17Z"
959507901ac8f10cd605c51c305d13b27d105536
9b60be67afca18f0d5e50e532096a68605d61b81
[mypy] fix small folders 2. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
# Created by sarathkaul on 17/11/19 # Modified by Arkadip Bhattacharya(@darkmatter18) on 20/04/2020 from collections import defaultdict def word_occurence(sentence: str) -> dict: """ >>> from collections import Counter >>> SENTENCE = "a b A b c b d b d e f e g e h e i e j e 0" >>> occurence_dict = word_occurence(SENTENCE) >>> all(occurence_dict[word] == count for word, count ... in Counter(SENTENCE.split()).items()) True >>> dict(word_occurence("Two spaces")) {'Two': 1, 'spaces': 1} """ occurrence = defaultdict(int) # Creating a dictionary containing count of each word for word in sentence.split(): occurrence[word] += 1 return occurrence if __name__ == "__main__": for word, count in word_occurence("INPUT STRING").items(): print(f"{word}: {count}")
# Created by sarathkaul on 17/11/19 # Modified by Arkadip Bhattacharya(@darkmatter18) on 20/04/2020 from collections import defaultdict def word_occurence(sentence: str) -> dict: """ >>> from collections import Counter >>> SENTENCE = "a b A b c b d b d e f e g e h e i e j e 0" >>> occurence_dict = word_occurence(SENTENCE) >>> all(occurence_dict[word] == count for word, count ... in Counter(SENTENCE.split()).items()) True >>> dict(word_occurence("Two spaces")) {'Two': 1, 'spaces': 1} """ occurrence = defaultdict(int) # Creating a dictionary containing count of each word for word in sentence.split(): occurrence[word] += 1 return occurrence if __name__ == "__main__": for word, count in word_occurence("INPUT STRING").items(): print(f"{word}: {count}")
-1
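The record's `word_occurence` builds counts with a `defaultdict(int)` loop, and its doctest already checks the result against `collections.Counter`. The Counter equivalent in isolation:

```python
from collections import Counter

sentence = "a b A b c b d b d e f e g e h e i e j e 0"
counts = Counter(sentence.split())  # same mapping the defaultdict loop produces
assert counts["b"] == 4 and counts["e"] == 6 and counts["A"] == 1  # case-sensitive
```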
TheAlgorithms/Python
4,293
[mypy] fix small folders 2
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-03-26T10:13:28Z"
"2021-03-26T11:21:17Z"
959507901ac8f10cd605c51c305d13b27d105536
9b60be67afca18f0d5e50e532096a68605d61b81
[mypy] fix small folders 2. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
#!/usr/bin/env python3 """ Build a simple bare-minimum quantum circuit that starts with a single qubit (by default, in state 0), runs the experiment 1000 times, and finally prints the total count of the states finally observed. Qiskit Docs: https://qiskit.org/documentation/getting_started.html """ import qiskit as q def single_qubit_measure(qubits: int, classical_bits: int) -> q.result.counts.Counts: """ >>> single_qubit_measure(1, 1) {'0': 1000} """ # Use Aer's qasm_simulator simulator = q.Aer.get_backend("qasm_simulator") # Create a Quantum Circuit acting on the q register circuit = q.QuantumCircuit(qubits, classical_bits) # Map the quantum measurement to the classical bits circuit.measure([0], [0]) # Execute the circuit on the qasm simulator job = q.execute(circuit, simulator, shots=1000) # Return the histogram data of the results of the experiment. return job.result().get_counts(circuit) if __name__ == "__main__": print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
#!/usr/bin/env python3 """ Build a simple bare-minimum quantum circuit that starts with a single qubit (by default, in state 0), runs the experiment 1000 times, and finally prints the total count of the states finally observed. Qiskit Docs: https://qiskit.org/documentation/getting_started.html """ import qiskit as q def single_qubit_measure(qubits: int, classical_bits: int) -> q.result.counts.Counts: """ >>> single_qubit_measure(1, 1) {'0': 1000} """ # Use Aer's qasm_simulator simulator = q.Aer.get_backend("qasm_simulator") # Create a Quantum Circuit acting on the q register circuit = q.QuantumCircuit(qubits, classical_bits) # Map the quantum measurement to the classical bits circuit.measure([0], [0]) # Execute the circuit on the qasm simulator job = q.execute(circuit, simulator, shots=1000) # Return the histogram data of the results of the experiment. return job.result().get_counts(circuit) if __name__ == "__main__": print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
-1
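The record measures a qubit left in state 0 and therefore observes only '0'. A sketch that adds a Hadamard gate before measurement; it assumes the same legacy Qiskit API used in the record (`q.Aer`, `q.execute`), which newer Qiskit releases replace, so treat it as illustrative only:

```python
import qiskit as q

simulator = q.Aer.get_backend("qasm_simulator")  # same backend as the record
circuit = q.QuantumCircuit(1, 1)
circuit.h(0)  # Hadamard puts the qubit in an equal superposition
circuit.measure([0], [0])
job = q.execute(circuit, simulator, shots=1000)
print(job.result().get_counts(circuit))  # roughly half '0' and half '1'
```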
TheAlgorithms/Python
4,293
[mypy] fix small folders 2
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-03-26T10:13:28Z"
"2021-03-26T11:21:17Z"
959507901ac8f10cd605c51c305d13b27d105536
9b60be67afca18f0d5e50e532096a68605d61b81
[mypy] fix small folders 2. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
# A Python implementation of the Banker's Algorithm in Operating Systems using # Processes and Resources # { # "Author: "Biney Kingsley ([email protected]), [email protected]", # "Date": 28-10-2018 # } """ The Banker's algorithm is a resource allocation and deadlock avoidance algorithm developed by Edsger Dijkstra that tests for safety by simulating the allocation of predetermined maximum possible amounts of all resources, and then makes a "s-state" check to test for possible deadlock conditions for all other pending activities, before deciding whether allocation should be allowed to continue. [Source] Wikipedia [Credit] Rosetta Code C implementation helped very much. (https://rosettacode.org/wiki/Banker%27s_algorithm) """ from __future__ import annotations import time import numpy as np test_claim_vector = [8, 5, 9, 7] test_allocated_res_table = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] test_maximum_claim_table = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class BankersAlgorithm: def __init__( self, claim_vector: list[int], allocated_resources_table: list[list[int]], maximum_claim_table: list[list[int]], ) -> None: """ :param claim_vector: A nxn/nxm list depicting the amount of each resources (eg. memory, interface, semaphores, etc.) available. :param allocated_resources_table: A nxn/nxm list depicting the amount of each resource each process is currently holding :param maximum_claim_table: A nxn/nxm list depicting how much of each resource the system currently has available """ self.__claim_vector = claim_vector self.__allocated_resources_table = allocated_resources_table self.__maximum_claim_table = maximum_claim_table def __processes_resource_summation(self) -> list[int]: """ Check for allocated resources in line with each resource in the claim vector """ return [ sum(p_item[i] for p_item in self.__allocated_resources_table) for i in range(len(self.__allocated_resources_table[0])) ] def __available_resources(self) -> list[int]: """ Check for available resources in line with each resource in the claim vector """ return np.array(self.__claim_vector) - np.array( self.__processes_resource_summation() ) def __need(self) -> list[list[int]]: """ Implement safety checker that calculates the needs by ensuring that max_claim[i][j] - alloc_table[i][j] <= avail[j] """ return [ list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource)) for i, allocated_resource in enumerate(self.__allocated_resources_table) ] def __need_index_manager(self) -> dict[int, list[int]]: """ This function builds an index control dictionary to track original ids/indices of processes when altered during execution of method "main" Return: {0: [a: int, b: int], 1: [c: int, d: int]} >>> (BankersAlgorithm(test_claim_vector, test_allocated_res_table, ... test_maximum_claim_table)._BankersAlgorithm__need_index_manager() ... ) # doctest: +NORMALIZE_WHITESPACE {0: [1, 2, 0, 3], 1: [0, 1, 3, 1], 2: [1, 1, 0, 2], 3: [1, 3, 2, 0], 4: [2, 0, 0, 3]} """ return {self.__need().index(i): i for i in self.__need()} def main(self, **kwargs) -> None: """ Utilize various methods in this class to simulate the Banker's algorithm Return: None >>> BankersAlgorithm(test_claim_vector, test_allocated_res_table, ... 
test_maximum_claim_table).main(describe=True) Allocated Resource Table P1 2 0 1 1 <BLANKLINE> P2 0 1 2 1 <BLANKLINE> P3 4 0 0 3 <BLANKLINE> P4 0 2 1 0 <BLANKLINE> P5 1 0 3 0 <BLANKLINE> System Resource Table P1 3 2 1 4 <BLANKLINE> P2 0 2 5 2 <BLANKLINE> P3 5 1 0 5 <BLANKLINE> P4 1 5 3 0 <BLANKLINE> P5 3 0 3 3 <BLANKLINE> Current Usage by Active Processes: 8 5 9 7 Initial Available Resources: 1 2 2 2 __________________________________________________ <BLANKLINE> Process 3 is executing. Updated available resource stack for processes: 5 2 2 5 The process is in a safe state. <BLANKLINE> Process 1 is executing. Updated available resource stack for processes: 7 2 3 6 The process is in a safe state. <BLANKLINE> Process 2 is executing. Updated available resource stack for processes: 7 3 5 7 The process is in a safe state. <BLANKLINE> Process 4 is executing. Updated available resource stack for processes: 7 5 6 7 The process is in a safe state. <BLANKLINE> Process 5 is executing. Updated available resource stack for processes: 8 5 9 7 The process is in a safe state. <BLANKLINE> """ need_list = self.__need() alloc_resources_table = self.__allocated_resources_table available_resources = self.__available_resources() need_index_manager = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print("_" * 50 + "\n") while need_list: safe = False for each_need in need_list: execution = True for index, need in enumerate(each_need): if need > available_resources[index]: execution = False break if execution: safe = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: process_number = original_need_index print(f"Process {process_number + 1} is executing.") # remove the process run from stack need_list.remove(each_need) # update available/freed resources stack available_resources = np.array(available_resources) + np.array( alloc_resources_table[process_number] ) print( "Updated available resource stack for processes: " + " ".join([str(x) for x in available_resources]) ) break if safe: print("The process is in a safe state.\n") else: print("System in unsafe state. Aborting...\n") break def __pretty_data(self): """ Properly align display of the algorithm's solution """ print(" " * 9 + "Allocated Resource Table") for item in self.__allocated_resources_table: print( f"P{self.__allocated_resources_table.index(item) + 1}" + " ".join(f"{it:>8}" for it in item) + "\n" ) print(" " * 9 + "System Resource Table") for item in self.__maximum_claim_table: print( f"P{self.__maximum_claim_table.index(item) + 1}" + " ".join(f"{it:>8}" for it in item) + "\n" ) print( "Current Usage by Active Processes: " + " ".join(str(x) for x in self.__claim_vector) ) print( "Initial Available Resources: " + " ".join(str(x) for x in self.__available_resources()) ) time.sleep(1) if __name__ == "__main__": import doctest doctest.testmod()
# A Python implementation of the Banker's Algorithm in Operating Systems using # Processes and Resources # { # "Author: "Biney Kingsley ([email protected]), [email protected]", # "Date": 28-10-2018 # } """ The Banker's algorithm is a resource allocation and deadlock avoidance algorithm developed by Edsger Dijkstra that tests for safety by simulating the allocation of predetermined maximum possible amounts of all resources, and then makes a "s-state" check to test for possible deadlock conditions for all other pending activities, before deciding whether allocation should be allowed to continue. [Source] Wikipedia [Credit] Rosetta Code C implementation helped very much. (https://rosettacode.org/wiki/Banker%27s_algorithm) """ from __future__ import annotations import time import numpy as np test_claim_vector = [8, 5, 9, 7] test_allocated_res_table = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] test_maximum_claim_table = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class BankersAlgorithm: def __init__( self, claim_vector: list[int], allocated_resources_table: list[list[int]], maximum_claim_table: list[list[int]], ) -> None: """ :param claim_vector: A nxn/nxm list depicting the amount of each resources (eg. memory, interface, semaphores, etc.) available. :param allocated_resources_table: A nxn/nxm list depicting the amount of each resource each process is currently holding :param maximum_claim_table: A nxn/nxm list depicting how much of each resource the system currently has available """ self.__claim_vector = claim_vector self.__allocated_resources_table = allocated_resources_table self.__maximum_claim_table = maximum_claim_table def __processes_resource_summation(self) -> list[int]: """ Check for allocated resources in line with each resource in the claim vector """ return [ sum(p_item[i] for p_item in self.__allocated_resources_table) for i in range(len(self.__allocated_resources_table[0])) ] def __available_resources(self) -> list[int]: """ Check for available resources in line with each resource in the claim vector """ return np.array(self.__claim_vector) - np.array( self.__processes_resource_summation() ) def __need(self) -> list[list[int]]: """ Implement safety checker that calculates the needs by ensuring that max_claim[i][j] - alloc_table[i][j] <= avail[j] """ return [ list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource)) for i, allocated_resource in enumerate(self.__allocated_resources_table) ] def __need_index_manager(self) -> dict[int, list[int]]: """ This function builds an index control dictionary to track original ids/indices of processes when altered during execution of method "main" Return: {0: [a: int, b: int], 1: [c: int, d: int]} >>> (BankersAlgorithm(test_claim_vector, test_allocated_res_table, ... test_maximum_claim_table)._BankersAlgorithm__need_index_manager() ... ) # doctest: +NORMALIZE_WHITESPACE {0: [1, 2, 0, 3], 1: [0, 1, 3, 1], 2: [1, 1, 0, 2], 3: [1, 3, 2, 0], 4: [2, 0, 0, 3]} """ return {self.__need().index(i): i for i in self.__need()} def main(self, **kwargs) -> None: """ Utilize various methods in this class to simulate the Banker's algorithm Return: None >>> BankersAlgorithm(test_claim_vector, test_allocated_res_table, ... 
test_maximum_claim_table).main(describe=True) Allocated Resource Table P1 2 0 1 1 <BLANKLINE> P2 0 1 2 1 <BLANKLINE> P3 4 0 0 3 <BLANKLINE> P4 0 2 1 0 <BLANKLINE> P5 1 0 3 0 <BLANKLINE> System Resource Table P1 3 2 1 4 <BLANKLINE> P2 0 2 5 2 <BLANKLINE> P3 5 1 0 5 <BLANKLINE> P4 1 5 3 0 <BLANKLINE> P5 3 0 3 3 <BLANKLINE> Current Usage by Active Processes: 8 5 9 7 Initial Available Resources: 1 2 2 2 __________________________________________________ <BLANKLINE> Process 3 is executing. Updated available resource stack for processes: 5 2 2 5 The process is in a safe state. <BLANKLINE> Process 1 is executing. Updated available resource stack for processes: 7 2 3 6 The process is in a safe state. <BLANKLINE> Process 2 is executing. Updated available resource stack for processes: 7 3 5 7 The process is in a safe state. <BLANKLINE> Process 4 is executing. Updated available resource stack for processes: 7 5 6 7 The process is in a safe state. <BLANKLINE> Process 5 is executing. Updated available resource stack for processes: 8 5 9 7 The process is in a safe state. <BLANKLINE> """ need_list = self.__need() alloc_resources_table = self.__allocated_resources_table available_resources = self.__available_resources() need_index_manager = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print("_" * 50 + "\n") while need_list: safe = False for each_need in need_list: execution = True for index, need in enumerate(each_need): if need > available_resources[index]: execution = False break if execution: safe = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: process_number = original_need_index print(f"Process {process_number + 1} is executing.") # remove the process run from stack need_list.remove(each_need) # update available/freed resources stack available_resources = np.array(available_resources) + np.array( alloc_resources_table[process_number] ) print( "Updated available resource stack for processes: " + " ".join([str(x) for x in available_resources]) ) break if safe: print("The process is in a safe state.\n") else: print("System in unsafe state. Aborting...\n") break def __pretty_data(self): """ Properly align display of the algorithm's solution """ print(" " * 9 + "Allocated Resource Table") for item in self.__allocated_resources_table: print( f"P{self.__allocated_resources_table.index(item) + 1}" + " ".join(f"{it:>8}" for it in item) + "\n" ) print(" " * 9 + "System Resource Table") for item in self.__maximum_claim_table: print( f"P{self.__maximum_claim_table.index(item) + 1}" + " ".join(f"{it:>8}" for it in item) + "\n" ) print( "Current Usage by Active Processes: " + " ".join(str(x) for x in self.__claim_vector) ) print( "Initial Available Resources: " + " ".join(str(x) for x in self.__available_resources()) ) time.sleep(1) if __name__ == "__main__": import doctest doctest.testmod()
-1
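For readers who want the safety check without the class and its pretty-printing, here is a compact standalone sketch of the Banker's safety algorithm using the same test tables. Note that it keeps scanning in a single pass, so the execution order it reports can differ from the one the class prints, even though both are safe orders:

```python
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3],
]


def is_safe(claim_vector, allocated, maximum):
    # free resources = total claim minus what all processes currently hold
    available = [
        total - sum(row[j] for row in allocated)
        for j, total in enumerate(claim_vector)
    ]
    # remaining need of each process = maximum claim minus current allocation
    need = [
        [m - a for m, a in zip(max_row, alloc_row)]
        for max_row, alloc_row in zip(maximum, allocated)
    ]
    finished = [False] * len(allocated)
    order = []
    while len(order) < len(allocated):
        progressed = False
        for i, done in enumerate(finished):
            if not done and all(n <= a for n, a in zip(need[i], available)):
                # process i can finish and release everything it holds
                available = [a + r for a, r in zip(available, allocated[i])]
                finished[i] = True
                order.append(i + 1)
                progressed = True
        if not progressed:  # nothing can run to completion -> unsafe state
            return False, order
    return True, order


print(is_safe(test_claim_vector, test_allocated_res_table, test_maximum_claim_table))
# (True, [3, 5, 1, 2, 4])
```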
TheAlgorithms/Python
4,293
[mypy] fix small folders 2
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-03-26T10:13:28Z"
"2021-03-26T11:21:17Z"
959507901ac8f10cd605c51c305d13b27d105536
9b60be67afca18f0d5e50e532096a68605d61b81
[mypy] fix small folders 2. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" Implementation Burke's algorithm (dithering) """ import numpy as np from cv2 import destroyAllWindows, imread, imshow, waitKey class Burkes: """ Burke's algorithm is using for converting grayscale image to black and white version Source: Source: https://en.wikipedia.org/wiki/Dither Note: * Best results are given with threshold= ~1/2 * max greyscale value. * This implementation get RGB image and converts it to greyscale in runtime. """ def __init__(self, input_img, threshold: int): self.min_threshold = 0 # max greyscale value for #FFFFFF self.max_threshold = int(self.get_greyscale(255, 255, 255)) if not self.min_threshold < threshold < self.max_threshold: raise ValueError(f"Factor value should be from 0 to {self.max_threshold}") self.input_img = input_img self.threshold = threshold self.width, self.height = self.input_img.shape[1], self.input_img.shape[0] # error table size (+4 columns and +1 row) greater than input image because of # lack of if statements self.error_table = [ [0 for _ in range(self.height + 4)] for __ in range(self.width + 1) ] self.output_img = np.ones((self.width, self.height, 3), np.uint8) * 255 @classmethod def get_greyscale(cls, blue: int, green: int, red: int) -> float: """ >>> Burkes.get_greyscale(3, 4, 5) 3.753 """ return 0.114 * blue + 0.587 * green + 0.2126 * red def process(self) -> None: for y in range(self.height): for x in range(self.width): greyscale = int(self.get_greyscale(*self.input_img[y][x])) if self.threshold > greyscale + self.error_table[y][x]: self.output_img[y][x] = (0, 0, 0) current_error = greyscale + self.error_table[x][y] else: self.output_img[y][x] = (255, 255, 255) current_error = greyscale + self.error_table[x][y] - 255 """ Burkes error propagation (`*` is current pixel): * 8/32 4/32 2/32 4/32 8/32 4/32 2/32 """ self.error_table[y][x + 1] += int(8 / 32 * current_error) self.error_table[y][x + 2] += int(4 / 32 * current_error) self.error_table[y + 1][x] += int(8 / 32 * current_error) self.error_table[y + 1][x + 1] += int(4 / 32 * current_error) self.error_table[y + 1][x + 2] += int(2 / 32 * current_error) self.error_table[y + 1][x - 1] += int(4 / 32 * current_error) self.error_table[y + 1][x - 2] += int(2 / 32 * current_error) if __name__ == "__main__": # create Burke's instances with original images in greyscale burkes_instances = [ Burkes(imread("image_data/lena.jpg", 1), threshold) for threshold in (1, 126, 130, 140) ] for burkes in burkes_instances: burkes.process() for burkes in burkes_instances: imshow( f"Original image with dithering threshold: {burkes.threshold}", burkes.output_img, ) waitKey(0) destroyAllWindows()
""" Implementation Burke's algorithm (dithering) """ import numpy as np from cv2 import destroyAllWindows, imread, imshow, waitKey class Burkes: """ Burke's algorithm is using for converting grayscale image to black and white version Source: Source: https://en.wikipedia.org/wiki/Dither Note: * Best results are given with threshold= ~1/2 * max greyscale value. * This implementation get RGB image and converts it to greyscale in runtime. """ def __init__(self, input_img, threshold: int): self.min_threshold = 0 # max greyscale value for #FFFFFF self.max_threshold = int(self.get_greyscale(255, 255, 255)) if not self.min_threshold < threshold < self.max_threshold: raise ValueError(f"Factor value should be from 0 to {self.max_threshold}") self.input_img = input_img self.threshold = threshold self.width, self.height = self.input_img.shape[1], self.input_img.shape[0] # error table size (+4 columns and +1 row) greater than input image because of # lack of if statements self.error_table = [ [0 for _ in range(self.height + 4)] for __ in range(self.width + 1) ] self.output_img = np.ones((self.width, self.height, 3), np.uint8) * 255 @classmethod def get_greyscale(cls, blue: int, green: int, red: int) -> float: """ >>> Burkes.get_greyscale(3, 4, 5) 3.753 """ return 0.114 * blue + 0.587 * green + 0.2126 * red def process(self) -> None: for y in range(self.height): for x in range(self.width): greyscale = int(self.get_greyscale(*self.input_img[y][x])) if self.threshold > greyscale + self.error_table[y][x]: self.output_img[y][x] = (0, 0, 0) current_error = greyscale + self.error_table[x][y] else: self.output_img[y][x] = (255, 255, 255) current_error = greyscale + self.error_table[x][y] - 255 """ Burkes error propagation (`*` is current pixel): * 8/32 4/32 2/32 4/32 8/32 4/32 2/32 """ self.error_table[y][x + 1] += int(8 / 32 * current_error) self.error_table[y][x + 2] += int(4 / 32 * current_error) self.error_table[y + 1][x] += int(8 / 32 * current_error) self.error_table[y + 1][x + 1] += int(4 / 32 * current_error) self.error_table[y + 1][x + 2] += int(2 / 32 * current_error) self.error_table[y + 1][x - 1] += int(4 / 32 * current_error) self.error_table[y + 1][x - 2] += int(2 / 32 * current_error) if __name__ == "__main__": # create Burke's instances with original images in greyscale burkes_instances = [ Burkes(imread("image_data/lena.jpg", 1), threshold) for threshold in (1, 126, 130, 140) ] for burkes in burkes_instances: burkes.process() for burkes in burkes_instances: imshow( f"Original image with dithering threshold: {burkes.threshold}", burkes.output_img, ) waitKey(0) destroyAllWindows()
-1
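As a rough standalone sketch of the same error-diffusion idea (assuming a plain greyscale matrix and a hypothetical threshold of 127, with no OpenCV or RGB conversion involved), the Burkes kernel can be applied to a tiny array like this:

```python
def burkes_dither(grey, threshold=127):
    height, width = len(grey), len(grey[0])
    # pad the error buffer so the kernel never needs bounds checks;
    # negative indices simply wrap into unused padding columns
    error = [[0.0] * (width + 4) for _ in range(height + 1)]
    output = [[0] * width for _ in range(height)]
    for y in range(height):
        for x in range(width):
            value = grey[y][x] + error[y][x]
            output[y][x] = 0 if value < threshold else 255
            diff = value - output[y][x]  # quantisation error to diffuse
            # Burkes kernel: 8/32 and 4/32 to the right, 2..8/32 on the next row
            error[y][x + 1] += diff * 8 / 32
            error[y][x + 2] += diff * 4 / 32
            error[y + 1][x - 2] += diff * 2 / 32
            error[y + 1][x - 1] += diff * 4 / 32
            error[y + 1][x] += diff * 8 / 32
            error[y + 1][x + 1] += diff * 4 / 32
            error[y + 1][x + 2] += diff * 2 / 32
    return output


print(burkes_dither([[0, 64, 128, 192], [255, 200, 100, 50]]))
```

Each output pixel ends up as 0 or 255, with the quantisation error of every decision pushed onto the pixels to the right and below.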
TheAlgorithms/Python
4,293
[mypy] fix small folders 2
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-03-26T10:13:28Z"
"2021-03-26T11:21:17Z"
959507901ac8f10cd605c51c305d13b27d105536
9b60be67afca18f0d5e50e532096a68605d61b81
[mypy] fix small folders 2. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" == Perfect Number == In number theory, a perfect number is a positive integer that is equal to the sum of its positive divisors, excluding the number itself. For example: 6 ==> divisors[1, 2, 3, 6] Excluding 6, the sum(divisors) is 1 + 2 + 3 = 6 So, 6 is a Perfect Number Other examples of Perfect Numbers: 28, 486, ... https://en.wikipedia.org/wiki/Perfect_number """ def perfect(number: int) -> bool: """ >>> perfect(27) False >>> perfect(28) True >>> perfect(29) False Start from 1 because dividing by 0 will raise ZeroDivisionError. A number at most can be divisible by the half of the number except the number itself. For example, 6 is at most can be divisible by 3 except by 6 itself. """ return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number if __name__ == "__main__": print("Program to check whether a number is a Perfect number or not...") number = int(input("Enter number: ").strip()) print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
""" == Perfect Number == In number theory, a perfect number is a positive integer that is equal to the sum of its positive divisors, excluding the number itself. For example: 6 ==> divisors[1, 2, 3, 6] Excluding 6, the sum(divisors) is 1 + 2 + 3 = 6 So, 6 is a Perfect Number Other examples of Perfect Numbers: 28, 486, ... https://en.wikipedia.org/wiki/Perfect_number """ def perfect(number: int) -> bool: """ >>> perfect(27) False >>> perfect(28) True >>> perfect(29) False Start from 1 because dividing by 0 will raise ZeroDivisionError. A number at most can be divisible by the half of the number except the number itself. For example, 6 is at most can be divisible by 3 except by 6 itself. """ return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number if __name__ == "__main__": print("Program to check whether a number is a Perfect number or not...") number = int(input("Enter number: ").strip()) print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
-1
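As a quick sanity check of the definition above, the proper divisors of 28 sum back to 28 while those of 27 do not, mirroring the divisor scan inside perfect():

```python
for number in (27, 28):
    divisors = [i for i in range(1, number // 2 + 1) if number % i == 0]
    print(number, divisors, sum(divisors) == number)
# 27 [1, 3, 9] False
# 28 [1, 2, 4, 7, 14] True
```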
TheAlgorithms/Python
4,293
[mypy] fix small folders 2
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-03-26T10:13:28Z"
"2021-03-26T11:21:17Z"
959507901ac8f10cd605c51c305d13b27d105536
9b60be67afca18f0d5e50e532096a68605d61b81
[mypy] fix small folders 2. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
#!/usr/bin/env python3 from .hash_table import HashTable from .number_theory.prime_numbers import check_prime, next_prime class DoubleHash(HashTable): """ Hash Table example with open addressing and Double Hash """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def __hash_function_2(self, value, data): next_prime_gt = ( next_prime(value % self.size_table) if not check_prime(value % self.size_table) else value % self.size_table ) # gt = bigger than return next_prime_gt - (data % next_prime_gt) def __hash_double_function(self, key, data, increment): return (increment * self.__hash_function_2(key, data)) % self.size_table def _collision_resolution(self, key, data=None): i = 1 new_key = self.hash_function(data) while self.values[new_key] is not None and self.values[new_key] != key: new_key = ( self.__hash_double_function(key, data, i) if self.balanced_factor() >= self.lim_charge else None ) if new_key is None: break else: i += 1 return new_key
#!/usr/bin/env python3 from .hash_table import HashTable from .number_theory.prime_numbers import check_prime, next_prime class DoubleHash(HashTable): """ Hash Table example with open addressing and Double Hash """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def __hash_function_2(self, value, data): next_prime_gt = ( next_prime(value % self.size_table) if not check_prime(value % self.size_table) else value % self.size_table ) # gt = bigger than return next_prime_gt - (data % next_prime_gt) def __hash_double_function(self, key, data, increment): return (increment * self.__hash_function_2(key, data)) % self.size_table def _collision_resolution(self, key, data=None): i = 1 new_key = self.hash_function(data) while self.values[new_key] is not None and self.values[new_key] != key: new_key = ( self.__hash_double_function(key, data, i) if self.balanced_factor() >= self.lim_charge else None ) if new_key is None: break else: i += 1 return new_key
-1
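The class above depends on the surrounding HashTable package, so it cannot be run on its own. As a hedged standalone sketch of the probe sequence that double hashing produces (using a hypothetical second hash h2(k) = 7 - (k % 7) instead of the file's prime-based one), the idea looks like this:

```python
def double_hash_probes(key: int, size: int = 11, attempts: int = 5) -> list[int]:
    h1 = key % size     # primary slot
    h2 = 7 - (key % 7)  # step size; never zero, so every probe moves on
    return [(h1 + i * h2) % size for i in range(attempts)]


print(double_hash_probes(15))  # [4, 10, 5, 0, 6]
```

Because the step size depends on the key, two keys that collide on their first slot usually follow different probe sequences, which is the point of double hashing over linear probing.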
TheAlgorithms/Python
4,293
[mypy] fix small folders 2
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-03-26T10:13:28Z"
"2021-03-26T11:21:17Z"
959507901ac8f10cd605c51c305d13b27d105536
9b60be67afca18f0d5e50e532096a68605d61b81
[mypy] fix small folders 2. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" The Mandelbrot set is the set of complex numbers "c" for which the series "z_(n+1) = z_n * z_n + c" does not diverge, i.e. remains bounded. Thus, a complex number "c" is a member of the Mandelbrot set if, when starting with "z_0 = 0" and applying the iteration repeatedly, the absolute value of "z_n" remains bounded for all "n > 0". Complex numbers can be written as "a + b*i": "a" is the real component, usually drawn on the x-axis, and "b*i" is the imaginary component, usually drawn on the y-axis. Most visualizations of the Mandelbrot set use a color-coding to indicate after how many steps in the series the numbers outside the set diverge. Images of the Mandelbrot set exhibit an elaborate and infinitely complicated boundary that reveals progressively ever-finer recursive detail at increasing magnifications, making the boundary of the Mandelbrot set a fractal curve. (description adapted from https://en.wikipedia.org/wiki/Mandelbrot_set ) (see also https://en.wikipedia.org/wiki/Plotting_algorithms_for_the_Mandelbrot_set ) """ import colorsys from PIL import Image # type: ignore def get_distance(x: float, y: float, max_step: int) -> float: """ Return the relative distance (= step/max_step) after which the complex number constituted by this x-y-pair diverges. Members of the Mandelbrot set do not diverge so their distance is 1. >>> get_distance(0, 0, 50) 1.0 >>> get_distance(0.5, 0.5, 50) 0.061224489795918366 >>> get_distance(2, 0, 50) 0.0 """ a = x b = y for step in range(max_step): a_new = a * a - b * b + x b = 2 * a * b + y a = a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def get_black_and_white_rgb(distance: float) -> tuple: """ Black&white color-coding that ignores the relative distance. The Mandelbrot set is black, everything else is white. >>> get_black_and_white_rgb(0) (255, 255, 255) >>> get_black_and_white_rgb(0.5) (255, 255, 255) >>> get_black_and_white_rgb(1) (0, 0, 0) """ if distance == 1: return (0, 0, 0) else: return (255, 255, 255) def get_color_coded_rgb(distance: float) -> tuple: """ Color-coding taking the relative distance into account. The Mandelbrot set is black. >>> get_color_coded_rgb(0) (255, 0, 0) >>> get_color_coded_rgb(0.5) (0, 255, 255) >>> get_color_coded_rgb(1) (0, 0, 0) """ if distance == 1: return (0, 0, 0) else: return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1)) def get_image( image_width: int = 800, image_height: int = 600, figure_center_x: float = -0.6, figure_center_y: float = 0, figure_width: float = 3.2, max_step: int = 50, use_distance_color_coding: bool = True, ) -> Image.Image: """ Function to generate the image of the Mandelbrot set. Two types of coordinates are used: image-coordinates that refer to the pixels and figure-coordinates that refer to the complex numbers inside and outside the Mandelbrot set. The figure-coordinates in the arguments of this function determine which section of the Mandelbrot set is viewed. The main area of the Mandelbrot set is roughly between "-1.5 < x < 0.5" and "-1 < y < 1" in the figure-coordinates. 
>>> get_image().load()[0,0] (255, 0, 0) >>> get_image(use_distance_color_coding = False).load()[0,0] (255, 255, 255) """ img = Image.new("RGB", (image_width, image_height)) pixels = img.load() # loop through the image-coordinates for image_x in range(image_width): for image_y in range(image_height): # determine the figure-coordinates based on the image-coordinates figure_height = figure_width / image_width * image_height figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height distance = get_distance(figure_x, figure_y, max_step) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: pixels[image_x, image_y] = get_color_coded_rgb(distance) else: pixels[image_x, image_y] = get_black_and_white_rgb(distance) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure img = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
""" The Mandelbrot set is the set of complex numbers "c" for which the series "z_(n+1) = z_n * z_n + c" does not diverge, i.e. remains bounded. Thus, a complex number "c" is a member of the Mandelbrot set if, when starting with "z_0 = 0" and applying the iteration repeatedly, the absolute value of "z_n" remains bounded for all "n > 0". Complex numbers can be written as "a + b*i": "a" is the real component, usually drawn on the x-axis, and "b*i" is the imaginary component, usually drawn on the y-axis. Most visualizations of the Mandelbrot set use a color-coding to indicate after how many steps in the series the numbers outside the set diverge. Images of the Mandelbrot set exhibit an elaborate and infinitely complicated boundary that reveals progressively ever-finer recursive detail at increasing magnifications, making the boundary of the Mandelbrot set a fractal curve. (description adapted from https://en.wikipedia.org/wiki/Mandelbrot_set ) (see also https://en.wikipedia.org/wiki/Plotting_algorithms_for_the_Mandelbrot_set ) """ import colorsys from PIL import Image # type: ignore def get_distance(x: float, y: float, max_step: int) -> float: """ Return the relative distance (= step/max_step) after which the complex number constituted by this x-y-pair diverges. Members of the Mandelbrot set do not diverge so their distance is 1. >>> get_distance(0, 0, 50) 1.0 >>> get_distance(0.5, 0.5, 50) 0.061224489795918366 >>> get_distance(2, 0, 50) 0.0 """ a = x b = y for step in range(max_step): a_new = a * a - b * b + x b = 2 * a * b + y a = a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def get_black_and_white_rgb(distance: float) -> tuple: """ Black&white color-coding that ignores the relative distance. The Mandelbrot set is black, everything else is white. >>> get_black_and_white_rgb(0) (255, 255, 255) >>> get_black_and_white_rgb(0.5) (255, 255, 255) >>> get_black_and_white_rgb(1) (0, 0, 0) """ if distance == 1: return (0, 0, 0) else: return (255, 255, 255) def get_color_coded_rgb(distance: float) -> tuple: """ Color-coding taking the relative distance into account. The Mandelbrot set is black. >>> get_color_coded_rgb(0) (255, 0, 0) >>> get_color_coded_rgb(0.5) (0, 255, 255) >>> get_color_coded_rgb(1) (0, 0, 0) """ if distance == 1: return (0, 0, 0) else: return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1)) def get_image( image_width: int = 800, image_height: int = 600, figure_center_x: float = -0.6, figure_center_y: float = 0, figure_width: float = 3.2, max_step: int = 50, use_distance_color_coding: bool = True, ) -> Image.Image: """ Function to generate the image of the Mandelbrot set. Two types of coordinates are used: image-coordinates that refer to the pixels and figure-coordinates that refer to the complex numbers inside and outside the Mandelbrot set. The figure-coordinates in the arguments of this function determine which section of the Mandelbrot set is viewed. The main area of the Mandelbrot set is roughly between "-1.5 < x < 0.5" and "-1 < y < 1" in the figure-coordinates. 
>>> get_image().load()[0,0] (255, 0, 0) >>> get_image(use_distance_color_coding = False).load()[0,0] (255, 255, 255) """ img = Image.new("RGB", (image_width, image_height)) pixels = img.load() # loop through the image-coordinates for image_x in range(image_width): for image_y in range(image_height): # determine the figure-coordinates based on the image-coordinates figure_height = figure_width / image_width * image_height figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height distance = get_distance(figure_x, figure_y, max_step) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: pixels[image_x, image_y] = get_color_coded_rgb(distance) else: pixels[image_x, image_y] = get_black_and_white_rgb(distance) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure img = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
-1
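Independently of the PIL plotting, the escape-time idea behind get_distance can be checked on a few points. A minimal sketch using Python's built-in complex type rather than the file's hand-rolled a/b arithmetic:

```python
def escapes(c: complex, max_step: int = 50) -> bool:
    z = 0j
    for _ in range(max_step):
        z = z * z + c
        if abs(z) > 2:  # once |z| > 2 the sequence is guaranteed to diverge
            return True
    return False


print(escapes(0 + 0j))      # False: 0 is in the Mandelbrot set
print(escapes(0.5 + 0.5j))  # True: diverges after a few steps
print(escapes(2 + 0j))      # True: escapes almost immediately
```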
TheAlgorithms/Python
4,293
[mypy] fix small folders 2
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-03-26T10:13:28Z"
"2021-03-26T11:21:17Z"
959507901ac8f10cd605c51c305d13b27d105536
9b60be67afca18f0d5e50e532096a68605d61b81
[mypy] fix small folders 2. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
-1
TheAlgorithms/Python
4,293
[mypy] fix small folders 2
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-03-26T10:13:28Z"
"2021-03-26T11:21:17Z"
959507901ac8f10cd605c51c305d13b27d105536
9b60be67afca18f0d5e50e532096a68605d61b81
[mypy] fix small folders 2. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" Author : Syed Faizan ( 3rd Year IIIT Pune ) Github : faizan2700 Purpose : You have one function f(x) which takes float integer and returns float you have to integrate the function in limits a to b. The approximation proposed by Thomas Simpsons in 1743 is one way to calculate integration. ( read article : https://cp-algorithms.com/num_methods/simpson-integration.html ) simpson_integration() takes function,lower_limit=a,upper_limit=b,precision and returns the integration of function in given limit. """ # constants # the more the number of steps the more accurate N_STEPS = 1000 def f(x: float) -> float: return x * x """ Summary of Simpson Approximation : By simpsons integration : 1. integration of fxdx with limit a to b is = f(x0) + 4 * f(x1) + 2 * f(x2) + 4 * f(x3) + 2 * f(x4)..... + f(xn) where x0 = a xi = a + i * h xn = b """ def simpson_integration(function, a: float, b: float, precision: int = 4) -> float: """ Args: function : the function which's integration is desired a : the lower limit of integration b : upper limit of integraion precision : precision of the result,error required default is 4 Returns: result : the value of the approximated integration of function in range a to b Raises: AssertionError: function is not callable AssertionError: a is not float or integer AssertionError: function should return float or integer AssertionError: b is not float or integer AssertionError: precision is not positive integer >>> simpson_integration(lambda x : x*x,1,2,3) 2.333 >>> simpson_integration(lambda x : x*x,'wrong_input',2,3) Traceback (most recent call last): ... AssertionError: a should be float or integer your input : wrong_input >>> simpson_integration(lambda x : x*x,1,'wrong_input',3) Traceback (most recent call last): ... AssertionError: b should be float or integer your input : wrong_input >>> simpson_integration(lambda x : x*x,1,2,'wrong_input') Traceback (most recent call last): ... AssertionError: precision should be positive integer your input : wrong_input >>> simpson_integration('wrong_input',2,3,4) Traceback (most recent call last): ... AssertionError: the function(object) passed should be callable your input : ... >>> simpson_integration(lambda x : x*x,3.45,3.2,1) -2.8 >>> simpson_integration(lambda x : x*x,3.45,3.2,0) Traceback (most recent call last): ... AssertionError: precision should be positive integer your input : 0 >>> simpson_integration(lambda x : x*x,3.45,3.2,-1) Traceback (most recent call last): ... AssertionError: precision should be positive integer your input : -1 """ assert callable( function ), f"the function(object) passed should be callable your input : {function}" assert isinstance(a, float) or isinstance( a, int ), f"a should be float or integer your input : {a}" assert isinstance(function(a), float) or isinstance(function(a), int), ( "the function should return integer or float return type of your function, " f"{type(a)}" ) assert isinstance(b, float) or isinstance( b, int ), f"b should be float or integer your input : {b}" assert ( isinstance(precision, int) and precision > 0 ), f"precision should be positive integer your input : {precision}" # just applying the formula of simpson for approximate integraion written in # mentioned article in first comment of this file and above this function h = (b - a) / N_STEPS result = function(a) + function(b) for i in range(1, N_STEPS): a1 = a + h * i result += function(a1) * (4 if i % 2 else 2) result *= h / 3 return round(result, precision) if __name__ == "__main__": import doctest doctest.testmod()
""" Author : Syed Faizan ( 3rd Year IIIT Pune ) Github : faizan2700 Purpose : You have one function f(x) which takes float integer and returns float you have to integrate the function in limits a to b. The approximation proposed by Thomas Simpsons in 1743 is one way to calculate integration. ( read article : https://cp-algorithms.com/num_methods/simpson-integration.html ) simpson_integration() takes function,lower_limit=a,upper_limit=b,precision and returns the integration of function in given limit. """ # constants # the more the number of steps the more accurate N_STEPS = 1000 def f(x: float) -> float: return x * x """ Summary of Simpson Approximation : By simpsons integration : 1. integration of fxdx with limit a to b is = f(x0) + 4 * f(x1) + 2 * f(x2) + 4 * f(x3) + 2 * f(x4)..... + f(xn) where x0 = a xi = a + i * h xn = b """ def simpson_integration(function, a: float, b: float, precision: int = 4) -> float: """ Args: function : the function which's integration is desired a : the lower limit of integration b : upper limit of integraion precision : precision of the result,error required default is 4 Returns: result : the value of the approximated integration of function in range a to b Raises: AssertionError: function is not callable AssertionError: a is not float or integer AssertionError: function should return float or integer AssertionError: b is not float or integer AssertionError: precision is not positive integer >>> simpson_integration(lambda x : x*x,1,2,3) 2.333 >>> simpson_integration(lambda x : x*x,'wrong_input',2,3) Traceback (most recent call last): ... AssertionError: a should be float or integer your input : wrong_input >>> simpson_integration(lambda x : x*x,1,'wrong_input',3) Traceback (most recent call last): ... AssertionError: b should be float or integer your input : wrong_input >>> simpson_integration(lambda x : x*x,1,2,'wrong_input') Traceback (most recent call last): ... AssertionError: precision should be positive integer your input : wrong_input >>> simpson_integration('wrong_input',2,3,4) Traceback (most recent call last): ... AssertionError: the function(object) passed should be callable your input : ... >>> simpson_integration(lambda x : x*x,3.45,3.2,1) -2.8 >>> simpson_integration(lambda x : x*x,3.45,3.2,0) Traceback (most recent call last): ... AssertionError: precision should be positive integer your input : 0 >>> simpson_integration(lambda x : x*x,3.45,3.2,-1) Traceback (most recent call last): ... AssertionError: precision should be positive integer your input : -1 """ assert callable( function ), f"the function(object) passed should be callable your input : {function}" assert isinstance(a, float) or isinstance( a, int ), f"a should be float or integer your input : {a}" assert isinstance(function(a), float) or isinstance(function(a), int), ( "the function should return integer or float return type of your function, " f"{type(a)}" ) assert isinstance(b, float) or isinstance( b, int ), f"b should be float or integer your input : {b}" assert ( isinstance(precision, int) and precision > 0 ), f"precision should be positive integer your input : {precision}" # just applying the formula of simpson for approximate integraion written in # mentioned article in first comment of this file and above this function h = (b - a) / N_STEPS result = function(a) + function(b) for i in range(1, N_STEPS): a1 = a + h * i result += function(a1) * (4 if i % 2 else 2) result *= h / 3 return round(result, precision) if __name__ == "__main__": import doctest doctest.testmod()
-1
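The doctest value 2.333 for the integral of x*x from 1 to 2 can be checked against the exact antiderivative x**3 / 3. A small sketch that mirrors the file's composite-Simpson formula and compares it with the analytic result:

```python
def simpson(function, a, b, steps=1000):
    h = (b - a) / steps
    result = function(a) + function(b)
    for i in range(1, steps):
        result += function(a + i * h) * (4 if i % 2 else 2)
    return result * h / 3


exact = (2 ** 3 - 1 ** 3) / 3  # integral of x**2 from 1 to 2 is x**3 / 3
print(round(simpson(lambda x: x * x, 1, 2), 3), round(exact, 3))  # 2.333 2.333
```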
TheAlgorithms/Python
4,293
[mypy] fix small folders 2
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-03-26T10:13:28Z"
"2021-03-26T11:21:17Z"
959507901ac8f10cd605c51c305d13b27d105536
9b60be67afca18f0d5e50e532096a68605d61b81
[mypy] fix small folders 2. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
-1
TheAlgorithms/Python
4,293
[mypy] fix small folders 2
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-03-26T10:13:28Z"
"2021-03-26T11:21:17Z"
959507901ac8f10cd605c51c305d13b27d105536
9b60be67afca18f0d5e50e532096a68605d61b81
[mypy] fix small folders 2. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
"""https://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance""" def jaro_winkler(str1: str, str2: str) -> float: """ Jaro–Winkler distance is a string metric measuring an edit distance between two sequences. Output value is between 0.0 and 1.0. >>> jaro_winkler("martha", "marhta") 0.9611111111111111 >>> jaro_winkler("CRATE", "TRACE") 0.7333333333333334 >>> jaro_winkler("test", "dbdbdbdb") 0.0 >>> jaro_winkler("test", "test") 1.0 >>> jaro_winkler("hello world", "HeLLo W0rlD") 0.6363636363636364 >>> jaro_winkler("test", "") 0.0 >>> jaro_winkler("hello", "world") 0.4666666666666666 >>> jaro_winkler("hell**o", "*world") 0.4365079365079365 """ def get_matched_characters(_str1: str, _str2: str) -> str: matched = [] limit = min(len(_str1), len(_str2)) // 2 for i, l in enumerate(_str1): left = int(max(0, i - limit)) right = int(min(i + limit + 1, len(_str2))) if l in _str2[left:right]: matched.append(l) _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}" return "".join(matched) # matching characters matching_1 = get_matched_characters(str1, str2) matching_2 = get_matched_characters(str2, str1) match_count = len(matching_1) # transposition transpositions = ( len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2 ) if not match_count: jaro = 0.0 else: jaro = ( 1 / 3 * ( match_count / len(str1) + match_count / len(str2) + (match_count - transpositions) / match_count ) ) # common prefix up to 4 characters prefix_len = 0 for c1, c2 in zip(str1[:4], str2[:4]): if c1 == c2: prefix_len += 1 else: break return jaro + 0.1 * prefix_len * (1 - jaro) if __name__ == "__main__": import doctest doctest.testmod() print(jaro_winkler("hello", "world"))
"""https://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance""" def jaro_winkler(str1: str, str2: str) -> float: """ Jaro–Winkler distance is a string metric measuring an edit distance between two sequences. Output value is between 0.0 and 1.0. >>> jaro_winkler("martha", "marhta") 0.9611111111111111 >>> jaro_winkler("CRATE", "TRACE") 0.7333333333333334 >>> jaro_winkler("test", "dbdbdbdb") 0.0 >>> jaro_winkler("test", "test") 1.0 >>> jaro_winkler("hello world", "HeLLo W0rlD") 0.6363636363636364 >>> jaro_winkler("test", "") 0.0 >>> jaro_winkler("hello", "world") 0.4666666666666666 >>> jaro_winkler("hell**o", "*world") 0.4365079365079365 """ def get_matched_characters(_str1: str, _str2: str) -> str: matched = [] limit = min(len(_str1), len(_str2)) // 2 for i, l in enumerate(_str1): left = int(max(0, i - limit)) right = int(min(i + limit + 1, len(_str2))) if l in _str2[left:right]: matched.append(l) _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}" return "".join(matched) # matching characters matching_1 = get_matched_characters(str1, str2) matching_2 = get_matched_characters(str2, str1) match_count = len(matching_1) # transposition transpositions = ( len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2 ) if not match_count: jaro = 0.0 else: jaro = ( 1 / 3 * ( match_count / len(str1) + match_count / len(str2) + (match_count - transpositions) / match_count ) ) # common prefix up to 4 characters prefix_len = 0 for c1, c2 in zip(str1[:4], str2[:4]): if c1 == c2: prefix_len += 1 else: break return jaro + 0.1 * prefix_len * (1 - jaro) if __name__ == "__main__": import doctest doctest.testmod() print(jaro_winkler("hello", "world"))
-1
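The Jaro and Winkler terms in the file above combine as jaro + p * prefix * (1 - jaro). A hand-worked numeric sketch for the classic "martha"/"marhta" pair (the match, transposition and prefix counts are the standard textbook values for this pair, stated here rather than recomputed):

# Worked example of the Jaro / Jaro-Winkler formulas for "martha" vs "marhta".
m, t = 6, 1          # matching characters and transpositions
len1 = len2 = 6      # both strings have length 6
prefix, p = 3, 0.1   # common prefix "mar" and the usual scaling factor

jaro = (m / len1 + m / len2 + (m - t) / m) / 3
jaro_winkler_score = jaro + p * prefix * (1 - jaro)
print(f"jaro={jaro:.4f} jaro_winkler={jaro_winkler_score:.4f}")  # ~0.9444 and ~0.9611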
TheAlgorithms/Python
4,293
[mypy] fix small folders 2
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-03-26T10:13:28Z"
"2021-03-26T11:21:17Z"
959507901ac8f10cd605c51c305d13b27d105536
9b60be67afca18f0d5e50e532096a68605d61b81
[mypy] fix small folders 2. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" Implements a disjoint set using Lists and some added heuristics for efficiency Union by Rank Heuristic and Path Compression """ class DisjointSet: def __init__(self, set_counts: list) -> None: """ Initialize with a list of the number of items in each set and with rank = 1 for each set """ self.set_counts = set_counts self.max_set = max(set_counts) num_sets = len(set_counts) self.ranks = [1] * num_sets self.parents = list(range(num_sets)) def merge(self, src: int, dst: int) -> bool: """ Merge two sets together using Union by rank heuristic Return True if successful Merge two disjoint sets >>> A = DisjointSet([1, 1, 1]) >>> A.merge(1, 2) True >>> A.merge(0, 2) True >>> A.merge(0, 1) False """ src_parent = self.get_parent(src) dst_parent = self.get_parent(dst) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] self.set_counts[src_parent] = 0 self.parents[src_parent] = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 joined_set_size = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] self.set_counts[dst_parent] = 0 self.parents[dst_parent] = src_parent joined_set_size = self.set_counts[src_parent] self.max_set = max(self.max_set, joined_set_size) return True def get_parent(self, disj_set: int) -> int: """ Find the Parent of a given set >>> A = DisjointSet([1, 1, 1]) >>> A.merge(1, 2) True >>> A.get_parent(0) 0 >>> A.get_parent(1) 2 """ if self.parents[disj_set] == disj_set: return disj_set self.parents[disj_set] = self.get_parent(self.parents[disj_set]) return self.parents[disj_set]
""" Implements a disjoint set using Lists and some added heuristics for efficiency Union by Rank Heuristic and Path Compression """ class DisjointSet: def __init__(self, set_counts: list) -> None: """ Initialize with a list of the number of items in each set and with rank = 1 for each set """ self.set_counts = set_counts self.max_set = max(set_counts) num_sets = len(set_counts) self.ranks = [1] * num_sets self.parents = list(range(num_sets)) def merge(self, src: int, dst: int) -> bool: """ Merge two sets together using Union by rank heuristic Return True if successful Merge two disjoint sets >>> A = DisjointSet([1, 1, 1]) >>> A.merge(1, 2) True >>> A.merge(0, 2) True >>> A.merge(0, 1) False """ src_parent = self.get_parent(src) dst_parent = self.get_parent(dst) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] self.set_counts[src_parent] = 0 self.parents[src_parent] = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 joined_set_size = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] self.set_counts[dst_parent] = 0 self.parents[dst_parent] = src_parent joined_set_size = self.set_counts[src_parent] self.max_set = max(self.max_set, joined_set_size) return True def get_parent(self, disj_set: int) -> int: """ Find the Parent of a given set >>> A = DisjointSet([1, 1, 1]) >>> A.merge(1, 2) True >>> A.get_parent(0) 0 >>> A.get_parent(1) 2 """ if self.parents[disj_set] == disj_set: return disj_set self.parents[disj_set] = self.get_parent(self.parents[disj_set]) return self.parents[disj_set]
-1
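Union by rank and path compression, as used in the DisjointSet class above, can be illustrated with a minimal standalone union-find. This sketch is not the class from the file; the element count is an illustrative assumption:

# Minimal standalone union-find sketch on integer elements 0..n-1.
def build(n: int):
    return list(range(n)), [1] * n  # parents, ranks


def find(parents: list, x: int) -> int:
    if parents[x] != x:
        parents[x] = find(parents, parents[x])  # path compression
    return parents[x]


def union(parents: list, ranks: list, a: int, b: int) -> bool:
    ra, rb = find(parents, a), find(parents, b)
    if ra == rb:
        return False
    if ranks[ra] < ranks[rb]:  # attach the shallower tree under the deeper one
        ra, rb = rb, ra
    parents[rb] = ra
    if ranks[ra] == ranks[rb]:
        ranks[ra] += 1
    return True


if __name__ == "__main__":
    parents, ranks = build(3)
    assert union(parents, ranks, 1, 2)
    assert union(parents, ranks, 0, 2)
    assert not union(parents, ranks, 0, 1)  # already in the same set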
TheAlgorithms/Python
4,293
[mypy] fix small folders 2
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-03-26T10:13:28Z"
"2021-03-26T11:21:17Z"
959507901ac8f10cd605c51c305d13b27d105536
9b60be67afca18f0d5e50e532096a68605d61b81
[mypy] fix small folders 2. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
# Numbers of alphabet which we call base
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    """
    The Rabin-Karp Algorithm for finding a pattern within a piece of text
    runs in O(nm) in the worst case (expected O(n + m) with a good hash).
    It is most efficient when used with multiple patterns, as it can check
    whether any of a set of patterns matches a section of text in O(1) given
    the precomputed hashes.

    This will be the simple version which only assumes one pattern is being
    searched for, but it's not hard to modify.

    1) Calculate pattern hash

    2) Step through the text one character at a time, passing a window with the
        same length as the pattern, calculating the hash of the text within the
        window and comparing it with the hash of the pattern. Only test string
        equality if the hashes match.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the rolling hash: https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    """
    >>> test_rabin_karp()
    Success.
    """
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()

# Numbers of alphabet which we call base
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    """
    The Rabin-Karp Algorithm for finding a pattern within a piece of text
    runs in O(nm) in the worst case (expected O(n + m) with a good hash).
    It is most efficient when used with multiple patterns, as it can check
    whether any of a set of patterns matches a section of text in O(1) given
    the precomputed hashes.

    This will be the simple version which only assumes one pattern is being
    searched for, but it's not hard to modify.

    1) Calculate pattern hash

    2) Step through the text one character at a time, passing a window with the
        same length as the pattern, calculating the hash of the text within the
        window and comparing it with the hash of the pattern. Only test string
        equality if the hashes match.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the rolling hash: https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    """
    >>> test_rabin_karp()
    Success.
    """
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
-1
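The rolling-hash update in the file above drops the leading character and appends the next one. Below is a small self-contained sketch that checks the rolled hash against a full recomputation for every window (the sample text and window length are illustrative; the base and modulus mirror the constants above but could be anything):

# Rolling-hash sketch: recompute vs. roll for every window of length 3.
base, mod = 256, 1000003
text, m = "abcdef", 3


def full_hash(s: str) -> int:
    h = 0
    for ch in s:
        h = (h * base + ord(ch)) % mod
    return h


power = pow(base, m - 1, mod)  # base**(m-1), used to drop the leading character
h = full_hash(text[:m])
for i in range(len(text) - m):
    # drop text[i], shift the window, then append text[i + m]
    h = ((h - ord(text[i]) * power) * base + ord(text[i + m])) % mod
    assert h == full_hash(text[i + 1 : i + 1 + m])
print("rolling hash matches recomputation for every window")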
TheAlgorithms/Python
4,293
[mypy] fix small folders 2
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-03-26T10:13:28Z"
"2021-03-26T11:21:17Z"
959507901ac8f10cd605c51c305d13b27d105536
9b60be67afca18f0d5e50e532096a68605d61b81
[mypy] fix small folders 2. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
-1
TheAlgorithms/Python
4,293
[mypy] fix small folders 2
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-03-26T10:13:28Z"
"2021-03-26T11:21:17Z"
959507901ac8f10cd605c51c305d13b27d105536
9b60be67afca18f0d5e50e532096a68605d61b81
[mypy] fix small folders 2. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" Project Euler Problem 6: https://projecteuler.net/problem=6 Sum square difference The sum of the squares of the first ten natural numbers is, 1^2 + 2^2 + ... + 10^2 = 385 The square of the sum of the first ten natural numbers is, (1 + 2 + ... + 10)^2 = 55^2 = 3025 Hence the difference between the sum of the squares of the first ten natural numbers and the square of the sum is 3025 - 385 = 2640. Find the difference between the sum of the squares of the first one hundred natural numbers and the square of the sum. """ def solution(n: int = 100) -> int: """ Returns the difference between the sum of the squares of the first n natural numbers and the square of the sum. >>> solution(10) 2640 >>> solution(15) 13160 >>> solution(20) 41230 >>> solution(50) 1582700 """ sum_of_squares = 0 sum_of_ints = 0 for i in range(1, n + 1): sum_of_squares += i ** 2 sum_of_ints += i return sum_of_ints ** 2 - sum_of_squares if __name__ == "__main__": print(f"{solution() = }")
""" Project Euler Problem 6: https://projecteuler.net/problem=6 Sum square difference The sum of the squares of the first ten natural numbers is, 1^2 + 2^2 + ... + 10^2 = 385 The square of the sum of the first ten natural numbers is, (1 + 2 + ... + 10)^2 = 55^2 = 3025 Hence the difference between the sum of the squares of the first ten natural numbers and the square of the sum is 3025 - 385 = 2640. Find the difference between the sum of the squares of the first one hundred natural numbers and the square of the sum. """ def solution(n: int = 100) -> int: """ Returns the difference between the sum of the squares of the first n natural numbers and the square of the sum. >>> solution(10) 2640 >>> solution(15) 13160 >>> solution(20) 41230 >>> solution(50) 1582700 """ sum_of_squares = 0 sum_of_ints = 0 for i in range(1, n + 1): sum_of_squares += i ** 2 sum_of_ints += i return sum_of_ints ** 2 - sum_of_squares if __name__ == "__main__": print(f"{solution() = }")
-1
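The loop-based solution above can be cross-checked with a closed form. A sketch assuming the standard summation formulas sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6:

# Closed-form cross-check for the sum-square difference.
def solution_closed_form(n: int = 100) -> int:
    sum_of_ints = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints ** 2 - sum_of_squares


if __name__ == "__main__":
    assert solution_closed_form(10) == 2640  # matches the worked example
    print(solution_closed_form())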
TheAlgorithms/Python
4,293
[mypy] fix small folders 2
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-03-26T10:13:28Z"
"2021-03-26T11:21:17Z"
959507901ac8f10cd605c51c305d13b27d105536
9b60be67afca18f0d5e50e532096a68605d61b81
[mypy] fix small folders 2. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" Implementation of gaussian filter algorithm """ from itertools import product from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros def gen_gaussian_kernel(k_size, sigma): center = k_size // 2 x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center] g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma))) return g def gaussian_filter(image, k_size, sigma): height, width = image.shape[0], image.shape[1] # dst image height and width dst_height = height - k_size + 1 dst_width = width - k_size + 1 # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows image_array = zeros((dst_height * dst_width, k_size * k_size)) row = 0 for i, j in product(range(dst_height), range(dst_width)): window = ravel(image[i : i + k_size, j : j + k_size]) image_array[row, :] = window row += 1 # turn the kernel into shape(k*k, 1) gaussian_kernel = gen_gaussian_kernel(k_size, sigma) filter_array = ravel(gaussian_kernel) # reshape and get the dst image dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8) return dst if __name__ == "__main__": # read original image img = imread(r"../image_data/lena.jpg") # turn image in gray scale value gray = cvtColor(img, COLOR_BGR2GRAY) # get values with two different mask size gaussian3x3 = gaussian_filter(gray, 3, sigma=1) gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8) # show result images imshow("gaussian filter with 3x3 mask", gaussian3x3) imshow("gaussian filter with 5x5 mask", gaussian5x5) waitKey()
""" Implementation of gaussian filter algorithm """ from itertools import product from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros def gen_gaussian_kernel(k_size, sigma): center = k_size // 2 x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center] g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma))) return g def gaussian_filter(image, k_size, sigma): height, width = image.shape[0], image.shape[1] # dst image height and width dst_height = height - k_size + 1 dst_width = width - k_size + 1 # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows image_array = zeros((dst_height * dst_width, k_size * k_size)) row = 0 for i, j in product(range(dst_height), range(dst_width)): window = ravel(image[i : i + k_size, j : j + k_size]) image_array[row, :] = window row += 1 # turn the kernel into shape(k*k, 1) gaussian_kernel = gen_gaussian_kernel(k_size, sigma) filter_array = ravel(gaussian_kernel) # reshape and get the dst image dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8) return dst if __name__ == "__main__": # read original image img = imread(r"../image_data/lena.jpg") # turn image in gray scale value gray = cvtColor(img, COLOR_BGR2GRAY) # get values with two different mask size gaussian3x3 = gaussian_filter(gray, 3, sigma=1) gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8) # show result images imshow("gaussian filter with 3x3 mask", gaussian3x3) imshow("gaussian filter with 5x5 mask", gaussian5x5) waitKey()
-1
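The kernel generator above can be exercised without cv2 or an input image. A sketch (illustrative size and sigma) that builds a small Gaussian kernel with NumPy and normalizes it so its entries sum to 1, which is the usual convention for a brightness-preserving filter:

# Kernel-only sketch: no image I/O needed.
import numpy as np


def gaussian_kernel(k_size: int, sigma: float) -> np.ndarray:
    center = k_size // 2
    x, y = np.mgrid[-center : k_size - center, -center : k_size - center]
    g = np.exp(-(x ** 2 + y ** 2) / (2 * sigma ** 2))
    return g / g.sum()  # normalize so the filter preserves overall brightness


if __name__ == "__main__":
    kernel = gaussian_kernel(3, sigma=1.0)
    print(kernel.round(3))
    print("sum =", kernel.sum())  # ~1.0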
TheAlgorithms/Python
4,293
[mypy] fix small folders 2
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-03-26T10:13:28Z"
"2021-03-26T11:21:17Z"
959507901ac8f10cd605c51c305d13b27d105536
9b60be67afca18f0d5e50e532096a68605d61b81
[mypy] fix small folders 2. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
def bubble_sort(collection):
    """Pure implementation of bubble sort algorithm in Python

    :param collection: some mutable ordered collection with heterogeneous
        comparable items inside
    :return: the same collection ordered by ascending

    Examples:
    >>> bubble_sort([0, 5, 2, 3, 2])
    [0, 2, 2, 3, 5]
    >>> bubble_sort([0, 5, 2, 3, 2]) == sorted([0, 5, 2, 3, 2])
    True
    >>> bubble_sort([]) == sorted([])
    True
    >>> bubble_sort([-2, -45, -5]) == sorted([-2, -45, -5])
    True
    >>> bubble_sort([-23, 0, 6, -4, 34]) == sorted([-23, 0, 6, -4, 34])
    True
    >>> bubble_sort(['d', 'a', 'b', 'e', 'c']) == sorted(['d', 'a', 'b', 'e', 'c'])
    True
    >>> import random
    >>> collection = random.sample(range(-50, 50), 100)
    >>> bubble_sort(collection) == sorted(collection)
    True
    >>> import string
    >>> collection = random.choices(string.ascii_letters + string.digits, k=100)
    >>> bubble_sort(collection) == sorted(collection)
    True
    """
    length = len(collection)
    for i in range(length - 1):
        swapped = False
        for j in range(length - 1 - i):
            if collection[j] > collection[j + 1]:
                swapped = True
                collection[j], collection[j + 1] = collection[j + 1], collection[j]
        if not swapped:
            break  # Stop iteration if the collection is sorted.
    return collection


if __name__ == "__main__":
    import doctest
    import time

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma:").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    start = time.process_time()
    print(*bubble_sort(unsorted), sep=",")
    print(f"Processing time: {time.process_time() - start}")

def bubble_sort(collection):
    """Pure implementation of bubble sort algorithm in Python

    :param collection: some mutable ordered collection with heterogeneous
        comparable items inside
    :return: the same collection ordered by ascending

    Examples:
    >>> bubble_sort([0, 5, 2, 3, 2])
    [0, 2, 2, 3, 5]
    >>> bubble_sort([0, 5, 2, 3, 2]) == sorted([0, 5, 2, 3, 2])
    True
    >>> bubble_sort([]) == sorted([])
    True
    >>> bubble_sort([-2, -45, -5]) == sorted([-2, -45, -5])
    True
    >>> bubble_sort([-23, 0, 6, -4, 34]) == sorted([-23, 0, 6, -4, 34])
    True
    >>> bubble_sort(['d', 'a', 'b', 'e', 'c']) == sorted(['d', 'a', 'b', 'e', 'c'])
    True
    >>> import random
    >>> collection = random.sample(range(-50, 50), 100)
    >>> bubble_sort(collection) == sorted(collection)
    True
    >>> import string
    >>> collection = random.choices(string.ascii_letters + string.digits, k=100)
    >>> bubble_sort(collection) == sorted(collection)
    True
    """
    length = len(collection)
    for i in range(length - 1):
        swapped = False
        for j in range(length - 1 - i):
            if collection[j] > collection[j + 1]:
                swapped = True
                collection[j], collection[j + 1] = collection[j + 1], collection[j]
        if not swapped:
            break  # Stop iteration if the collection is sorted.
    return collection


if __name__ == "__main__":
    import doctest
    import time

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma:").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    start = time.process_time()
    print(*bubble_sort(unsorted), sep=",")
    print(f"Processing time: {time.process_time() - start}")
-1
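The `swapped` early exit in the bubble sort above is what makes the best case linear. A small sketch that counts passes on a sorted versus a reversed input (the inputs are illustrative):

# Count how many outer passes bubble sort needs with the early-exit flag.
def bubble_sort_count_passes(collection: list) -> int:
    items = list(collection)
    passes = 0
    for i in range(len(items) - 1):
        passes += 1
        swapped = False
        for j in range(len(items) - 1 - i):
            if items[j] > items[j + 1]:
                items[j], items[j + 1] = items[j + 1], items[j]
                swapped = True
        if not swapped:
            break
    return passes


if __name__ == "__main__":
    print(bubble_sort_count_passes([1, 2, 3, 4, 5]))  # 1 pass: already sorted
    print(bubble_sort_count_passes([5, 4, 3, 2, 1]))  # 4 passes for 5 items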
TheAlgorithms/Python
4,293
[mypy] fix small folders 2
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-03-26T10:13:28Z"
"2021-03-26T11:21:17Z"
959507901ac8f10cd605c51c305d13b27d105536
9b60be67afca18f0d5e50e532096a68605d61b81
[mypy] fix small folders 2. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
""" Project Euler Problem 3: https://projecteuler.net/problem=3 Largest prime factor The prime factors of 13195 are 5, 7, 13 and 29. What is the largest prime factor of the number 600851475143? References: - https://en.wikipedia.org/wiki/Prime_number#Unique_factorization """ def solution(n: int = 600851475143) -> int: """ Returns the largest prime factor of a given number n. >>> solution(13195) 29 >>> solution(10) 5 >>> solution(17) 17 >>> solution(3.4) 3 >>> solution(0) Traceback (most recent call last): ... ValueError: Parameter n must be greater than or equal to one. >>> solution(-17) Traceback (most recent call last): ... ValueError: Parameter n must be greater than or equal to one. >>> solution([]) Traceback (most recent call last): ... TypeError: Parameter n must be int or castable to int. >>> solution("asd") Traceback (most recent call last): ... TypeError: Parameter n must be int or castable to int. """ try: n = int(n) except (TypeError, ValueError): raise TypeError("Parameter n must be int or castable to int.") if n <= 0: raise ValueError("Parameter n must be greater than or equal to one.") prime = 1 i = 2 while i * i <= n: while n % i == 0: prime = i n //= i i += 1 if n > 1: prime = n return int(prime) if __name__ == "__main__": print(f"{solution() = }")
""" Project Euler Problem 3: https://projecteuler.net/problem=3 Largest prime factor The prime factors of 13195 are 5, 7, 13 and 29. What is the largest prime factor of the number 600851475143? References: - https://en.wikipedia.org/wiki/Prime_number#Unique_factorization """ def solution(n: int = 600851475143) -> int: """ Returns the largest prime factor of a given number n. >>> solution(13195) 29 >>> solution(10) 5 >>> solution(17) 17 >>> solution(3.4) 3 >>> solution(0) Traceback (most recent call last): ... ValueError: Parameter n must be greater than or equal to one. >>> solution(-17) Traceback (most recent call last): ... ValueError: Parameter n must be greater than or equal to one. >>> solution([]) Traceback (most recent call last): ... TypeError: Parameter n must be int or castable to int. >>> solution("asd") Traceback (most recent call last): ... TypeError: Parameter n must be int or castable to int. """ try: n = int(n) except (TypeError, ValueError): raise TypeError("Parameter n must be int or castable to int.") if n <= 0: raise ValueError("Parameter n must be greater than or equal to one.") prime = 1 i = 2 while i * i <= n: while n % i == 0: prime = i n //= i i += 1 if n > 1: prime = n return int(prime) if __name__ == "__main__": print(f"{solution() = }")
-1
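The trial-division loop above only needs to run while i * i <= n. A sketch that rebuilds the full factor list the same way and checks that the factors multiply back to n (13195 is the example from the problem statement):

# Cross-check: full factorization by trial division.
def prime_factors(n: int) -> list:
    factors = []
    i = 2
    while i * i <= n:
        while n % i == 0:
            factors.append(i)
            n //= i
        i += 1
    if n > 1:
        factors.append(n)  # whatever remains is itself prime
    return factors


if __name__ == "__main__":
    factors = prime_factors(13195)
    assert factors == [5, 7, 13, 29]
    assert max(factors) == 29
    product = 1
    for factor in factors:
        product *= factor
    assert product == 13195
    print(factors)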
TheAlgorithms/Python
4,293
[mypy] fix small folders 2
### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
algobytewise
"2021-03-26T10:13:28Z"
"2021-03-26T11:21:17Z"
959507901ac8f10cd605c51c305d13b27d105536
9b60be67afca18f0d5e50e532096a68605d61b81
[mypy] fix small folders 2. ### **Describe your change:** Related Issue: #4052 * [ ] Add an algorithm? * [x] Fix a bug or typo in an existing algorithm? * [ ] Documentation change? ### **Checklist:** * [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md). * [x] This pull request is all my own work -- I have not plagiarized. * [x] I know that pull requests will not be merged if they fail the automated tests. * [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms. * [ ] All new Python files are placed inside an existing directory. * [ ] All filenames are in all lowercase characters with no spaces or dashes. * [ ] All functions and variable names follow Python naming conventions. * [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html). * [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing. * [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation. * [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
def printDist(dist, V):
    print("\nVertex Distance")
    for i in range(V):
        if dist[i] != float("inf"):
            print(i, "\t", int(dist[i]), end="\t")
        else:
            print(i, "\t", "INF", end="\t")
    print()


def minDist(mdist, vset, V):
    minVal = float("inf")
    minInd = -1
    for i in range(V):
        if (not vset[i]) and mdist[i] < minVal:
            minInd = i
            minVal = mdist[i]
    return minInd


def Dijkstra(graph, V, src):
    mdist = [float("inf") for i in range(V)]
    vset = [False for i in range(V)]
    mdist[src] = 0.0

    for i in range(V - 1):
        u = minDist(mdist, vset, V)
        vset[u] = True

        for v in range(V):
            if (
                (not vset[v])
                and graph[u][v] != float("inf")
                and mdist[u] + graph[u][v] < mdist[v]
            ):
                mdist[v] = mdist[u] + graph[u][v]

    printDist(mdist, V)


if __name__ == "__main__":
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph = [[float("inf") for i in range(V)] for j in range(V)]

    for i in range(V):
        graph[i][i] = 0.0

    for i in range(E):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:").strip())
        dst = int(input("Enter destination:").strip())
        weight = float(input("Enter weight:").strip())
        graph[src][dst] = weight

    gsrc = int(input("\nEnter shortest path source:").strip())
    Dijkstra(graph, V, gsrc)

def printDist(dist, V):
    print("\nVertex Distance")
    for i in range(V):
        if dist[i] != float("inf"):
            print(i, "\t", int(dist[i]), end="\t")
        else:
            print(i, "\t", "INF", end="\t")
    print()


def minDist(mdist, vset, V):
    minVal = float("inf")
    minInd = -1
    for i in range(V):
        if (not vset[i]) and mdist[i] < minVal:
            minInd = i
            minVal = mdist[i]
    return minInd


def Dijkstra(graph, V, src):
    mdist = [float("inf") for i in range(V)]
    vset = [False for i in range(V)]
    mdist[src] = 0.0

    for i in range(V - 1):
        u = minDist(mdist, vset, V)
        vset[u] = True

        for v in range(V):
            if (
                (not vset[v])
                and graph[u][v] != float("inf")
                and mdist[u] + graph[u][v] < mdist[v]
            ):
                mdist[v] = mdist[u] + graph[u][v]

    printDist(mdist, V)


if __name__ == "__main__":
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph = [[float("inf") for i in range(V)] for j in range(V)]

    for i in range(V):
        graph[i][i] = 0.0

    for i in range(E):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:").strip())
        dst = int(input("Enter destination:").strip())
        weight = float(input("Enter weight:").strip())
        graph[src][dst] = weight

    gsrc = int(input("\nEnter shortest path source:").strip())
    Dijkstra(graph, V, gsrc)
-1
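The Dijkstra implementation above scans an adjacency matrix, which is O(V^2). A heap-based sketch over an adjacency list runs in O((V + E) log V); the example graph below is an illustrative assumption, not data from any record here:

# Heap-based Dijkstra sketch over an adjacency list.
import heapq


def dijkstra(adj: dict, src) -> dict:
    dist = {node: float("inf") for node in adj}
    dist[src] = 0.0
    heap = [(0.0, src)]
    while heap:
        d, u = heapq.heappop(heap)
        if d > dist[u]:
            continue  # stale queue entry
        for v, weight in adj[u]:
            new_dist = d + weight
            if new_dist < dist[v]:
                dist[v] = new_dist
                heapq.heappush(heap, (new_dist, v))
    return dist


if __name__ == "__main__":
    graph = {
        0: [(1, 4.0), (2, 1.0)],
        1: [(3, 1.0)],
        2: [(1, 2.0), (3, 5.0)],
        3: [],
    }
    print(dijkstra(graph, 0))  # {0: 0.0, 1: 3.0, 2: 1.0, 3: 4.0}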