language
stringclasses
6 values
original_string
stringlengths
25
887k
text
stringlengths
25
887k
Python
def return_codon_usage_table(sequences=None, translation_table_str=None):
    """Build a codon usage table from a list of gene sequences.

    Receives a list of gene sequences and an NCBI-style translation table
    string, and returns a table string with one entry per codon:
        codon_triplet: amino_acid_letter frequency_per_1000 absolute_occurrences

    Invalid coding sequences (length not a multiple of 3, i.e. not made up
    entirely of codons) are skipped.

    :param sequences: list of gene sequence strings; defaults to
        _preload_sequences() evaluated lazily (the original called it at
        function *definition* time, doing I/O on import and sharing one list)
    :param translation_table_str: translation table text, defaults to
        TRANSL_TABLE_11
    :return: the formatted table as a single string
    """
    if sequences is None:
        sequences = _preload_sequences()
    if translation_table_str is None:
        translation_table_str = TRANSL_TABLE_11

    # Parse the "key = value" lines of the translation table.
    trans = {}
    for line in translation_table_str.splitlines():
        pair = line.split(' = ')
        if len(pair) == 2:
            trans[pair[0].strip()] = pair[1].strip()
    # Work in the RNA alphabet, as the original did.
    for base in ('Base1', 'Base2', 'Base3'):
        trans[base] = trans[base].replace('T', 'U')

    # Count codons in reading frame; the original counted each codon at most
    # once per sequence ("if name in line"), not its absolute occurrences.
    codon_counts = collections.Counter()
    for seq in sequences:
        if len(seq) % 3 != 0:
            continue  # skip invalid coding sequence
        for i in range(0, len(seq), 3):
            codon_counts[seq[i:i + 3]] += 1
    total = sum(codon_counts.values())

    Codon = collections.namedtuple('Codon', ['name', 'AA', 'freq', 'count'])
    entries = []
    for i, aa in enumerate(trans['AAs']):
        name = trans['Base1'][i] + trans['Base2'][i] + trans['Base3'][i]
        count = codon_counts.get(name, 0)
        freq = 1000 * count / total if total else 0.0
        entries.append(Codon(name, aa, freq, count))

    # Render four codon entries per table row, under the original header.
    table = ['| Codon AA Freq Count | Codon AA Freq Count | Codon AA Freq Count | Codon AA Freq Count |']
    for start in range(0, len(entries), 4):
        cells = ''.join(
            f'| {c.name}: {c.AA} {c.freq:6.1f} {c.count:5d} '
            for c in entries[start:start + 4]
        )
        table.append(cells + '|')
    return '\n'.join(table)
Python
def _get_tags(tempfile=TEMPFILE):
    """Helper to parse all tags from a static copy of PyBites' feed,
    providing this here so you can focus on difflib"""
    with open(tempfile) as feed:
        # take a small subset to keep it performant
        html = feed.read().lower()
    found = TAG_HTML.findall(html)
    return {tag for tag in found if len(tag) > MIN_TAG_LEN}
Python
def number_users_solving_bites(self) -> int:
    """Return how many distinct users completed one or more Bites.

    A row counts as a completion when its 'completed' field holds the
    string "True". (Removed the unreachable `pass` after the return.)
    """
    solvers = {row['user'] for row in self.rows if row['completed'] == "True"}
    return len(solvers)
Python
def top_bite_by_number_of_clicks(self) -> str:
    """Return the Bite that got accessed the most (= appears in most rows).

    (Removed the unreachable `pass` after the return.)
    """
    clicks = Counter(row['bite'] for row in self.rows)
    # most_common(1) -> [(bite, count)]; keep only the bite name
    return clicks.most_common(1)[0][0]
Python
def process_data(url: str) -> pd.DataFrame:
    """Process the data from the Github API.

    Args:
        url (str): The URL where the data is located.

    Returns:
        pd.DataFrame: DataFrame built from the linked CSV, with the
        'month' column parsed into datetimes.
    """
    metadata = get_data(url)
    frame = pd.read_csv(metadata['download_url'])
    frame['month'] = pd.to_datetime(frame['month'])
    return frame
Python
def summary_report(df: pd.DataFrame, stats: Union[List[str], None] = STATS) -> None:
    """Summary report generated from the DataFrame and list of stats.

    Will aggregate statistics for sum, mean, and max by default.

    Args:
        df (pd.DataFrame): Pandas DataFrame of the Github API data
        stats (List[str], optional): List of summaries to aggregate.
            Defaults to STATS.

    Returns:
        None (prints to standard output)

    Example:
                      sum          mean       max
        year
        2013    484247.51  40353.959167  81777.35
        2014    470532.51  39211.042500  75972.56
    """
    # Group on a derived year Series instead of writing a 'year' column
    # into the caller's frame (the original mutated df as a side effect),
    # and aggregate only 'sales' so non-numeric columns are never summed.
    years = df['month'].dt.year.rename('year')
    print(df.groupby(years)['sales'].agg(stats))
Python
def yearly_report(df: pd.DataFrame, year: int) -> None:
    """Generate a monthly sales report for the given year.

    Args:
        df (pd.DataFrame): Pandas DataFrame of the Github API data
            ('month' datetime column, 'sales' column).
        year (int): The year to generate the report for.

    Raises:
        ValueError: if the year requested is not in the data, in the form
            "The year YEAR is not included in the report!"

    Returns:
        None (prints to standard output)
    """
    available = df['month'].dt.year.unique()
    if year not in available:
        raise ValueError('The year {} is not included in the report!'.format(year))
    # The original selected ['month', 'sales'] and then immediately
    # clobbered that selection with an unfiltered df.loc; filter first,
    # then keep only the 'sales' column for the aggregation.
    in_year = df.loc[df['month'].dt.year == year]
    report = in_year.groupby(in_year['month'].dt.month)[['sales']].agg('sum')
    print()
    print(year)
    print(report)
Python
def create_chessboard(size=8):
    """Create a chessboard of the size passed in.
    Don't return anything, print the output to stdout.

    Each printed row alternates WHITE and BLACK squares; consecutive rows
    are reversed so colors alternate vertically as well. Assumes an even
    size, as the original did.
    """
    # The row pattern never changes, so build it once instead of per
    # iteration; also dropped the unreachable trailing `pass`.
    row = [WHITE, BLACK] * (size // 2)
    for _ in range(size // 2):
        print("".join(row))
        print("".join(row[::-1]))
Python
def filter_positive_even_numbers(numbers):
    """Receives a list of numbers, and returns a filtered list of only the
    numbers that are both positive and even (divisible by 2).

    Bug fix: the original tested evenness only, so negative even numbers
    and 0 incorrectly passed the filter; a number must also be > 0.
    """
    return [n for n in numbers if n > 0 and n % 2 == 0]
Python
def process_grid_coordinates(self, s1: str, s2: str):
    """Parse two "x,y" corner strings into four grid coordinates.

    :param s1: The top left hand corner of the grid to operate on
    :param s2: The bottom right hand corner of the grid to operate on
    :return: [x1, y1, x2, y2] as integers; non-numeric chunks are
        skipped, as in the original. (Removed unreachable `pass` and
        collapsed the two append loops into one comprehension.)
    """
    return [int(chunk)
            for corner in (s1, s2)
            for chunk in corner.split(',')
            if chunk.isdigit()]
Python
def validate_grid(self):
    """Verify that no lights are brighter than 5 and none below 0.

    Raises AssertionError on the first violation, exactly as the original
    per-cell loop did; the check is vectorized into one pass instead of a
    Python loop over every cell. (Also removed the unreachable `pass`.)
    NOTE(review): asserts are stripped under `python -O`; callers relying
    on this check should not run optimized — kept as-is to preserve the
    exception type.
    """
    window = self.grid.iloc[:self.grid_size, :self.grid_size]
    assert ((window >= 0) & (window <= 5)).all().all()
Python
def turn_on(self, s1: str, s2: str):
    """Turn on every off light in the given grid slice.

    :param s1: The top left hand corner of the grid to operate on
    :param s2: The bottom right hand corner of the grid to operate on

    Lights that are already on are left untouched; lights that are off
    (0) are set to intensity 1.
    """
    x1, y1, x2, y2 = self.process_grid_coordinates(s1, s2)
    # iloc slices exclude the end point, so extend both bounds by one
    window = self.grid.iloc[x1:x2 + 1, y1:y2 + 1]
    window[window == 0] = 1
    self.grid.iloc[x1:x2 + 1, y1:y2 + 1] = window
    return self.grid
Python
def turn_off(self, s1: str, s2: str):
    """Turn off all lights in the given grid slice.

    :param s1: The top left hand corner of the grid to operate on
    :param s2: The bottom right hand corner of the grid to operate on

    Every light in the slice ends up at 0, so the original's "mask of
    lights > 0" was redundant — assign 0 to the whole slice directly.
    (Also removed the unreachable trailing `pass`.)
    """
    x1, y1, x2, y2 = self.process_grid_coordinates(s1, s2)
    # iloc slices exclude the end point, so extend both bounds by one
    self.grid.iloc[x1:x2 + 1, y1:y2 + 1] = 0
    return self.grid
Python
def turn_up(self, amount: int, s1: str, s2: str):
    """Turn every light in the grid slice up by the given amount, capped at 5.

    :param amount: The intensity to turn the lights up by
    :param s1: The top left hand corner of the grid to operate on
    :param s2: The bottom right hand corner of the grid to operate on

    Add-then-clip is equivalent to the original mask/add/re-cap sequence
    (a light already at 5 stays at 5 either way). Removed the unreachable
    trailing `pass`.
    """
    x1, y1, x2, y2 = self.process_grid_coordinates(s1, s2)
    # iloc slices exclude the end point, so extend both bounds by one
    window = self.grid.iloc[x1:x2 + 1, y1:y2 + 1]
    self.grid.iloc[x1:x2 + 1, y1:y2 + 1] = (window + amount).clip(upper=5)
    return self.grid
Python
def turn_down(self, amount: int, s1: str, s2: str):
    """Turn every light in the grid slice down by the given amount, floored at 0.

    :param amount: The intensity to turn the lights down by
    :param s1: The top left hand corner of the grid to operate on
    :param s2: The bottom right hand corner of the grid to operate on

    Subtract-then-clip is equivalent to the original mask/subtract/zero
    sequence (a light at 0 stays at 0 either way). Removed the unreachable
    trailing `pass`.
    """
    x1, y1, x2, y2 = self.process_grid_coordinates(s1, s2)
    # iloc slices exclude the end point, so extend both bounds by one
    window = self.grid.iloc[x1:x2 + 1, y1:y2 + 1]
    self.grid.iloc[x1:x2 + 1, y1:y2 + 1] = (window - amount).clip(lower=0)
    return self.grid
Python
def toggle(self, s1: str, s2: str):
    """Toggle each light in the given grid slice.

    :param s1: The top left hand corner of the grid to operate on
    :param s2: The bottom right hand corner of the grid to operate on

    A light that is on is turned off; a light that is off is turned on
    at intensity 3.
    """
    x1, y1, x2, y2 = self.process_grid_coordinates(s1, s2)
    # iloc slices exclude the end point, so extend both bounds by one
    window = self.grid.iloc[x1:x2 + 1, y1:y2 + 1]
    lit = window > 0  # snapshot the on/off state before any writes
    window[lit] = 0
    window[~lit] = 3
    self.grid.iloc[x1:x2 + 1, y1:y2 + 1] = window
    return self.grid
Python
def follow_instructions(self):
    """Process all instructions in sequence, dispatching to grid operations.

    Recognized forms: "toggle", "turn on", "turn off", "turn up <n>",
    "turn down <n>"; each line also carries the two "x,y" corner
    coordinates of the slice to operate on. (Removed the unreachable
    trailing `pass` and the second, redundant line.split() call.)
    NOTE(review): the original docstring says the first instruction is
    excluded, but the loop processes every line — confirm upstream that
    self.instructions is already sliced.
    """
    for line in self.instructions:
        words = line.split()
        # the two comma-separated chunks are the corner coordinates
        s1, s2 = [chunk for chunk in words if ',' in chunk]
        if words[0] == 'toggle':
            self.toggle(s1, s2)
        elif words[1] == 'off':
            self.turn_off(s1, s2)
        elif words[1] == 'on':
            self.turn_on(s1, s2)
        elif words[1] == 'down':
            self.turn_down(int(words[2]), s1, s2)
        else:
            self.turn_up(int(words[2]), s1, s2)
Python
def player_with_max_points_per_game():
    """Return the name of the player with the highest average points per game.

    avg_points is stored as text, so it is CAST to numeric before comparing
    (don't forget to CAST to numeric in your SQL query). Removed the
    unreachable trailing `pass`.
    """
    query = (
        "SELECT name FROM players "
        "WHERE CAST(avg_points AS numeric)="
        "(SELECT MAX(CAST(avg_points AS numeric)) FROM players)"
    )
    return cur.execute(query).fetchone()[0]
Python
def avg_years_active_players_stanford():
    """Return the average years that players from Stanford University
    are active ("active" column), rounded to 2 decimals.

    (Removed the unreachable trailing `pass`.)
    """
    query = ("SELECT ROUND(AVG(active), 2) FROM players "
             "WHERE college='Stanford University'")
    return cur.execute(query).fetchone()[0]
Python
def dec_to_base(number, base):
    """
    Input: number is the number to be converted
           base is the new base (eg. 2, 6, or 8)
    Output: the converted number in the new base without the prefix (eg. '0b')
    """
    # Iterative digit accumulation: each remainder becomes the next
    # decimal digit of the result, matching the recursive original.
    result = 0
    place = 1
    while number:
        result += (number % base) * place
        place *= 10
        number //= base
    return result
Python
def download_pickle_file():
    """Download the pickle file we created (it holds a list of
    namedtuples) from the S3 bucket and save it locally as
    PICKLE_INFILE."""
    urlretrieve(f'{S3}/bite317.pkl', PICKLE_INFILE)
Python
def deserialize(pkl_file: Path = PICKLE_INFILE) -> Sequence[NamedTuple]:
    """Load the list of namedtuples from the pickle file passed in."""
    with open(pkl_file, 'rb') as fh:
        # a single load returns the whole pickled list, so there is no
        # need for the original's one-element accumulator list
        return pickle.load(fh)
Python
def serialize(pkl_file: Path = PICKLE_OUTFILE, data: Sequence[NamedTuple] = None) -> None:
    """Save the data passed in to the pickle file passed in.

    When no data is given, the previously downloaded data is
    deserialized and re-saved.

    Fix: the original returned the already-closed file object, which
    contradicted its `-> None` annotation; it now returns None.
    """
    if data is None:
        data = deserialize()
    with open(pkl_file, 'wb') as picklefile:
        pickle.dump(data, picklefile)
Python
def cleaned(self) -> str:
    """Takes a corpus and cleans it up.

    * All text is made lowercase
    * All punctuation is removed (one C-level translate pass)
    * Every token in self.extra is replaced by a space

    (Removed the commented-out dead code and the unreachable `pass`.)

    :return: cleaned up corpus
    """
    lowered = self.txt.lower()
    no_punc = lowered.translate(str.maketrans('', '', string.punctuation))
    for extra in self.extra:
        no_punc = no_punc.replace(extra, ' ')
    return no_punc
Python
def metrics(self) -> List[Tuple[str, int]]:
    """Generate word count metrics from the cleaned corpus.

    * Counts how many times each word is used
    * Excludes stop words (self.stopwords)
    * Returns the self.count most common words (defaults to 5)

    (Removed the unreachable `pass`; stopwords are put in a set so
    membership is O(1) per word instead of scanning a list.)

    :return: List of tuples, i.e. ("word", count)
    """
    stopwords = set(self.stopwords)
    words = (word for word in self.cleaned.split() if word not in stopwords)
    return Counter(words).most_common(self.count)
Python
def graph(self) -> None:
    """Print a textual bar graph of the word counts.

    * Each word is right-aligned in 10 character spaces
    * The tag character (self.tag, default '#') is repeated once per
      occurrence of the word
    * Words are shown most-frequent first, at most self.count of them

    Example:
            nation #####
         dedicated ####

    (Removed the unreachable trailing `pass`.)

    :return: None
    """
    ranked = sorted(self.metrics, key=lambda pair: pair[1], reverse=True)
    for word, count in ranked[:self.count]:
        print(word.rjust(10), self.tag * count)
Python
def count_n_repetitions(text, n=1):
    """
    Counts how often characters are followed by themselves for
    n times.

    text: UTF-8 compliant input text
    n: How often character should be repeated, defaults to 1
    """
    # '.' with DOTALL matches any char including newline, same as the
    # original's [\s\S]; the lookahead keeps the matches overlapping.
    pattern = re.compile(r"(.)(?=\1{%d})" % n, re.DOTALL)
    return sum(1 for _ in pattern.finditer(text))
Python
def count_n_reps_or_n_chars_following(text, n=1, char=""):
    """
    Counts how often characters are repeated for n times, or
    followed by char n times.

    text: UTF-8 compliant input text
    n: How often character should be repeated, defaults to 1
    char: Character which also counts if repeated n times

    Fixes: leftover debug print() calls removed; the hand-rolled
    (incomplete) metacharacter escape list replaced with re.escape.
    """
    if char == "":
        return len(re.findall(rf'([\s\S])(?=\1{{{n}}})', text))
    escaped = re.escape(char)
    pattern = rf'([\s\S])(?=\1{{{n}}})|([\s\S])(?={escaped}{{{n}}})'
    return len(re.findall(pattern, text))
Python
def check_surrounding_chars(text, surrounding_chars):
    """
    Count the number of times a character is surrounded by
    characters from the surrounding_chars list.

    text: UTF-8 compliant input text
    surrounding_chars: List of characters

    Fix: re.escape handles every regex metacharacter, replacing the
    original's hand-rolled (incomplete) escape list. Lookarounds are
    zero-width, so adjacent surrounded characters all count.
    """
    char_class = "".join(re.escape(char) for char in surrounding_chars)
    matches = re.findall(rf'(?<=[{char_class}])([\s\S])(?=[{char_class}])', text)
    return len(matches)
Python
def rent_or_stream(
        renting_history: RentingHistory,
        streaming_cost_per_month: int = STREAMING_COST_PER_MONTH
) -> Dict[str, str]:
    """Calculate per month whether renting movies one by one is cheaper
    than streaming.

    Return a dict of:
    keys = months (YYYY-MM)
    values = 'rent' or 'stream' based on what is cheaper
    (ties go to 'rent', matching the original <= comparison)

    The original looped over the whole history once per month
    (O(months * movies)); one accumulation pass is enough. Removed the
    unreachable trailing `pass`.
    """
    monthly_spend = {}
    for movie in renting_history:
        month = movie.date.strftime('%Y-%m')
        monthly_spend[month] = monthly_spend.get(month, 0) + movie.price
    return {
        month: 'rent' if spend <= streaming_cost_per_month else 'stream'
        for month, spend in monthly_spend.items()
    }
Python
def load_pycon_data(pycon_videos=pycon_videos):
    """Load the pickle file (pycon_videos) and return the data structure it holds"""
    # NOTE(review): assumes the pickle comes from a trusted source --
    # pickle.load on untrusted data can execute arbitrary code.
    with open(pycon_videos, 'rb') as f:
        return pickle.load(f)
Python
def has_timestamp(text):
    """Return True if text has a timestamp of this format: 2014-07-03T23:30:37"""
    # re.search is enough: we only need to know whether one match exists,
    # not collect them all with findall.
    pattern = r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}'
    return bool(re.search(pattern, text))
Python
def is_integer(number):
    """Return True if number is an integer (ints, or strings like '-3'/'+7')."""
    # fullmatch makes the ^...$ anchors unnecessary.
    return bool(re.fullmatch(r'[-+]?\d+', str(number)))
Python
def has_word_with_dashes(text):
    """Returns True if text has one or more words with dashes"""
    # Cast to bool so the function returns True/False as documented
    # instead of leaking a re.Match object (or None).
    return bool(re.search(r'[0-9a-zA-Z]+-[0-9a-zA-Z]+', text))
Python
def remove_duplicate_spacing(text):
    """Replace multiple spaces by one space.

    Note: the pattern collapses any whitespace run (tabs/newlines
    included) into a single space.
    """
    return re.sub(r'\s+', ' ', text)
Python
def has_three_consecutive_vowels(word):
    """Returns True if word has at least 3 consecutive (lowercase) vowels"""
    return bool(re.search(r'[aeiou]{3,}', word))
Python
def create_uno_deck():
    """Create a deck of 108 Uno cards.

    Return a list of UnoCard namedtuples (for cards w/o suit use None in
    the namedtuple).

    Bug fixed: `2 * UnoCard(...)` repeats the *fields* of one namedtuple
    (yielding a plain 4-element tuple), it does not produce two cards.
    List multiplication duplicates the card objects instead.
    """
    deck = []
    for suit in SUITS:
        deck.append(UnoCard(suit, 0))                    # one 0 per suit
        for value in range(1, 10):                       # two of each 1-9
            deck += [UnoCard(suit, value)] * 2
        for action in ('Draw Two', 'Skip', 'Reverse'):   # two of each action
            deck += [UnoCard(suit, action)] * 2
    deck += [UnoCard(None, 'Wild')] * 4
    deck += [UnoCard(None, 'Wild Draw')] * 4
    return deck
Python
def show(self, items=None):
    """Print a simple table of cart items with total at the end"""
    if items is None:
        items = self
    for entry in items:
        label = f'{entry.product}'
        if entry.craving:
            label = f'{label} (craving)'
        print(f'{label:<30} | {entry.price:>3}')
    self._print_total(items)
Python
def _print_total(self, items): """Calculate and print total price of cart""" total = sum(item.price for item in items) print('-' * 36) print(f'{"Total":<30} | {total:>3}')
def _print_total(self, items): """Calculate and print total price of cart""" total = sum(item.price for item in items) print('-' * 36) print(f'{"Total":<30} | {total:>3}')
Python
def delete(self, product):
    """Delete item matching 'product', raises IndexError if no item matches"""
    matches = (i for i, entry in enumerate(self) if entry.product == product)
    index = next(matches, None)
    if index is None:
        raise IndexError(f'{product} not in cart')
    self._items.pop(index)
    self.show()
Python
def search(self, search):
    """Filters items matching insensitive 'contains' search, and passes
    them to show for printing"""
    needle = search.lower()
    matches = [entry for entry in self if needle in entry.product.lower()]
    self.show(matches)
Python
def create_user_bar_chart(content):
    """Receives csv file (decoded) content and returns a table of timezones
    and their corresponding member counts in pluses (see Bite/tests).

    Bug fixed: the table is now *returned* -- the original only printed it
    and returned None.
    """
    # column index 2 holds the timezone of each member row
    timezone_counts = Counter(row[2] for row in content)
    lines = [
        f'{timezone:<21}| {"+" * count}'
        for timezone, count in sorted(timezone_counts.items())
    ]
    return '\n'.join(lines) + '\n'
Python
def gen_files():
    """Return a generator of dir names reading in tempfile.

    tempfile has this format: challenge<int>/file_or_dir<str>,is_dir<bool>
    03/rss.xml,False
    03/mridubhatnagar,True
    -> only rows whose last column is 'True' (directories) are yielded.
    """
    with open(tempfile) as f:
        for line in f.read().splitlines():
            # split once per line instead of twice
            fields = line.split(',')
            if fields[1] == 'True':
                yield fields[0]
Python
def diehard_pybites():
    """Return a Stats namedtuple (defined above) that contains the user
    that made the most PRs (ignoring the users in IGNORE) and a challenge
    tuple of most popular challenge and the amount of PRs for that
    challenge, e.g. Stats(user='clamytoe', challenge=('01', 7)).

    Bug fixed: counts are accumulated in *local* Counters so repeated
    calls no longer double-count into the module-level ones.
    """
    from collections import Counter
    user_counts = Counter()
    challenge_counts = Counter()
    for entry in gen_files():
        challenge, user = entry.split('/')
        # NOTE(review): ignored users are excluded from both tallies here,
        # mirroring the original's guard -- confirm against the Bite tests.
        if user not in IGNORE:
            user_counts[user] += 1
            challenge_counts[challenge] += 1
    top_user, _ = user_counts.most_common(1)[0]
    top_challenge = challenge_counts.most_common(1)[0]
    return Stats(top_user, top_challenge)
Python
def operas_both_at_premiere(guest, composer):
    """Retrieves a list of titles of operas, where the guest and the
    composer could have been together at premiere.

    That is the Opera.author matches the composer passed in, and both
    guest and composer are alive at the time of Opera.date.

    If guest and/or composer are not in the composers dict, raise a
    ValueError (the original wrongly validated `composer` against opera
    authors instead of the composers dict).

    Args:
        guest (str): one of the composers but not the author of an opera
        composer (str): the author of an opera
    Returns a list of titles of operas.
    """
    # Validate inputs first, before doing any other work.
    if guest not in composers or composer not in composers:
        raise ValueError(f'{guest!r} and/or {composer!r} not a known composer')
    visitor = composers[guest]
    author = composers[composer]
    titles = []
    for opera in operas:
        if opera.author != composer:
            continue
        premiere = _get_date(opera.date)
        # chained comparisons: both must be born before and die after premiere
        if (_get_date(visitor.born) < premiere < _get_date(visitor.died)
                and _get_date(author.born) < premiere < _get_date(author.died)):
            titles.append(opera.play)
    return titles
Python
def tail(filepath, n):
    """Simulate Unix' tail -n: read in filepath and return a list of its
    last n lines (newlines stripped). Debug print removed.
    """
    with open(filepath) as f:
        lines = f.read().splitlines()
    # Guard n <= 0 explicitly: lines[-0:] would return the *whole* file.
    if n <= 0:
        return []
    # A negative slice handles n > len(lines) naturally.
    return lines[-n:]
Python
def extract_course_times(course=COURSE):
    """Return the course timings from the passed in course string.

    Timings are in mm:ss (minutes:seconds) format, e.g. for COURSE:
    ['01:47', '32:03', '41:51', '27:48', '05:02']
    """
    # ':' has no special meaning in a regex, so no escaping needed.
    return re.findall(r'\d{2}:\d{2}', course)
Python
def fix_translation(org_text, trans_text):
    """Receives original English text as well as text returned by translator.

    Parse trans_text restoring the original (English) code (wrapped inside
    code and pre tags) into it. Return the fixed translation str.
    """
    # Greedy match: everything between the first <pre> and the last </pre>
    # is treated as one region (assumes a single pre block per document --
    # TODO confirm against the inputs).
    pre_tag = re.compile(r'<pre>[\S\s]*<\/pre>')
    translated_pre = re.findall(pre_tag, org_text)
    # NOTE(review): translated_pre[0] raises IndexError when org_text holds
    # no <pre> block, and as a re.sub replacement string any backslashes in
    # it are interpreted as escape sequences -- verify inputs never contain
    # these.
    replace_pre = re.sub(pre_tag, translated_pre[0], trans_text)
    # Non-greedy so each <code>...</code> snippet matches separately.
    code_tag = re.compile(r'<code>[\S\s]*?<\/code>')
    translated_code = re.findall(code_tag, trans_text)  # currently unused
    original_code = re.findall(code_tag, org_text)
    # Split the translation on the code snippets, then interleave the
    # original (English) snippets back between the text fragments:
    # re.split yields exactly len(original_code) + 1 fragments, so the
    # even slots take text and the odd slots take code.
    split_by_code = re.split(code_tag, replace_pre)
    final_output = [None]*(len(split_by_code)+len(original_code))
    final_output[::2] = split_by_code
    final_output[1::2] = original_code
    return "".join(final_output)
    pass
Python
def sort_words_case_insensitively(words):
    """Sort the provided word list ignoring case, and numbers last
    (1995, 19ab = numbers / Happy, happy4you = strings; only the first
    char decides whether a word counts as a number).

    Bug fixed: the old "'z' + text" trick made digit words sort *before*
    words like 'zebra' ('z1...' < 'ze...') and skipped lowercasing them.
    A (bool, str) key puts all digit-leading words strictly last.
    """
    return sorted(words, key=lambda word: (word[0].isdigit(), word.lower()))
Python
def divide_numbers(numerator, denominator):
    """Convert numerator and denominator to int and return their division.

    Raises:
        ValueError: when either argument cannot be converted to int
                    (propagated from int(), as the docstring demands).
    Returns 0 when dividing by zero.

    Bugs fixed: ValueError is now *raised* instead of returned; the
    undefined name 'bottom' is gone; only ZeroDivisionError is swallowed
    (the old bare except also hid the NameError, so the function always
    returned 0).
    """
    top = int(numerator)        # ValueError propagates to the caller
    bottom = int(denominator)
    try:
        return top / bottom
    except ZeroDivisionError:
        return 0
Python
def calc_mean_score(movies):
    """Calculate the mean score of a list of Movie namedtuples, rounded
    to 1 decimal place."""
    # mean() accepts a generator directly; no intermediate list needed.
    return round(mean(movie.score for movie in movies), 1)
Python
def slice_and_dice(text: str = text) -> list:
    """Get a list of words from the passed in text.

    For every stripped line that starts with a lowercase letter, take the
    line's last word, strip trailing '.'/'!' characters, and collect it.

    Bugs fixed: the old code assigned into the throwaway list created by
    split() (a no-op), appended the whole line instead of the cleaned last
    word, and left debug prints behind.
    """
    words = []
    for line in text.strip().split('\n'):
        stripped = line.strip()
        if stripped and stripped[0] in ascii_lowercase:
            last_word = stripped.split(' ')[-1]
            words.append(last_word.strip('.!'))
    return words
Python
def mark_islands(i, j, grid):
    """Mark (in place, with 'X') the whole island cell (i, j) belongs to,
    i.e. every 1-cell reachable from (i, j) via horizontal/vertical steps.

    Input: the row, column and grid. Output: None (in-place operation).

    Rewritten as an explicit flood fill: the original's neighbour
    bookkeeping clamped some indices with max() but relied on try/except
    IndexError for the rest, and negative indices could wrap around to
    the other side of the grid.
    """
    rows, cols = len(grid), len(grid[0])
    grid[i][j] = 'X'
    frontier = [(i, j)]
    while frontier:
        x, y = frontier.pop()
        for dx, dy in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            nx, ny = x + dx, y + dy
            # explicit bounds check: no negative-index wraparound
            if 0 <= nx < rows and 0 <= ny < cols and grid[nx][ny] == 1:
                grid[nx][ny] = 'X'
                frontier.append((nx, ny))
Python
def _create_defeat_mapping():
    """Parse battle-table.csv building up a defeat_mapping dict with
    keys = attackers / values = list of opponents they defeat ('win')."""
    defeat_mapping = {}
    with open(BATTLE_DATA) as f:
        for row in csv.DictReader(f):
            defeat_mapping[row['Attacker']] = [
                opponent for opponent, outcome in row.items()
                if outcome == 'win'
            ]
    return defeat_mapping
Python
def contains_only_vowels(input_str):
    """Receives input string and checks if all chars are VOWELS.
    Match is case insensitive."""
    # Note: all() on an empty string returns True (vacuous truth).
    return all(char.lower() in VOWELS for char in input_str)
Python
def contains_any_py_chars(input_str):
    """Receives input string and checks if any of the PYTHON chars
    are in it. Match is case insensitive."""
    return any(char.lower() in PYTHON for char in input_str)
Python
def contains_digits(input_str):
    """Receives input string and checks if it contains one or more
    (ASCII) digits."""
    return any(char in string.digits for char in input_str)
Python
def number_of_sections(self):
    """Return the number of sections in the ini file.

    New to properties? -> https://pybit.es/property-decorator.html
    """
    return len(self.config.sections())
Python
def base_python_versions(self):
    """Return a list of all (unique) basepython values across the ini file."""
    # A set comprehension deduplicates directly; the result order is
    # unspecified, exactly as with the original list(set(...)).
    bases = {
        value
        for section in self.config.sections()
        for key, value in self.config.items(section)
        if key == 'basepython'
    }
    return list(bases)
Python
def max_fund(village):
    """Find a contiguous subarray with the largest sum.

    Returns (total, start, end) with 1-based inclusive positions, or
    (0, 0, 0) when no positive-sum subarray exists (e.g. all negative).

    Performance fix: keep a running sum per start index instead of
    re-summing every slice with sum() -- O(n^2) instead of O(n^3).
    Tie-breaking is unchanged: the first maximal (start, end) pair wins
    because only strictly greater totals replace the current best.
    """
    max_total, max_start, max_end = 0, 0, 0
    for start in range(len(village)):
        running_total = 0
        for end in range(start, len(village)):
            running_total += village[end]
            if running_total > max_total:
                max_total = running_total
                max_start = start + 1
                max_end = end + 1
    return (max_total, max_start, max_end)
Python
def load_words():
    """Load the words dictionary (DICTIONARY constant) into a list
    and return it"""
    with open(DICTIONARY) as f:
        return f.read().splitlines()
Python
def max_word_value(words):
    """Given a list of words calculate the word with the maximum value
    and return it"""
    # max() with a key avoids building an intermediate dict; on ties the
    # first word with the highest value wins, as before.
    return max(words, key=calc_word_value)
Python
def rotate(string, n):
    """Rotate characters in a string. Expects string and n (int) for
    number of characters to move.

    Bug fixed: n == 0 fell into the old else branch and returned the
    string doubled; a single slice pair covers every case for
    -len(string) <= n <= len(string).
    """
    return string[n:] + string[:n]
Python
def _maybe_DateFormats(date_str):
    """
    Args:
        date_str (str) string representing a date in unknown format
    Returns:
        a list of enum members, where each member represents a possible
        date format for the input date_str
    """
    candidates = []
    for index, parse_format in enumerate(DateFormat.get_d_parse_formats()):
        try:
            datetime.strptime(date_str, parse_format)
        except ValueError:
            continue
        candidates.append(DateFormat(index))
    # fall back to the NONPARSABLE sentinel when nothing matched
    return candidates or [DateFormat.NONPARSABLE]
Python
def most_prolific_automaker(year):
    """Given year 'year' return the automaker that released the highest
    number of new car models.

    Bug fixed: the original ignored `year`, printed the automaker set and
    returned None; it now counts automakers for the given year and
    returns the most common one (consistent with the completed version
    of this Bite).
    """
    from collections import Counter
    counts = Counter(
        entry['automaker'] for entry in data if entry['year'] == year
    )
    return counts.most_common(1)[0][0]
Python
def custom_series_function(ser: pd.Series, within: int) -> pd.core.series.Series:
    """A more challenging mask to apply.

    When passed a series of floats, return all values within `within`
    (inclusive) of at least one of:
    - the minimum value
    - the 1st quartile value
    - the second quartile (median) value
    - the mean
    - the third quartile value
    - the maximum value

    The series is passed sorted ascending, so boolean-mask filtering
    keeps the values in sequence.

    :param ser: Series to perform operation on
    :param within: The value to calculate the range of number within
    """
    # One anchor list replaces six near-identical range checks.
    anchors = list(ser.quantile([0, 0.25, 0.5, 0.75, 1])) + [ser.mean()]
    mask = [
        any(anchor - within <= value <= anchor + within for anchor in anchors)
        for value in ser
    ]
    return ser[mask]
Python
def calc_months_passed(year, month, day):
    """Construct a date object from the passed in arguments (bad inputs
    raise, straight from date()) and return how many months have passed
    since the START_DATE constant.

    If the new date is < START_DATE raise a ValueError.

    Rule: a new month that is >= MIN_DAYS_TO_COUNT_AS_MONTH (10) days in
    counts as an extra month, e.g.:
    date(2018, 11, 10) = 9 days in  => 0 months
    date(2018, 11, 11) = 10 days in => 1 month
    date(2019, 12, 11) = 1 year + 1 month + 10 days in => 14 months
    """
    target_date = date(year, month, day)
    # Compare dates directly instead of inspecting the signs of the
    # (already normalized) relativedelta components.
    if target_date < START_DATE:
        raise ValueError(f'{target_date} is before {START_DATE}')
    delta = relativedelta(target_date, START_DATE)
    months = delta.years * MONTHS_PER_YEAR + delta.months
    # Use the named constant instead of the magic number 10.
    if delta.days >= MIN_DAYS_TO_COUNT_AS_MONTH:
        months += 1
    return months
Python
def most_prolific_automaker(year):
    """Given year 'year' return the automaker that released the highest
    number of new car models"""
    makers = [entry['automaker'] for entry in data if entry['year'] == year]
    return Counter(makers).most_common(1)[0][0]
Python
def filter_entries_by_tag(search, entry):
    """Check if search matches any tags as stored in the Entry namedtuple
    (case insensitive, only whole, not partial string matches).

    Supported searches:
    1. If & in search do AND match, e.g. flask&api should match entries
       with both tags
    2. Elif | in search do an OR match, e.g. flask|django should match
       entries with either tag
    3. Else: match if search is in tags

    Returns bool: True if match, False if not.
    """
    # Lowercase once up front; the old code lowered each term a second time
    # and accumulated results in a throwaway list.
    search = search.lower()
    if '&' in search:
        return all(term in entry.tags for term in search.split('&'))
    if '|' in search:
        return any(term in entry.tags for term in search.split('|'))
    return search in entry.tags
Python
def main():
    """Entry point: interactive tag search over the feed entries.

    1. Loads entries via get_feed_entries().
    2. Loops prompting for a search term: empty input re-prompts,
       'q' prints 'Bye' and exits.
    3. Prints matching entry titles ordered by date ascending, then the
       match count ('entry' for exactly one match, 'entries' otherwise).
    """
    entries = get_feed_entries()
    while True:
        search = input('Enter a search term:')
        if search == '':
            print('Please provide a search term')
        elif search == 'q':
            print('Bye')
            break
        else:
            matching_entries = sorted(
                (entry for entry in entries if filter_entries_by_tag(search, entry)),
                key=lambda entry: entry.date,
            )
            for entry in matching_entries:
                print(entry.title)
            noun = 'entry' if len(matching_entries) == 1 else 'entries'
            print(f'{len(matching_entries)} {noun} matched')
Python
def dedup_and_title_case_names(names):
    """Should return a list of title cased names, each name appears only once"""
    # Title-case BEFORE deduping so 'bob ward' and 'Bob Ward' collapse
    # into one entry (the old order left both in the result).
    return list({name.title() for name in names})
Python
def sort_by_surname_desc(names):
    """Returns names list sorted desc by surname"""
    names = dedup_and_title_case_names(names)
    # The surname is the LAST whitespace-separated token: the old
    # split(" ", 2)[1] picked the middle name for 'Sara Jane Smith'.
    return sorted(names, key=lambda name: name.split()[-1], reverse=True)
Python
def calc_max_uptime(reboots):
    """Parse the passed in reboots output, extracting the datetimes.

    Calculate the highest uptime between reboots = highest diff between
    extracted reboot datetimes. Return a tuple of this max uptime in
    days (int) and the date (str) this record was hit.
    """
    # The last 16 chars of each data line hold e.g. 'Sun Feb 17 11:31'.
    # NOTE(review): pinning year=2020 assumes the log does not wrap a
    # year boundary -- confirm against the inputs used by the tests.
    stamps = sorted(
        datetime.strptime(line[-16:], "%a %b %d %H:%M").replace(year=2020)
        for line in reboots.splitlines()[1:]
    )
    # Pair each uptime gap with the reboot that ended it; max() compares
    # the timedelta first, so the longest uptime wins.
    gaps = [(later - earlier, later) for earlier, later in zip(stamps, stamps[1:])]
    best_gap, record_reboot = max(gaps)
    return best_gap.days, str(record_reboot.date())
Python
def filter_accents(text):
    """Return a sorted sequence of the accented characters found in the
    passed in text, lowercased.

    A character counts as accented when it has a Unicode decomposition
    (e.g. 'é' decomposes into 'e' plus a combining acute accent).
    """
    return sorted({char.lower() for char in text if unicodedata.decomposition(char)})
Python
def calc_total_course_duration(timestamps):
    """Takes timestamps list ("MM:SS" strings) as returned by
    get_all_timestamps and calculates the total duration as HH:MM:SS."""
    total = timedelta()
    for stamp in timestamps:
        minutes, seconds = stamp.split(':')
        total += timedelta(minutes=int(minutes), seconds=int(seconds))
    # Format manually: str(timedelta) switches to '1 day, H:MM:SS' past
    # 24 hours, which the previous strptime round-trip could not parse.
    hours, remainder = divmod(int(total.total_seconds()), 3600)
    minutes, seconds = divmod(remainder, 60)
    return f"{hours:02d}:{minutes:02d}:{seconds:02d}"
Python
def is_palindrome(word):
    """Return if word is palindrome, 'madam' would be one.
    Case insensitive, so Madam is valid too.
    It should work for phrases too so strip all but alphanumeric chars.
    So "No 'x' in 'Nixon'" should pass (see tests for more)"""
    # Normalize: lowercase, alphanumerics only; then a direct reversal
    # check replaces the old double-slice gymnastics.
    cleaned = "".join(char.lower() for char in word if char.isalnum())
    return cleaned == cleaned[::-1]
Python
def parse_social_platforms_string():
    """Convert the module-level social_platforms string into a dict where
    keys = social platform names and values = Validator namedtuples
    (a length range and a compiled allowed-characters regex)."""
    platforms_raw_list = social_platforms.splitlines()
    raw_list_to_split = iter(platforms_raw_list)
    # Platform blocks are separated by blank lines; derive the size of
    # one block and how many blocks there are. Assumes every block has
    # the same number of lines -- TODO confirm against social_platforms.
    number_to_split_on = platforms_raw_list.index("") + 1
    number_of_sublists = platforms_raw_list.count("") + 1
    length_to_split = [number_to_split_on]*number_of_sublists
    # Consume the shared iterator in fixed-size chunks, one per platform.
    split_string = [list(islice(raw_list_to_split, length)) for length in length_to_split]
    platforms_dict = dict()
    for string in split_string:
        # Expected block layout: name, min-length line, max-length line,
        # allowed-characters line (extras ignored via *_).
        platform, min_range, max_range, regex, *_ = string
        min_range = int(min_range.split()[-1])
        max_range = int(max_range.split()[-1]) + 1  # range() end is exclusive
        range_object = range(min_range, max_range)
        regex = regex.split(":")[-1].split()
        # Build a whole-string character-class pattern from the terms.
        compile_string = '^['
        for term in regex:
            if term != '.':
                compile_string += term
            else:
                # Escape the dot (harmless inside [], kept as-is).
                compile_string += "\\"+term
        compile_string +=']+$'
        platforms_dict[platform] = Validator(range_object, re.compile(compile_string))
    return platforms_dict
    pass
Python
def validate_username(platform, username):
    """Receives a platform (Twitter, Facebook or Reddit) and a username
    string; raises ValueError if an unknown platform is passed in, else
    returns True/False for whether the username is valid there."""
    validators = parse_social_platforms_string()
    if platform not in validators:
        raise ValueError(f"unknown platform: {platform}")
    validator = validators[platform]
    # Valid = length within the platform's range AND the whole name
    # matches the platform's allowed-characters pattern.
    return len(username) in validator.range and bool(validator.regex.match(username))
Python
def high_low_record_breakers_for_2015():
    """Extract the high and low record breaking temperatures for 2015.

    Compares every 2015 station/day temperature against that station's
    2005-2014 record for the same day-of-year (Feb 29 removed so every
    year has 365 slots) and returns a tuple of STATION namedtuples:
    (highest record-breaking high, lowest record-breaking low), with
    Date as a plain datetime.date. Temperatures in DATA_FILE are tenths
    of degrees Celsius and are divided by 10.
    """
    temp_data = pd.read_csv(DATA_FILE)
    temp_data['Date'] = pd.to_datetime(temp_data['Date'])
    # Keep everything except Feb 29 (single mask instead of the old
    # trio of helper columns).
    not_feb_29 = (temp_data['Date'].dt.month != 2) | (temp_data['Date'].dt.day != 29)
    temp_data = temp_data[not_feb_29][['ID', 'Date', 'Element', 'Data_Value']]
    temp_data['Temp'] = temp_data['Data_Value'] / 10
    # One row per station/date with TMAX and TMIN as columns.
    pivoted = pd.pivot_table(temp_data, index=['ID', 'Date'],
                             columns='Element', values='Temp').reset_index()
    history = pivoted[pivoted['Date'].dt.year != 2015].copy()
    year_2015 = pivoted[pivoted['Date'].dt.year == 2015].copy()
    # NOTE(review): dayofyear still shifts post-Feb dates by 1 in leap
    # years even with Feb 29 rows removed -- confirm this matches the
    # expected day alignment.
    history['MonthDay'] = history['Date'].dt.dayofyear
    year_2015['MonthDay'] = year_2015['Date'].dt.dayofyear
    # Per-station, per-day-of-year records over 2005-2014.
    record_highs = history.groupby(['ID', 'MonthDay'])['TMAX'].max().reset_index()
    record_lows = history.groupby(['ID', 'MonthDay'])['TMIN'].min().reset_index()
    merged = pd.merge(year_2015, record_highs, how='left', on=['ID', 'MonthDay'])
    merged = pd.merge(merged, record_lows, how='left', on=['ID', 'MonthDay'])
    # After the merges: _x = 2015 value, _y = historical record.
    broke_high = merged[merged['TMAX_x'] > merged['TMAX_y']]
    broke_low = merged[merged['TMIN_x'] < merged['TMIN_y']]
    high_value = broke_high.loc[broke_high['TMAX_x'].idxmax()]
    low_value = broke_low.loc[broke_low['TMIN_x'].idxmin()]
    high_2015 = STATION(ID=high_value['ID'], Date=high_value['Date'].date(),
                        Value=high_value['TMAX_x'])
    low_2015 = STATION(ID=low_value['ID'], Date=low_value['Date'].date(),
                       Value=low_value['TMIN_x'])
    return (high_2015, low_2015)
Python
def retry(func):
    """Decorator: call `func` up to MAX_RETRIES times, printing every
    exception thrown; raise MaxRetriesException once all attempts fail."""
    @wraps(func)  # preserve the wrapped function's name/docstring
    def looper(*args, **kwargs):
        for _ in range(MAX_RETRIES):
            try:
                # Call exactly once per attempt -- the old code invoked
                # func a second time on the successful path.
                return func(*args, **kwargs)
            except Exception as exc:
                print(exc)
        raise MaxRetriesException
    # The old version never returned the wrapper, so decorated
    # functions became None.
    return looper
Python
def display_books(books, limit=10, year=None):
    """Prints the specified books to the console

    :param books: list of all the books
    :param limit: integer that indicates how many books to return
    :param year: integer indicating the oldest year to include
    :return: None
    """
    if year is not None:
        books = [book for book in books if int(book.year) >= year]
    # Slicing never raises when there are fewer books than `limit`
    # (the old iterator/next() version hit StopIteration).
    for book in books[:limit]:
        print(book)
    return None
Python
def load_data():
    """Loads the data from the html file.

    Creates the soup object and processes it to extract the information
    required to create the Book class objects, returning a sorted list
    of Books. Books are sorted by rating (desc), then year, title and
    author's last name (asc); ranks are then rewritten 1..n to match
    the new order.
    """
    soup = _get_soup(html_file)
    book_blocks = soup.find_all('div', class_='book accepted normal')
    book_list = []
    rank_setting = 1
    for book in book_blocks:
        # Only keep books with 'python' in the title (case-insensitive).
        if 'python' not in book.find('h2', class_='main').text.lower():
            continue
        try:
            title = book.find('h2', class_='main').text
            # Author is rendered 'First [Middle] Last'; store 'Last, First'.
            author_name = book.find('h3', class_='authors').find('a').text.split()
            first_name, last_name = " ".join(author_name[:-1]), author_name[-1]
            author = last_name + ", " + first_name
            # Assumes the date text carries a fixed 3-char prefix before
            # the year -- TODO confirm against the html fixture.
            year = book.find('span', class_='date').text[3:]
            rank = rank_setting
            rank_setting += 1
            rating = float(book.find('span', class_='rating').text)
            book_list.append(Book(title, author, year, rank, rating))
        except:
            # NOTE(review): bare except silently skips malformed blocks;
            # consider narrowing to AttributeError/ValueError/IndexError.
            continue
    # Stable sorts applied in reverse priority order yield the composite
    # ordering: rating desc, then year, title, author ascending.
    sort_spec = ((lambda x: x.rating, True),
                 (lambda x: x.year, False),
                 (lambda x: x.title.lower(), False),
                 (lambda x: x.author, False))
    for sort_func, reverse_value in sort_spec[::-1]:
        book_list.sort(key=sort_func, reverse=reverse_value)
    updated_rank = 1
    for book in book_list:
        book.rank = updated_rank
        updated_rank += 1
    return book_list
    pass
Python
def island_size(map_):
    """Hint: use the get_others helper
    Input: the map
    Output: the perimeter of the island
    """
    # Sum the per-cell perimeter contributions over the whole grid.
    return sum(
        get_others(map_, row, col)
        for row in range(len(map_))
        for col in range(len(map_[0]))
    )
Python
def calculate_flux(XYZ: str) -> list:
    """Read the data in from xyz.csv, add two new columns -- one for
    dollar flux and one for percentage flux (12/31/20 vs 12/31/19) --
    and return the rows as a list of tuples.
    """
    df = pd.read_csv(XYZ)
    df['Dollar Flux'] = df['12/31/20'].sub(df['12/31/19'])
    df['Percentage Flux'] = df['12/31/20'].div(df['12/31/19']).sub(1)
    return [tuple(row) for row in df.itertuples(index=False)]
Python
def identify_flux(xyz: list) -> list:
    """Iterate over the list of row tuples and keep those whose dollar
    flux (index 3) AND percentage flux (index 4) are both above their
    respective THRESHOLDS in absolute value."""
    flagged_lines = []
    for row in xyz:
        if abs(row[3]) > THRESHOLDS[0] and abs(row[4]) > THRESHOLDS[1]:
            flagged_lines.append(row)
    return flagged_lines
Python
def pascal(N: int) -> List[int]:
    """Return the Nth row of Pascal's triangle (1-indexed).

    pascal(1) == [1]; N <= 0 returns []. Only the current row is kept,
    and the pairwise-sum rule replaces the old ad-hoc i == 3 special
    case -- the general rule covers every row.
    """
    if N <= 0:
        return []
    row = [1]
    # Each new row is 1, the pairwise sums of the previous row, 1.
    for _ in range(N - 1):
        row = [1] + [left + right for left, right in zip(row, row[1:])] + [1]
    return row
Python
def _create_soup_obj(url):
    """Fetch `url` and return a BeautifulSoup of the response body.

    The response encoding is forced to utf-8 before reading .text so
    emojis in the page are decoded correctly prior to parsing.
    """
    resp = requests.get(url)
    resp.encoding = "utf-8"
    return BeautifulSoup(resp.text, "html.parser")
Python
def within_schedule(utc, *timezones):
    """Receive a naive utc datetime and one or more timezone names and
    check if the local hour in ALL of them falls within MEETING_HOURS.

    Raises ValueError for any timezone name not listed in TIMEZONES.
    """
    local_hours = []
    for zone in timezones:
        if zone not in TIMEZONES:
            raise ValueError(f"unsupported timezone: {zone}")
        local = pytz.utc.localize(utc).astimezone(pytz.timezone(zone))
        local_hours.append(local.hour)
    return all(hour in MEETING_HOURS for hour in local_hours)
Python
def traffic_light():
    """Returns an itertools.cycle iterator that when iterated over returns
    State namedtuples as shown in the Bite's description"""
    states = (
        State('red', 'Stop', 2),
        State('green', 'Go', 2),
        State('amber', 'Caution', 0.5),
    )
    return cycle(states)
Python
def is_anagram(word1, word2):
    """Receives two words and returns True/False (boolean) if word2 is an
    anagram of word1, ignore case and spacing.
    About anagrams: https://en.wikipedia.org/wiki/Anagram"""
    def signature(word):
        # Canonical form: spaces removed, lowercased, letters sorted.
        return sorted(word.replace(" ", "").lower())
    return signature(word1) == signature(word2)
Python
def generate_enchantments(soup):
    """Generates a dictionary of Enchantment objects
    With the key being the id_name of the enchantment.
    """
    def extract_items(string):
        # The icon URL filename encodes the applicable items, e.g.
        # '.../enchanted_iron_sword_sm.png'. Strip path/extension and
        # the boilerplate tokens, keeping only item names.
        new_string = string.split('/')[-1].split('.')[0]
        replacements = (('enchanted',''), ('iron',''), ('sm',''), ('fishing_rod', 'fishing rod'))
        for r in replacements:
            new_string = new_string.replace(*r)
        # 'fishing rod' was temporarily de-underscored so the split on
        # '_' keeps it whole; restore the canonical 'fishing_rod' here.
        item_list = ['fishing_rod' if item == 'fishing rod' else item for item in new_string.split("_")]
        item_list = list(filter(None,item_list))  # drop empty fragments
        return item_list
    enchantment_dict = defaultdict(Enchantment)
    enchantment_table = soup.find('table', id='minecraft_items')
    enchantment_table_data = enchantment_table.find_all('tr')
    for row in enchantment_table_data[1:]:  # [1:] skips the header row
        name = row.find('a').text
        id_name = row.find('em').text
        # Second <td> of the row holds the max level -- TODO confirm
        # find_next chaining still matches the live page layout.
        max_level = row.find_next('td').find_next('td').text
        description = row.find('td', class_='hidden-xs').text
        items = extract_items(row.find('img', class_='img-rounded')['data-src'])
        entry = (id_name, name, max_level, description, items)
        enchantment_dict[id_name] = Enchantment(*entry)
    return enchantment_dict
    pass
Python
def generate_items(data):
    """Generates a dictionary of Item objects
    With the key being the item name, inserted in alphabetical order.
    """
    # Invert the mapping: item name -> list of Enchantments that apply.
    items_to_enchantments = defaultdict(list)
    for enchantment in data.values():
        for item_name in enchantment.items:
            items_to_enchantments[item_name].append(enchantment)
    # Return a plain dict: the old defaultdict(Item) silently constructed
    # an empty Item on any missing-key lookup by callers.
    return {name: Item(name, items_to_enchantments[name])
            for name in sorted(items_to_enchantments)}
Python
def main():
    """This function is here to help you test your final code. Once complete,
    the print out should match what's at the bottom of this file"""
    enchantment_data = generate_enchantments(get_soup())
    minecraft_items = generate_items(enchantment_data)
    for minecraft_item in minecraft_items.values():
        print(minecraft_item, "\n")
Python
def calc_cell(self, x, y):
    """Takes x and y coords and returns the re-calculated result.

    Raises IndexError (now with a descriptive message) when either
    coordinate exceeds the table bounds stored on the instance
    (self.x, self.y).
    """
    if x > self.x or y > self.y:
        raise IndexError(f"({x}, {y}) is outside the {self.x}x{self.y} table")
    return x * y
Python
def py2_earth_hours_left(start_date=BITE_CREATED_DT):
    """Return how many hours, rounded to 2 decimals, Python 2 has left
    on Planet Earth (calculated from start_date)"""
    delta = PY2_DEATH_DT - start_date
    # total_seconds() covers days/seconds/microseconds in one call,
    # unlike the old manual days*24 + seconds/3600 sum.
    return round(delta.total_seconds() / 3600, 2)
Python
def py2_miller_min_left(start_date=BITE_CREATED_DT):
    """Return how many minutes, rounded to 2 decimals, Python 2 has left
    on Planet Miller (calculated from start_date).

    One hour on Planet Miller equals 7 Earth years (Interstellar), so
    the conversion factor is 365 * 24 * 7 Earth hours per Miller hour.

    :param start_date: datetime to measure from (defaults to the bite's
        creation timestamp).
    :return: Miller minutes until PY2_DEATH_DT, rounded to 2 decimals.
    """
    delta = PY2_DEATH_DT - start_date
    # Intermediate rounding kept deliberately: the expected results are
    # derived from the already-rounded Earth-hours figure.
    earth_hours = round(delta.days * 24 + delta.seconds / 3600, 2)
    earth_hours_per_miller_hour = 365 * 24 * 7
    return round(earth_hours / earth_hours_per_miller_hour * 60, 2)
Python
def extract_dates(data):
    """Extract unique dates from a DB table text dump.

    :param data: multi-line string; data rows carry an ISO date starting
        at column 6 (every date in this data set begins with "20").
    :return: list of unique ``datetime.date`` objects (unordered).
    """
    # "20" at columns 6-7 marks a data row whose date field occupies
    # columns 6-15; header/separator lines fail this check and are skipped.
    return list({datetime.strptime(line[6:16], '%Y-%m-%d').date()
                 for line in data.splitlines()
                 if line[6:8] == "20"})
Python
def calculate_streak(dates):
    """Receives sequence (set) of dates and returns number of days
    on coding streak.

    Note that a coding streak is defined as consecutive days coded
    since yesterday, because today is not over yet, however if today
    was coded, it counts too of course.

    So as today is 12th of Nov, having dates 11th/10th/9th of Nov in
    the table makes for a 3 days coding streak.

    See the tests for more examples that will be used to pass your code.
    """
    streak_counter = 0
    max_streak = 0
    sorted_dates = sorted(dates)
    # A streak is only alive if the most recent coding day is today or
    # yesterday; otherwise return 0 immediately.
    # NOTE(review): sorted_dates[-1] raises IndexError on an empty
    # sequence -- presumably callers always pass at least one date; verify.
    if TODAY not in sorted_dates and TODAY != (sorted_dates[-1] + timedelta(days=1)):
        return max_streak
    # Walk consecutive pairs: a 1-day gap extends the current run; any
    # other gap ends it and resets the counter.
    for i in range(1, len(sorted_dates)):
        if (sorted_dates[i] - sorted_dates[i-1]).days == 1:
            streak_counter += 1
        else:
            max_streak = streak_counter
            streak_counter = 0
    # +1 converts the trailing pair-count into a day-count (a run of N
    # consecutive days produces N-1 one-day gaps).
    # NOTE(review): only the trailing run receives the +1; an earlier run
    # stored in max_streak is compared as a raw pair-count -- confirm this
    # asymmetry matches the intended test cases before changing it.
    max_streak = max(max_streak, streak_counter+1)
    return max_streak
    # unreachable statement kept as-is
    pass
Python
def sysinfo_scrape(output):
    """Scrape the output of ``screenfetch`` into a dictionary.

    The ASCII-art logo occupies a fixed-width left column; the boundary
    is taken from the last space in the second line (the user@host line).

    :param output: raw screenfetch output as a single string.
    :return: dict with a 'Name' key (user@host) plus one entry per
        "Key: Value" info line.
    """
    lines = output.splitlines()
    # Everything after the last space of line 1 belongs to the info column.
    info_start = lines[1].rfind(' ') + 1
    info = {'Name': lines[1][info_start:]}
    for line in lines[2:]:
        text = line[info_start:]
        if not text.strip():
            continue  # pure ASCII-art line, no info on it
        # maxsplit=1 so values may themselves contain ": "
        # (e.g. long CPU/resolution strings).
        key, value = text.split(': ', 1)
        info[key] = value
    return info
Python
def create_parser():
    """Create and return the ArgumentParser for the BMI calculator.

    Arguments:
      -w / --weight  weight in kg (float, default 0)
      -l / --length  length in cm (float, default 0)

    :return: a configured ``argparse.ArgumentParser``.
    """
    parser = argparse.ArgumentParser(description='Calculate your BMI')
    # action='store' is argparse's default and was dropped as redundant.
    parser.add_argument('-w', '--weight', type=float, default=0,
                        help='Your weight in kg')
    parser.add_argument('-l', '--length', type=float, default=0,
                        help='Your length in cm')
    return parser