def _get_config(config_location):
    """Get input and output configs from json."""
    with open(config_location, 'r') as json_file:
        configs = json.load(json_file, object_hook=Config.decode_config)
    return configs

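# Config.decode_config is not shown in these snippets. A minimal sketch of
# what such an object_hook might look like, assuming it simply exposes JSON
# objects with attribute-style access (the real decode_config may differ):
import json
from types import SimpleNamespace

def decode_config_sketch(obj: dict) -> SimpleNamespace:
    # json.load calls the hook once per decoded JSON object, innermost first.
    return SimpleNamespace(**obj)

# Usage: cfg = json.loads('{"input": {"path": "in.csv"}}', object_hook=decode_config_sketch)
# then cfg.input.path == "in.csv".
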
def multi_log(self, msg, severity, cloud_logging=False, cloud_struct=None, use_global_loggers=True, log_name=None, cloud_logger=None):
    """Log to the Python logger and StackDriver struct logger."""
    time_stamp = dt.utcnow()
    logger = self.log
    log_methods = {
        'DEBUG': logger.debug,
        'INFO': logger.info,
        'WARNING': logger.warning,
        'ERROR': logger.error,
        'CRITICAL': logger.critical,
    }
    severity = severity.upper()
    if severity not in log_methods:
        severity = 'INFO'
    # Log the traceback as well when the message is an exception.
    log_exec = isinstance(msg, Exception)
    log_methods[severity](msg, exc_info=log_exec)
    sd_severities = {
        'DEBUG': 'DEBUG',
        'INFO': 'INFO',
        'WARNING': 'WARNING',
        'ERROR': 'ERROR',
        'CRITICAL': 'CRITICAL',
    }
    if cloud_logging:
        cl = self.cloud_logger
        struct = {'message': str(msg)}
        if cloud_struct:
            struct = {**struct, **cloud_struct}
        cl.log_struct(
            struct,
            log_name=self.sd_log_url,
            timestamp=time_stamp,
            labels=self.labels,
            severity=sd_severities[severity])

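# A self-contained sketch of the severity-dispatch pattern used above, runnable
# with only the standard library (names below are illustrative, not from the
# original module):
import logging

logging.basicConfig(level=logging.DEBUG)
_logger = logging.getLogger('demo')
_methods = {
    'DEBUG': _logger.debug,
    'INFO': _logger.info,
    'WARNING': _logger.warning,
    'ERROR': _logger.error,
    'CRITICAL': _logger.critical,
}

def demo_log(msg, severity):
    # Unknown severities fall back to INFO, as in multi_log.
    _methods.get(severity.upper(), _logger.info)(msg)

demo_log('disk nearly full', 'warning')  # -> WARNING:demo:disk nearly full
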
def _get_secrets(secrets_location):
    """Get input and output secrets from json."""
    with open(secrets_location, 'r') as json_file:
        secrets = json.load(json_file, object_hook=Config.decode_config)
    return secrets

def roll_two_dice(self) -> int:
    """Roll a pair of dice."""
    first_die = self.roll_one_die()
    second_die = self.roll_one_die()
    self.this_throw = first_die + second_die
    self.is_a_double = (first_die == second_die)
    return self.this_throw

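# roll_one_die and print_dice are referenced but not shown in these snippets.
# A plausible minimal Dice class that roll_two_dice above would belong to
# (an illustrative assumption, not the original source):
import random

class Dice:
    def __init__(self):
        self.this_throw = 0       # total of the last throw
        self.is_a_double = False  # whether the last throw was a double

    def roll_one_die(self) -> int:
        # A fair six-sided die.
        return random.randint(1, 6)

    def print_dice(self):
        print('Throw =', self.this_throw, '(a double)' if self.is_a_double else '')
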
def remove_card(self) -> cards.Card:
    """Remove card from the player's cards."""
    assert len(self.cards) > 0  # Must be at least one card held by the player.
    this_card = self.cards.pop(0)
    return this_card

def print_player_status(self, this_player: player.Player):
    """Print a line of status info about the player to stdout."""
    print('Money = £', this_player.money,
          ' Square = ', self.player_square_name(this_player), sep='', end='')
    if this_player.in_jail:
        print(' (in jail).')
    else:
        print()

def player_put_on_square(self, this_player: player.Player):
    """When a player lands on a new square, update stats and (possibly) print some status to stdout."""
    # Add to stats about squares landed on.
    self.land_on_count[this_player.square] += 1
    if self.verbose:
        self.print_player_status(this_player)

def take_a_turn(self, this_player: player.Player):
    """Player takes a turn. May involve several throws of the dice (due to doubles),
    taking actions due to the square landed on (Go To Jail, Chance, etc.), or just trying to leave Jail."""
    if self.verbose:
        print('\nNew turn. Player # =', this_player.player_num, end=' ')
        self.print_player_status(this_player)
    turn_continues = True  # Set this to False when the turn is over.
    doubles_in_turn = 0  # Needed so that we send the player to Jail after 3 doubles in a turn.
    while turn_continues:
        this_throw = 0  # Zero indicates the dice haven't been thrown yet.
        # Check if player is in jail, and try getting out of jail if they are.
        if this_player.in_jail:
            this_throw = self.try_to_leave_jail(this_player)
            if this_player.in_jail:
                turn_continues = False
            if this_throw != 0:  # Only way to get out of jail is to throw a double.
                doubles_in_turn += 1
        # If they are still in Jail, then the turn is over.
        if not this_player.in_jail:
            # Now it is time to throw the dice and move...
            # ... but if dice were thrown in Jail, then no need to throw them again.
            if this_throw == 0:
                self.dice.roll_two_dice()
                if self.dice.is_a_double:  # Check for a double.
                    doubles_in_turn += 1
                if self.verbose:
                    self.dice.print_dice()
            # At this point, we know the player is not currently in Jail, and the dice have been thrown
            # (either in Jail or out of Jail).
            # Since dice have been thrown, it is possible that the limit of 3 doubles in a turn has been met,
            # which sends the player to Jail and ends the turn.
            if doubles_in_turn >= 3:
                self.go_to_jail(this_player)
                turn_continues = False
            # At last, we can move the player forward according to the latest dice throw.
            else:
                old_square = this_player.square
                this_player.square = self.board.forwards(current=this_player.square, spaces=self.dice.this_throw)
                # If the square that the player has landed on has a lower index than the one he started on,
                # then he must have passed Go during this move.
                if this_player.square < old_square:
                    self.passed_go(this_player)
                self.player_put_on_square(this_player)  # Update stats, print player status.
                # Take action depending on the type of square they've landed on.
                self.landed_on_a_square(this_player)
                # Player may have been put in Jail. If so, the turn is over.
                if this_player.in_jail:
                    turn_continues = False
        # Only way for turn to continue is if the throw was a double.
        if not (turn_continues and self.dice.is_a_double):
            turn_continues = False
    # Add to stats about squares that turns end on.
    self.turn_end_count[this_player.square] += 1
    # Update list of money at end of each turn.
    this_player.money_at_end_of_turn.append(this_player.money)
    if self.verbose:
        print('Turn over.')

def advance_to_square(self, this_player: player.Player, target_square: str) -> bool:
    """Move the given player directly to the named square.
    Return a bool to indicate if the player has passed Go in the process."""
    curr_square = this_player.square
    # Move the player to the target square.
    this_player.square = self.board.find_square(target_square)
    self.player_put_on_square(this_player)  # Update stats, print player status.
    new_square = this_player.square
    # If the square that the player has advanced to has a lower index than the one he started on,
    # then he must have passed Go during this move.
    return new_square < curr_square

def passed_go(self, this_player: player.Player):
    """Gives the player some money for passing Go."""
    reward = 200
    this_player.money += reward
    # TODO Increase money by just £100 if they currently have a loan from the bank.
    if self.verbose:
        print('Passed Go, reward = £', reward, sep='')

def go_to_jail(self, this_player: player.Player):
    """Send the given player to Jail."""
    assert not this_player.in_jail  # Only a player who is not in Jail can be sent to Jail.
    if self.verbose:
        print('Sent to Jail!!!')
    self.advance_to_square(this_player, 'Jail')
    this_player.in_jail = True
    this_player.double_attempts_left = 3

def try_to_leave_jail(self, this_player: player.Player) -> int:
    """The given player attempts to get out of Jail.
    Each call to this method is one attempt to leave.
    Return value is any roll of the dice that they did in Jail."""
    assert this_player.in_jail  # Only a player in Jail should try to leave it.
    # Use the player's Get Out Of Jail card, if they have one.
    if len(this_player.cards) > 0:
        removed_card = this_player.remove_card()
        assert removed_card.card_name[0:21] == 'Get out of jail free.'
        # Put the card back in its home pack.
        assert removed_card.pack_name in {'Chance', 'Community Chest'}
        if removed_card.pack_name == 'Chance':
            self.chance.add_card(removed_card)
        else:
            self.community_chest.add_card(removed_card)
        # Let the player out of Jail.
        this_player.in_jail = False
        if self.verbose:
            print('Used a Get Out Of Jail Free card to leave Jail.')
        return 0
    # Let the player out (with fine) if he has made three attempts already.
    if this_player.double_attempts_left == 0:
        this_player.money -= 50  # Player pays £50 fine to leave jail.
        this_player.in_jail = False
        # TODO If the player fails to roll doubles for three turns, he or she must pay the £50 fine and then
        # move the number shown on the dice or skip one turn.
        if self.verbose:
            print('Let out of Jail because they have made 3 attempts at doubles already, £50 fine.')
        return 0
    # Throw dice for a double.
    else:
        self.dice.roll_two_dice()
        if self.verbose:
            print('Attempting to get out of jail with a dice roll. ', end='')
            self.dice.print_dice()
        this_player.double_attempts_left -= 1
        if self.dice.is_a_double:
            this_player.in_jail = False
            if self.verbose:
                print('Got out of Jail with a dice roll.')
    return self.dice.this_throw

def landed_on_a_square(self, this_player: player.Player):
    """Do whatever actions the square that the player has landed on demands.
    Recursive, since some actions tell the player to move to another square, which then needs to be acted on."""
    this_square = self.player_square(this_player)
    if this_square.name == 'Go To Jail':
        self.go_to_jail(this_player)
    elif this_square.category in {'Chance', 'Community Chest'}:
        # Make a note of the index of the square that the player was on before the action card was acted on.
        square_before_action = this_player.square
        self.land_on_action_square(this_player, this_square)
        # The action card may have put the player on a new square. If so, act on that square too.
        if square_before_action != this_player.square:
            self.landed_on_a_square(this_player)
    # If player has landed on a tax square, reduce his money by the tax amount.
    elif this_square.name == 'Income Tax (pay £200)':
        this_player.money -= 200
    elif this_square.name == 'Super Tax (pay £100)':
        this_player.money -= 100
    # TODO Need to add actions for Go, Properties, Stations and Utilities.

def land_on_action_square(self, this_player: player.Player, this_square: board.Square):
    """Player has landed on either Chance or Community Chest.
    This method does the things that these action squares demand."""
    # Only players who are currently on Chance or Community Chest should do this method.
    assert self.player_square(this_player).category in {'Chance', 'Community Chest'}
    # Get an action_card from the top of either the Chance or Community Chest pack.
    pack_name = this_square.category
    if pack_name == 'Chance':
        action_card = self.chance.take_card()
    else:
        action_card = self.community_chest.take_card()
    if self.verbose:
        action_card.print_card(print_pack=True)
    if action_card.category == 'Keep Card':  # If it is a keep card, then the player should take it.
        this_player.add_card(action_card)
    elif action_card.category == 'Jail':  # If Go To Jail, send the player to Jail.
        self.go_to_jail(this_player)
    elif action_card.category == 'Advance':  # If Advance, send the player to the named square.
        just_passed_go = self.advance_to_square(this_player, action_card.advance_to)
        if just_passed_go:
            self.passed_go(this_player)  # Give the player £200 (or £100 if borrowing from bank)
    elif action_card.card_name == 'Go back three spaces':  # Move player back 3 squares.
        this_player.square = self.board.backwards(current=this_player.square, spaces=3)
        self.player_put_on_square(this_player)  # Update stats, print player status.
    elif action_card.card_name == 'Go back to Old Kent Road':  # Send player to Old Kent Road.
        this_player.square = self.board.find_square('Old Kent Road')
        self.player_put_on_square(this_player)  # Update stats, print player status.
    elif action_card.category == 'Money':  # If Money card, adjust player's money.
        this_player.money += action_card.money_amount
    # Except for keep cards, put the card back into its pack.
    if action_card.category != 'Keep Card':
        if pack_name == 'Chance':
            self.chance.add_card(action_card)
        else:
            self.community_chest.add_card(action_card)

def print_card(self, print_pack: bool):
    """Print out info about this card to stdout."""
    if print_pack:
        print('Pack =', self.pack_name, ' ', end='')
    print('Card =', self.card_name, ' Category =', self.category, end='')
    if self.category == 'Money':
        print(' Money Amount = £', self.money_amount, sep='')
    elif self.category == 'Advance':
        print(' To =', self.advance_to)
    else:
        print()

def find_square(self, target_name: str) -> int:
    """Return the index of the square with the given name. Return None if there is no square with that name."""
    found_square_num = None
    for i in range(len(self.squares)):
        if target_name == self.squares[i].name:
            found_square_num = i
            break
    return found_square_num

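# An equivalent, more idiomatic form of the linear search above, as a sketch:
# next() with a generator and a default replaces the sentinel-and-break pattern.
def find_square_idiomatic(squares, target_name):
    return next((i for i, square in enumerate(squares)
                 if square.name == target_name), None)
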
def write_ld(writer, protomsg):
    """
    Write a length-delimited protobuf message to the provided writer.
    Arguments:
        writer: a writer, e.g. a file opened in 'wb' mode or a BytesIO object
        protomsg: a protobuf message
    """
    # serialize the message to a byte string
    s = protomsg.SerializeToString()
    # encode the message length as a 4-byte big-endian block
    len_bytes = struct.pack('>L', len(s))
    # write message length + serialized message
    writer.write(len_bytes + s)

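# A quick, runnable illustration of the '>L' length prefix used above: four
# bytes, big-endian, unsigned, so framed messages can be up to 2**32 - 1 bytes.
import struct

payload = b'hello'
frame = struct.pack('>L', len(payload)) + payload
assert frame[:4] == b'\x00\x00\x00\x05'
# struct.unpack always returns a tuple, hence the [0].
assert struct.unpack('>L', frame[:4])[0] == 5
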
def read_ld(reader, msgtype):
    """
    Read length-delimited protobuf messages from the provided reader. Returns an iterator
    that can be used to traverse the messages found in the stream.
    Example:
        with open('path/to/file.ld', 'rb') as f:
            for msg in read_ld(f, pb.User):
                print(msg)
    Arguments:
        reader: a reader, e.g. a file opened with 'rb' or a BytesIO object
        msgtype: the generated protobuf message class, e.g. pb.User
    """
    assert reader is not None, "reader is required"
    assert msgtype is not None, "msgtype is required"
    while True:
        msg_len_bytes = reader.read(4)
        # EOF
        if len(msg_len_bytes) == 0:
            return
        # retrieve length prefix
        # struct.unpack always returns a tuple, even if there is only one element
        msg_len = struct.unpack('>L', msg_len_bytes)[0]
        # read message as a byte string
        proto_str = reader.read(msg_len)
        # EOF
        if len(proto_str) == 0:
            return
        # de-serialize the bytes into a protobuf message
        msg = msgtype()
        msg.ParseFromString(proto_str)
        yield msg

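# A round-trip check of write_ld/read_ld above (assuming both are in scope,
# with struct imported) that avoids generated protobuf classes: a stand-in
# message type implementing the two methods the functions actually call.
import io

class FakeMsg:
    def __init__(self, data=b''):
        self.data = data

    def SerializeToString(self):
        return self.data

    def ParseFromString(self, raw):
        self.data = raw

buf = io.BytesIO()
for text in (b'alpha', b'beta'):
    write_ld(buf, FakeMsg(text))
buf.seek(0)
assert [m.data for m in read_ld(buf, FakeMsg)] == [b'alpha', b'beta']
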
def canonical_order(graph, face):
    '''
    Outputs the nodes of the face in a canonical order;
    in particular, the first one is the lex-min.
    You need to use the graph structure to make this work.
    '''
    lex_sorted_nodes = sorted(face)
    first_node = lex_sorted_nodes[0]
    cycle_sorted_nodes = [first_node]
    local_cycle = nx.subgraph(graph, face)
    # Compute the second node locally based on angle orientation
    v = first_node
    locations = []
    neighbor_list = list(local_cycle.neighbors(v))
    for w in neighbor_list:
        locations.append(graph.nodes[w]["pos"] - graph.nodes[v]["pos"])
    angles = [float(np.arctan2(x[1], x[0])) for x in locations]
    neighbor_list.sort(key=dict(zip(neighbor_list, angles)).get)
    second_node = neighbor_list[0]
    cycle_sorted_nodes.append(second_node)
    # Now compute a canonical ordering of local_cycle, clockwise, starting
    # from first_node
    while len(cycle_sorted_nodes) < len(lex_sorted_nodes):
        v = cycle_sorted_nodes[-1]
        neighbor_list = list(local_cycle.neighbors(v))
        neighbor_list.remove(cycle_sorted_nodes[-2])
        cycle_sorted_nodes.append(neighbor_list[0])
    return cycle_sorted_nodes

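# A small runnable example of canonical_order on a unit-square face (assumes
# canonical_order above is in scope; positions must be numpy arrays so the
# position differences inside the function work):
import networkx as nx
import numpy as np

G = nx.cycle_graph(4)  # nodes 0-1-2-3-0
coords = {0: (0, 0), 1: (0, 1), 2: (1, 1), 3: (1, 0)}
for n, xy in coords.items():
    G.nodes[n]["pos"] = np.array(xy, dtype=float)

print(canonical_order(G, [2, 0, 3, 1]))  # an ordering starting at lex-min node 0
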
def delete_copies_up_to_permutation(array):
    '''
    Given an array of tuples, return an array consisting of one representative
    for each element in the orbit of the reordering action.
    '''
    # Note: this calls a one-argument canonical_order; the graph-based variant
    # shown above takes (graph, face), so a different overload is assumed here.
    cleaned_array = list(set([tuple(canonical_order(x)) for x in array]))
    return cleaned_array

def face_sierpinski_mesh(partition, special_faces):
    """'Sierpinskifies' certain faces of the graph by adding nodes and edges to
    certain faces.
    Args:
        partition (Gerrychain Partition): partition object which contains the assignment
            and whose graph will have edges and nodes added to it
        special_faces (list): list of faces that we want to add nodes/edges to
    Raises:
        RuntimeError: if SIERPINSKI_POP_STYLE of the config file is not 'uniform',
            'random', or 'zero'
    """
    graph = partition.graph
    # Get maximum node label.
    label = max(list(graph.nodes()))
    # Assign each node to its district in partition
    for node in graph.nodes():
        graph.nodes[node][config['ASSIGN_COL']] = partition.assignment[node]
    for face in special_faces:
        neighbors = []  # Neighbors of face
        locationCount = np.array([0, 0]).astype("float64")
        # For each face, add to neighbor_list and add to location count
        for vertex in face:
            neighbors.append(vertex)
            locationCount += np.array(graph.nodes[vertex]["pos"]).astype("float64")
        # Save the average of each of the face's positions
        facePosition = locationCount / len(face)
        # In order, store relative position of each vertex to the position of the face
        locations = [graph.nodes[vertex]['pos'] - facePosition for vertex in face]
        # Sort neighbors according to each node's angle with the center of the face
        angles = [float(np.arctan2(x[0], x[1])) for x in locations]
        neighbors.sort(key=dict(zip(neighbors, angles)).get)
        newNodes = []
        newEdges = []
        # For each consecutive pair of nodes, remove their edge, create a new
        # node at their average position, and connect each original node to the new node:
        for vertex, next_vertex in zip(neighbors, neighbors[1:] + [neighbors[0]]):
            label += 1
            # Add new node to graph with corresponding label at the average position
            # of vertex and next_vertex, and with 0 population and 0 votes
            graph.add_node(label)
            avgPos = (np.array(graph.nodes[vertex]['pos']) +
                      np.array(graph.nodes[next_vertex]['pos'])) / 2
            graph.nodes[label]['pos'] = avgPos
            graph.nodes[label][config['X_POSITION']] = avgPos[0]
            graph.nodes[label][config['Y_POSITION']] = avgPos[1]
            graph.nodes[label][config['POP_COL']] = 0
            graph.nodes[label][config['PARTY_A_COL']] = 0
            graph.nodes[label][config['PARTY_B_COL']] = 0
            # For each new node, 'move' a third of the population, Party A votes,
            # and Party B votes from its two adjacent nodes which previously existed
            # to itself (so that each previously existing node equally shares
            # its statistics with the two new nodes adjacent to it)
            # Note: should only be used when all special faces are NOT on edges
            if config['SIERPINSKI_POP_STYLE'] == 'uniform':
                for vert in [vertex, next_vertex]:
                    for keyword, orig_keyword in zip(['POP_COL', 'PARTY_A_COL', 'PARTY_B_COL'],
                                                     ['orig_pop', 'orig_A', 'orig_B']):
                        # Save original values if not done already
                        if orig_keyword not in graph.nodes[vert]:
                            graph.nodes[vert][orig_keyword] = graph.nodes[vert][config[keyword]]
                        # Increment values of new node and decrement values of old nodes
                        # by the appropriate amount.
                        graph.nodes[label][config[keyword]] += graph.nodes[vert][orig_keyword] // 3
                        graph.nodes[vert][config[keyword]] -= graph.nodes[vert][orig_keyword] // 3
                # Assign new node to same district as neighbor. Note that intended
                # behavior is that special_faces do not correspond to cut edges,
                # and therefore both vertex and next_vertex will be of the same
                # district.
                graph.nodes[label][config['ASSIGN_COL']] = graph.nodes[vertex][config['ASSIGN_COL']]
            # Choose a random adjacent node, assign the new node to the same partition,
            # and move half of its votes and population to said node
            elif config['SIERPINSKI_POP_STYLE'] == 'random':
                chosenNode = random.choice([graph.nodes[vertex], graph.nodes[next_vertex]])
                graph.nodes[label][config['ASSIGN_COL']] = chosenNode[config['ASSIGN_COL']]
                for keyword in ['POP_COL', 'PARTY_A_COL', 'PARTY_B_COL']:
                    graph.nodes[label][config[keyword]] += chosenNode[config[keyword]] // 2
                    chosenNode[config[keyword]] -= chosenNode[config[keyword]] // 2
            # Set the population and votes of the new nodes to zero. Do not change
            # previously existing nodes. Assign to random neighbor.
            elif config['SIERPINSKI_POP_STYLE'] == 'zero':
                graph.nodes[label][config['ASSIGN_COL']] = \
                    random.choice([graph.nodes[vertex][config['ASSIGN_COL']],
                                   graph.nodes[next_vertex][config['ASSIGN_COL']]])
            else:
                raise RuntimeError('SIERPINSKI_POP_STYLE must be "uniform", "random", or "zero"')
            # Remove edge between consecutive nodes if it exists
            if graph.has_edge(vertex, next_vertex):
                graph.remove_edge(vertex, next_vertex)
            # Add edge between both of the original nodes and the new node
            graph.add_edge(vertex, label)
            newEdges.append((vertex, label))
            graph.add_edge(label, next_vertex)
            newEdges.append((label, next_vertex))
            # Add node to connections
            newNodes.append(label)
        # Add an edge between each consecutive new node
        for vertex in range(len(newNodes)):
            graph.add_edge(newNodes[vertex], newNodes[(vertex + 1) % len(newNodes)])
            newEdges.append((newNodes[vertex], newNodes[(vertex + 1) % len(newNodes)]))
        # For each new edge of the face, set siblings to be the tuple of all
        # new edges
        siblings = tuple(newEdges)
        for edge in newEdges:
            graph.edges[edge]['siblings'] = siblings

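# The core operation above, isolated: split each edge of a cycle at its
# midpoint and join the new nodes into an inner cycle. A standalone sketch
# without the gerrychain config machinery (illustrative names only):
import networkx as nx
import numpy as np

def subdivide_cycle(graph, cycle_nodes):
    label = max(graph.nodes())
    new_nodes = []
    for u, v in zip(cycle_nodes, cycle_nodes[1:] + [cycle_nodes[0]]):
        label += 1
        mid = (np.asarray(graph.nodes[u]['pos']) + np.asarray(graph.nodes[v]['pos'])) / 2
        graph.add_node(label, pos=mid)
        if graph.has_edge(u, v):
            graph.remove_edge(u, v)
        graph.add_edge(u, label)
        graph.add_edge(label, v)
        new_nodes.append(label)
    # Connect consecutive midpoints, forming the inner 'Sierpinski' cycle.
    for i, n in enumerate(new_nodes):
        graph.add_edge(n, new_nodes[(i + 1) % len(new_nodes)])
    return new_nodes

G = nx.cycle_graph(3)
for n in G.nodes():
    G.nodes[n]['pos'] = np.array([np.cos(2 * np.pi * n / 3), np.sin(2 * np.pi * n / 3)])
subdivide_cycle(G, [0, 1, 2])
print(G.number_of_nodes(), G.number_of_edges())  # 6 nodes, 9 edges: one Sierpinski step
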
def preprocessing(path_to_json):
    """Takes file path to JSON graph, and returns the graph and its planar dual.
    Args:
        path_to_json (String): path to graph in JSON format
    Returns:
        graph (Gerrychain Graph): graph in JSON file following cleaning
        dual (Gerrychain Graph): planar dual of graph
    """
    graph = Graph.from_json(path_to_json)
    # For each node in graph, set 'pos' keyword to position
    for node in graph.nodes():
        graph.nodes[node]['pos'] = (graph.nodes[node][config['X_POSITION']],
                                    graph.nodes[node][config['Y_POSITION']])
    save_fig(graph, config['UNDERLYING_GRAPH_FILE'], config['WIDTH'])
    dual = facefinder.restricted_planar_dual(graph)
    return graph, dual

def save_fig(graph, path, size):
    """Saves graph to file in the desired format
    Args:
        graph (Gerrychain Graph): graph to be saved
        path (String): path to file location
        size (int): width of image
    """
    plt.figure()
    nx.draw(graph, pos=nx.get_node_attributes(graph, 'pos'), node_size=1, width=size, cmap=plt.get_cmap('jet'))
    # Gets format from end of filename
    plt.savefig(path, format=path.split('.')[-1])
    plt.close()

def determine_special_faces(graph, dist):
    """Determines the special faces, which are those nodes whose distance is
    at least dist
    Args:
        graph (Gerrychain Graph): graph to determine special faces of
        dist (numeric): distance such that nodes are considered special if
            they have a 'distance' of at least this value
    Returns:
        list: list of nodes which are special
    """
    return [node for node in graph.nodes() if graph.nodes[node]['distance'] >= dist]

def determine_special_faces_random(graph, exp=1):
    """Determines the special faces, which are determined randomly with the probability
    of a given node being considered special being proportional to its distance
    raised to the exp power
    Args:
        graph (Gerrychain Graph): graph to determine special faces of
        exp (float, optional): exponent appearing in probability of a given node
            being considered special. Defaults to 1.
    Returns:
        list: list of nodes which are special
    """
    max_dist = max(graph.nodes[node]['distance'] for node in graph.nodes())
    # Note: random.uniform must be called; the original compared the function
    # object itself, which raises a TypeError in Python 3.
    return [node for node in graph.nodes()
            if random.uniform(0, 1) < (graph.nodes[node]['distance'] / max_dist) ** exp]

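# A quick sanity check of the distance weighting (assumes the fixed function
# above is in scope; toy graph with hand-set 'distance' attributes):
import networkx as nx
import random

H = nx.path_graph(5)
for n in H.nodes():
    H.nodes[n]['distance'] = n  # node 0 is never special; node 4 almost always is
random.seed(0)
print(determine_special_faces_random(H, exp=2))
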
def metamander_around_partition(partition, dual, tag, secret=False, special_param=2):
    """Metamanders around a partition by determining the set of special faces,
    and then sierpinskifying them.
    Args:
        partition (Gerrychain Partition): Partition to metamander around
        dual (Networkx Graph): planar dual of partition's graph
        tag (String): tag added to filenames to identify the run
        secret (Boolean): whether to metamander 'in secret'. If True, determines
            special faces randomly, else not.
        special_param (numeric): additional parameter passed to special faces function
    """
    facefinder.viz(partition, set([]))
    plt.savefig("./plots/large_sample/target_maps/target_map" + tag + ".png", format='png')
    plt.close()
    # Set of edges which cross from one district to another one
    cross_edges = facefinder.compute_cross_edges(partition)
    # Edges of dual graph corresponding to cross_edges
    dual_crosses = [edge for edge in dual.edges if dual.edges[edge]['original_name'] in cross_edges]
    # Assigns the graph distance from the dual_crosses to each node of the dual graph
    facefinder.distance_from_partition(dual, dual_crosses)
    # Assign special faces based upon set distances
    if secret:
        special_faces = determine_special_faces_random(dual, special_param)
    else:
        special_faces = determine_special_faces(dual, special_param)
    # Metamander around the partition by Sierpinskifying the special faces
    face_sierpinski_mesh(partition, special_faces)

def saveRunStatistics(statistics, tag):
    """Saves the election statistics of a given list of partitions to a JSON file
    Args:
        statistics (Iterable): Iterable of the election statistics of the partitions
            of a run
        tag (String): tag added to filename to identify run
    """
    with open('generated_data/run_statistics_{}.json'.format(tag), 'w') as outfile:
        try:
            json.dump(statistics, outfile)
        except Exception:
            track = traceback.format_exc()
            print(track)
            print('Unable to save run statistics to file.')

def run_chain(init_part, chaintype, length, ideal_population, id, tag):
    """Runs a Recom chain, saves the desired election statistics to a file, and
    returns the most gerrymandered plan found
    Args:
        init_part (Gerrychain Partition): initial partition of chain
        chaintype (String): indicates which proposal to be used to generate
            spanning tree during Recom. Must be either "tree" or "uniform_tree"
        length (int): total steps of chain
        ideal_population (numeric): target population of each district
        id (String): id of experiment, used when printing progress
        tag (String): tag added to filename to identify run
    Raises:
        RuntimeError: If chaintype is neither "tree" nor "uniform_tree"
    Returns:
        the most left-gerrymandered partition found by the chain
    """
    graph = init_part.graph
    for edge in graph.edges():
        graph.edges[edge]['cut_times'] = 0
        graph.edges[edge]['sibling_cuts'] = 0
        if 'siblings' not in graph.edges[edge]:
            graph.edges[edge]['siblings'] = tuple([edge])
    popbound = within_percent_of_ideal_population(init_part, config['EPSILON'])
    # Determine proposal for generating spanning tree based upon parameter
    if chaintype == "tree":
        tree_proposal = partial(recom, pop_col=config["POP_COL"], pop_target=ideal_population,
                                epsilon=config['EPSILON'], node_repeats=config['NODE_REPEATS'],
                                method=facefinder.my_mst_bipartition_tree_random)
    elif chaintype == "uniform_tree":
        tree_proposal = partial(recom, pop_col=config["POP_COL"], pop_target=ideal_population,
                                epsilon=config['EPSILON'], node_repeats=config['NODE_REPEATS'],
                                method=facefinder.my_uu_bipartition_tree_random)
    else:
        print("Chaintype used: ", chaintype)
        raise RuntimeError("Chaintype not recognized. Use 'tree' or 'uniform_tree' instead")
    # Chain to be run
    chain = MarkovChain(tree_proposal, Validator([popbound]), accept=accept.always_accept, initial_state=init_part,
                        total_steps=length)
    electionDict = {
        'seats': (lambda x: x[config['ELECTION_NAME']].seats('PartyA')),
        'won': (lambda x: x[config['ELECTION_NAME']].seats('PartyA')),  # alias of 'seats'
        'efficiency_gap': (lambda x: x[config['ELECTION_NAME']].efficiency_gap()),
        'mean_median': (lambda x: x[config['ELECTION_NAME']].mean_median()),
        'mean_thirdian': (lambda x: x[config['ELECTION_NAME']].mean_thirdian()),
        'partisan_bias': (lambda x: x[config['ELECTION_NAME']].partisan_bias()),
        'partisan_gini': (lambda x: x[config['ELECTION_NAME']].partisan_gini())
    }
    # Run chain, save each desired statistic, and keep track of cuts. Save the most
    # left-gerrymandered partition.
    statistics = {statistic: [] for statistic in config['ELECTION_STATISTICS']}
    # Value of a partition is determined by each of the Gerry Statistics.
    # Lexicographical ordering is used, such that if two partitions have the same
    # value under the first Gerry Statistic, then the second is used as a tie
    # breaker, and so on.
    leftManderVal = [float('inf')] * len(config['GERRY_STATISTICS'])
    leftMander = None
    for i, partition in enumerate(chain):
        for edge in partition["cut_edges"]:
            graph.edges[edge]['cut_times'] += 1
            for sibling in graph.edges[edge]['siblings']:
                graph.edges[sibling]['sibling_cuts'] += 1
        # Save statistics of partition
        for statistic in config['ELECTION_STATISTICS']:
            statistics[statistic].append(electionDict[statistic](partition))
        # Update left mander if applicable
        curPartVal = [electionDict[statistic](partition)
                      for statistic in config['GERRY_STATISTICS']]
        if curPartVal < leftManderVal:
            leftManderVal = curPartVal
            leftMander = partition
        if i % 500 == 0:
            print('{}: {}'.format(id, i))
    saveRunStatistics(statistics, tag)
    return leftMander

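# The leftMander update above relies on Python's lexicographic list comparison;
# a quick self-contained illustration of that tie-breaking:
best_val = [float('inf'), float('inf')]
for candidate in ([3, 9], [3, 2], [4, 0]):
    # First element decides; the second only breaks ties.
    if candidate < best_val:
        best_val = candidate
print(best_val)  # [3, 2]
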
def drawGraph(graph, property, tag):
    """Draws graph with edges colored according to the value of their chosen
    property. Saves to file.
    Args:
        graph (Networkx Graph): graph to draw and save
        property (String): property of edges to use for edge colormap
        tag (String): tag added to filename to identify graph
    """
    edge_colors = [graph.edges[edge][property] for edge in graph.edges()]
    vmin = min(edge_colors)
    vmax = max(edge_colors)
    cmap = plt.get_cmap(config['COLORMAP1'])
    plt.figure()
    nx.draw(graph, pos=nx.get_node_attributes(graph, 'pos'), node_size=0,
            edge_color=edge_colors, node_shape='s',
            edge_cmap=cmap, width=1, edge_vmin=vmin, edge_vmax=vmax)
    # A ScalarMappable with an empty array supplies the colorbar for the edge colormap.
    sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=vmin, vmax=vmax))
    sm._A = []
    plt.colorbar(sm, shrink=0.8)
    plt.savefig('./plots/edges_plots/edges_{}.png'.format(tag))
    plt.close()

Python | def drawDoubleGraph(graph, property, tag):
"""Draws graph with edges colored according to the value of their chosen
property and whether or not they are from a special face. Saves to file.
Identical to drawGraph() excepts uses two different edge colormaps for whether
or not an edge comes from a special face.
Args:
graph (Networkx Graph): graph to draw and save
property (String): property of edges to use for edge colormap
tag (String): tag added to filename to identify graph
"""
special_edges = [edge for edge in graph.edges() if len(graph.edges[edge]['siblings']) > 1]
orig_edges = [edge for edge in graph.edges() if len(graph.edges[edge]['siblings']) == 1]
G_special = graph.edge_subgraph(special_edges)
G_orig = graph.edge_subgraph(orig_edges)
special_edge_colors = [graph.edges[edge][property] for edge in special_edges]
orig_edge_colors = [graph.edges[edge][property] for edge in orig_edges]
vmin = min(min(special_edge_colors), min(orig_edge_colors))
vmax = max(max(special_edge_colors), max(orig_edge_colors))
cmap1 = plt.get_cmap(config['COLORMAP1'])
cmap2 = plt.get_cmap(config['COLORMAP2'])
plt.figure()
nx.draw(G_orig, pos=nx.get_node_attributes(graph, 'pos'), node_size=0,
edge_color=orig_edge_colors, node_shape='s',
edge_cmap=cmap1, width=1, edge_vmin=vmin, edge_vmax=vmax)
nx.draw(G_special, pos=nx.get_node_attributes(graph, 'pos'), node_size=0,
edge_color=special_edge_colors, node_shape='s',
edge_cmap=cmap2, width=1, edge_vmin=vmin, edge_vmax=vmax)
    sm1 = plt.cm.ScalarMappable(cmap=cmap1, norm=plt.Normalize(vmin=vmin, vmax=vmax))
sm1._A = []
clb_orig = plt.colorbar(sm1, shrink=0.8)
clb_orig.ax.set_title('Original')
    sm2 = plt.cm.ScalarMappable(cmap=cmap2, norm=plt.Normalize(vmin=vmin, vmax=vmax))
sm2._A = []
clb_special = plt.colorbar(sm2, shrink=0.8)
clb_special.ax.set_title('Special')
plt.savefig('./plots/edges_plots/edges_{}.png'.format(tag))
    plt.close()
Python | def saveGraphStatistics(graph, tag):
"""Saves the statistics of a graph to JSON file.
Args:
graph (Networkx Graph): graph to have data saved
        tag (String): tag added to filename to identify graph
"""
data = [(edge, graph.edges[edge]['cut_times'], graph.edges[edge]['sibling_cuts']) for edge in graph.edges()]
with open('generated_data/graph_statistics_{}.json'.format(tag), 'w') as outfile:
try:
json.dump(data, outfile, indent=2)
        except Exception:
track = traceback.format_exc()
print(track)
            print('Unable to save graph statistics to file.')
Python | def savePartition(partition, tag):
"""Saves a partition to a JSON file
Args:
partition (Gerrychain Partition): partition to save
tag (String): tag added to filename to identify partition
"""
with open('generated_data/partition_{}.json'.format(tag), 'w') as outfile:
try:
json.dump(partition.assignment.to_dict(), outfile, indent=2)
        except Exception:
track = traceback.format_exc()
print(track)
            print('Unable to save partition to file')
Python | def main(config_data, id):
"""Runs a single experiment with the given config file. Loads a graph,
runs a Chain to search for a Gerrymander, metamanders around that partition,
runs another chain, and then saves the generated data.
Args:
config_data (Object): configuration of experiment loaded from JSON file
id (String): id of experiment, used in tags to differentiate between
experiments
"""
try:
timeBeg = time.time()
print('Experiment', id, 'has begun')
# Save configuration into global variable
global config
config = config_data
# Get graph and dual graph
graph, dual = preprocessing(config["INPUT_GRAPH_FILENAME"])
# List of districts in original graph
parts = list(set([graph.nodes[node][config['ASSIGN_COL']] for node in graph.nodes()]))
# Ideal population of districts
ideal_pop = sum([graph.nodes[node][config['POP_COL']] for node in graph.nodes()]) / len(parts)
# Initialize partition
election = Election(
config['ELECTION_NAME'],
{'PartyA': config['PARTY_A_COL'],
'PartyB': config['PARTY_B_COL']}
)
updaters = {'population': Tally(config['POP_COL']),
'cut_edges': cut_edges,
config['ELECTION_NAME'] : election
}
partition = Partition(graph=graph, assignment=config['ASSIGN_COL'], updaters=updaters)
# Run Chain to search for a gerrymander, and get it
mander = run_chain(partition, config['CHAIN_TYPE'],
config['FIND_GERRY_LENGTH'], ideal_pop, id + 'a',
config['ORIG_RUN_STATS_TAG'] + id)
savePartition(mander, config['LEFT_MANDER_TAG'] + id)
# Metamanders around the found gerrymander
metamander_around_partition(mander, dual, config['TARGET_TAG'] + id, config['SECRET'], config['META_PARAM'])
# Refresh assignment and election of partition
updaters[config['ELECTION_NAME']] = Election(
config['ELECTION_NAME'],
{'PartyA': config['PARTY_A_COL'],
'PartyB': config['PARTY_B_COL']}
)
partition = Partition(graph=graph, assignment=config['ASSIGN_COL'], updaters=updaters)
# Run chain again
run_chain(partition, config['CHAIN_TYPE'], config['SAMPLE_META_LENGTH'],
ideal_pop, id + 'b', config['GERRY_RUN_STATS_TAG'] + id)
# Save data from experiment to JSON files
drawGraph(partition.graph, 'cut_times', config['GRAPH_TAG'] + '_single_raw_' + id)
drawGraph(partition.graph, 'sibling_cuts', config['GRAPH_TAG'] + '_single_adjusted_' + id)
drawDoubleGraph(partition.graph, 'cut_times', config['GRAPH_TAG'] + '_double_raw_' + id)
drawDoubleGraph(partition.graph, 'sibling_cuts', config['GRAPH_TAG'] + '_double_adjusted_' + id)
saveGraphStatistics(partition.graph, config['GRAPH_STATISTICS_TAG'] + id)
print('Experiment {} completed in {:.2f} seconds'.format(id, time.time() - timeBeg))
    except Exception:
# Print notification if any experiment fails to complete
track = traceback.format_exc()
print(track)
        print('Experiment {} failed to complete after {:.2f} seconds'.format(id, time.time() - timeBeg))
Python | def captured_templates(app):
"""Use signals to capture the templates rendered for a route."""
recorded = []
def record(sender, template, context, **extra):
recorded.append((template, context))
template_rendered.connect(record, app)
try:
yield recorded
finally:
        template_rendered.disconnect(record, app)
Python | def template_used(app, client, route):
"""Return True if the named template was the only one rendered by the route."""
with captured_templates(app) as templates:
client.get(route, follow_redirects=True)
        return len(templates), templates[0][0].name
Python | def add_and_commit(self):
"""Add the record to the session and commit it to the database."""
if self.commit:
db.session.add(self)
try:
db.session.commit()
except IntegrityError as error:
db.session.rollback()
                raise error
Python | def password(self, password, commit=True):
"""Hash the given password and store it in the database.
:param commit: If True, commits the record to the database, defaults to True
"""
self.commit = commit
self._hash = bcrypt.generate_password_hash(
password, current_app.config['BCRYPT_LOG_ROUNDS']
).decode('utf-8')
        self.add_and_commit()
Python | def create_fake_users(cls, count=100):
"""Create the number of fake users given by count.
Returns a list of the created users for possible echoing to a cli function.
"""
users = []
fake = Faker()
for _ in range(count):
email = fake.email()
password = fake.password()
users.append((email, password))
user = cls(email=email, password=password, commit=False)
db.session.add(user)
db.session.commit()
        return users
Python | def create_app(config=None):
"""Return a configured instance of Flask."""
app = Flask(
__name__,
template_folder='./templates',
static_folder='./static'
)
config = config or os.getenv('APP_SETTINGS', 'app.ProdConfig')
app.config.from_object(config)
register_extensions(app)
register_blueprints(app)
from .models import User
login_manager.login_view = 'user.login'
login_manager.login_message = const.LOGIN_DIRECTIVE_MSG
login_manager.login_message_category = 'danger'
@login_manager.user_loader
def load_user(user_id):
return User.query.filter(User.id == int(user_id)).first()
@app.errorhandler(HTTPException)
def error_handler(err):
code = err.code
return render_template('errors.html', err=err), code
@app.shell_context_processor
def shell_context_processor():
return {
'app': app,
'db': db,
'User': User,
}
@app.cli.command()
@click.option('-e', '--email', prompt='Email', help="The user's email address.")
@click.option('-p', '--password', prompt='Password', help="The user's password.")
def create_user(email, password):
"""Offer a CLI interface into creating a user."""
try:
User(email=email, password=password)
except IntegrityError:
print('Error: Duplicate email address')
@app.cli.command()
@click.option('-c', '--count', default=100,
help='The number of fake users to create. Defaults to 100')
@click.option('--no-echo', is_flag=True, default=False,
              help='If passed, suppresses record output')
def create_fake_users(count, no_echo):
"""Create the indicated number of fake users and output their emails and passwords."""
users = User.create_fake_users(count=count)
if not no_echo:
for user in users:
print(f'{user[0]}: {user[1]}')
@app.cli.command()
def create_fake_data():
"""Create dummy records in the database."""
User.create_fake_users()
    return app
Python | def create_user(email, password):
"""Offer a CLI interface into creating a user."""
try:
User(email=email, password=password)
except IntegrityError:
        print('Error: Duplicate email address')
Python | def create_fake_users(count, no_echo):
"""Create the indicated number of fake users and output their emails and passwords."""
users = User.create_fake_users(count=count)
if not no_echo:
for user in users:
            print(f'{user[0]}: {user[1]}')
Python | def reset_with_token(token):
"""Updates the user's password."""
timed_serializer = URLSafeTimedSerializer(current_app.config['SECRET_KEY'])
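    # loads() raises BadSignature on tampering and SignatureExpired (a
    # BadSignature subclass) once max_age=3600 seconds have passed.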
try:
email = timed_serializer.loads(token, salt='recovery-token', max_age=3600)
except BadSignature:
abort(404)
form = PasswordForm()
if form.validate_on_submit():
user = User.select_by_email(email=email)
user.password = form.password.data
flash(const.RESET_PASSWORD_SUCCESS, 'success')
return redirect(url_for('user.login'))
    return render_template('user/password.html', form=form)
Python | def _create_vehicles(self) -> None:
"""Create some new random vehicles of a given type, and add them on the road."""
other_vehicles_type = utils.class_from_path(self.config["other_vehicles_type"])
other_per_controlled = near_split(self.config["vehicles_count"], num_bins=self.config["controlled_vehicles"])
self.controlled_vehicles = []
for others in other_per_controlled:
            # Added so that the ego vehicle will not necessarily start at the
            # back; it is placed randomly among the other vehicles
            # (np_random.randint raises when others // 2 == 0, so guard it)
            split = self.np_random.randint(others // 2) if others >= 2 else 0
for _ in range(split):
target_speed = self.np_random.uniform(1.0*self.config["speed_limit"],1.2*self.config["speed_limit"])
speed = target_speed + self.np_random.normal(0,2)
vehicle = other_vehicles_type.create_random(self.road, spacing=1 / self.config["vehicles_density"],
speed=speed)
vehicle.target_speed = target_speed
vehicle.randomize_behavior()
self.road.vehicles.append(vehicle)
controlled_vehicle = self.action_type.vehicle_class.create_random(
self.road,
speed=25,
lane_id=self.config["initial_lane_id"],
spacing=self.config["ego_spacing"]
)
self.controlled_vehicles.append(controlled_vehicle)
#self.road.vehicles.append(controlled_vehicle)
self.road.vehicles.insert(0,controlled_vehicle)
for _ in range(split,others):
target_speed = self.np_random.uniform(0.8*self.config["speed_limit"],0.95*self.config["speed_limit"])
speed = target_speed + self.np_random.normal(0,2)
vehicle = other_vehicles_type.create_random(self.road, spacing=1 / self.config["vehicles_density"],
speed=speed)
vehicle.target_speed = target_speed
vehicle.randomize_behavior()
            self.road.vehicles.append(vehicle)
Python | def _reward(self, action: Action) -> float:
"""
The reward is defined to foster driving at high speed, on the rightmost lanes, and to avoid collisions.
:param action: the last action performed
:return: the corresponding reward
"""
neighbours = self.road.network.all_side_lanes(self.vehicle.lane_index)
lane = self.vehicle.target_lane_index[2] if isinstance(self.vehicle, ControlledVehicle) \
else self.vehicle.lane_index[2]
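        # Speed shaping: the reward ramps up toward target_speed, then back
        # down above it, discouraging overspeed.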
if self.vehicle.speed <= self.config["target_speed"]:
scaled_speed = utils.lmap(self.vehicle.speed, self.config["reward_speed_range_lower"], [0, 1])
else:
scaled_speed = 1 - utils.lmap(self.vehicle.speed, self.config["reward_speed_range_upper"], [0, 1])
rear_break, front_dist = self.calc_rear_break_front_dist()
scaled_deceleration = utils.lmap(-rear_break,self.config["reward_rear_deceleration_range"], [0,1])
scaled_front_distance = 1 - utils.lmap(front_dist, self.config["reward_front_dist_range"],[0,1])
dist_from_lane_center = abs(self.vehicle.lane.local_coordinates(self.vehicle.position)[1])
scaled_dist_from_lane_center = utils.lmap(dist_from_lane_center,[0,0.5*self.vehicle.lane.DEFAULT_WIDTH],[0,1])
reward = \
+ self.config["collision_reward"] * self.vehicle.crashed \
+ self.config["right_lane_reward"] * lane / max(len(neighbours) - 1, 1) \
+ self.config["high_speed_reward"] * np.clip(scaled_speed, 0, 1) \
+ self.config["reward_rear_brake"] * np.clip(scaled_deceleration, 0, 1)\
+ self.config["reward_front_dist"] * np.clip(scaled_front_distance, 0, 1)
#+ self.config["reward_off_road"] * (not self.vehicle.on_road)
#reward = utils.lmap(reward,
# [self.config["collision_reward"],
# self.config["high_speed_reward"] + self.config["right_lane_reward"]],
# [0, 1])
if not isinstance(self.action_type,DiscreteMetaAction):
reward += self.config["reward_non_centered"] * np.clip(scaled_dist_from_lane_center, 0, 1)
reward = -self.config["high_speed_reward"] if not self.vehicle.on_road else reward
        return reward
Python | def acceleration(self,
ego_vehicle: ControlledVehicle,
front_vehicle: Vehicle = None,
rear_vehicle: Vehicle = None) -> float:
"""
Compute an acceleration command with the Intelligent Driver Model.
The acceleration is chosen so as to:
- reach a target speed;
- maintain a minimum safety distance (and safety time) w.r.t the front vehicle.
:param ego_vehicle: the vehicle whose desired acceleration is to be computed. It does not have to be an
                            IDM vehicle, which is why this method is a class method. This allows an IDM vehicle to
                            reason about other vehicles' behaviors even though they may not be IDMs.
:param front_vehicle: the vehicle preceding the ego-vehicle
:param rear_vehicle: the vehicle following the ego-vehicle
:return: the acceleration command for the ego-vehicle [m/s2]
"""
if not ego_vehicle or not isinstance(ego_vehicle, Vehicle):
return 0
ego_target_speed = abs(utils.not_zero(getattr(ego_vehicle, "target_speed", 0)))
acceleration_idm = self.COMFORT_ACC_MAX * (
1 - np.power(max(ego_vehicle.speed, 0) / ego_target_speed, self.DELTA))
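        # The branch below blends IDM with a constant-acceleration heuristic
        # (CAH), as in the Enhanced IDM / ACC model of Kesting et al.;
        # COOLNESS weights the two terms.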
if front_vehicle:
d = ego_vehicle.lane_distance_to(front_vehicle)
acceleration_idm -= self.COMFORT_ACC_MAX * \
np.power(self.desired_gap(ego_vehicle, front_vehicle) / utils.not_zero(d), 2)
front_acc = front_vehicle.action["acceleration"]
ego_speed = ego_vehicle.speed
front_speed = front_vehicle.speed
if front_speed*(ego_speed - front_speed) < -2 * d * front_acc:
acceleration_cah = (ego_speed ** 2 * front_acc) \
/ (front_speed ** 2 - 2 * d * front_acc)
else:
acceleration_cah = front_acc - ((ego_speed - front_speed) ** 2 * np.heaviside(ego_speed - front_speed,0)) / (2 * d)
if acceleration_idm >= acceleration_cah:
acceleration = acceleration_idm
else:
acceleration = (1 - self.COOLNESS) * acceleration_idm + \
self.COOLNESS * (acceleration_cah +
abs(self.COMFORT_ACC_MIN) * np.tanh((acceleration_idm - acceleration_cah)/abs(self.COMFORT_ACC_MIN)))
else:
acceleration = acceleration_idm
        return acceleration
Python | def index():
'''
When you request the root path, you'll get the index.html template.
'''
    return render_template('index.html', links=all_links())
Python | def validate_token(hostname, token):
""" Validate the user token.
Returns True for a successful validation, False otherwise
"""
if token:
url = hostname + "/api.asp?cmd=logon&token=" + token
try:
            response = urlopen(url)
            # response.read() returns bytes on Python 3, so decode before the membership check
            if token in response.read().decode('utf-8', errors='ignore'):
                return True
        except Exception:
            pass  # Always catch authentication errors; other logon methods may be attempted
print('Failed to use token provided')
    return False
Python | def FogBugz(fbConstructor, hostname, token=None, username=None, password=None, fogbugzrc=None,
fogbugzPrefix='', interactive=True, storeCredentials=False):
""" Calls the constructor specified by fbConstructor (hence, despite this being a function use
CapWords naming convention)
fbConstructor: Fogbugz constructor class. Typically fogbugz.FogBugz, fborm.FogBugzORM or
kiln.Kiln
hostname: passed directly to the fbInterface
token, username, password: input credentials
fogbugzrc, fogbugzPrefix, interactive: Passed to method get_credential
storeCredentials: If active, create attributes token, username and password. This opens the
door to using it for login to other system, which is convenient, but the
programmer can also do what he wants with the password (which is bad).
The following is attempted in sequence:
1. Use token provided
2. Use username provided
3. Get token with function get_credential
4. Get username and password with function get_credential (interactive=True)
TODO: Support passing a list of args to fbConstructor
"""
if token and (username or password):
raise TypeError("If you supply 'token' you cannot supply 'username' or 'password'")
if token and validate_token(hostname, token):
fb = connect(fbConstructor, hostname, token=token)
elif username:
if not password:
password = get_credential('password', fogbugzrc, fogbugzPrefix, interactive)
fb = connect(fbConstructor, hostname, username=username, password=password)
else:
token = get_credential('token', fogbugzrc, fogbugzPrefix, interactive=False)
if validate_token(hostname, token):
fb = connect(fbConstructor, hostname, token=token)
else:
username = get_credential('username', fogbugzrc, fogbugzPrefix, interactive)
password = get_credential('password', fogbugzrc, fogbugzPrefix, interactive)
fb = connect(fbConstructor, hostname, username=username, password=password)
if storeCredentials:
fb.token = token
fb.username = username
fb.password = password
    return fb
Python | def connect(fb_constructor, hostname, token=None, username=None, password=None):
''' Call constructor fb_constructor and log on'''
    if bool(token) == bool(username):
        raise TypeError("Supply exactly one of 'token' or 'username'")
    if bool(username) != bool(password):
        raise TypeError("username and password must both be set or both be omitted")
if token:
fb = fb_constructor(hostname, token=token)
else:
fb = fb_constructor(hostname)
fb.logon(username, password)
    return fb
Python | def sendTouC():
"""Simulates motors sending CAN messages back to the uC
Returns:
        list: [hex CAN ID, hex message] of the last message sent
"""
currentID = CarState.motor_info[CarState.infoIdx]
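    # Each payload packs two 32-bit values into 64 bits:
    # tx_message = (high_word << 32) + low_word (assumed CAN frame layout).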
if currentID == CarState.VELOCITY_ID:
tx_message = int(CarState.CURRENT_VELOCITY)
tx_message = (tx_message << 32) + int(
(60 * CarState.CURRENT_VELOCITY) / (math.pi * 2 * CarState.radius)
)
elif currentID == CarState.MOTOR_BUS_ID:
tx_message = int(CarState.ABSOLUTE_CURRENT)
tx_message = (tx_message << 32) + int(CarState.BUS_VOLTAGE)
elif currentID == CarState.MOTOR_PHASE_CURRENT_ID:
tx_message = int(CarState.PHASE_C_CUR)
tx_message = (tx_message << 32) + int(CarState.PHASE_B_CUR)
elif currentID == CarState.MOTOR_VOLTAGE_VECTOR_ID:
tx_message = int(CarState.V_REAL)
tx_message = (tx_message << 32) + int(CarState.V_IMAGINE)
elif currentID == CarState.MOTOR_CURRENT_VECTOR_ID:
tx_message = int(CarState.I_REAL)
tx_message = (tx_message << 32) + int(CarState.I_IMAGINE)
elif currentID == CarState.MOTOR_BACKEMF_ID:
tx_message = int(CarState.REAL_COMP)
tx_message = (tx_message << 32) + int(CarState.NEUTRAL_MOTOR)
elif currentID == CarState.MOTOR_TEMP_ID:
tx_message = int(CarState.PHASEC_TEMP)
tx_message = (tx_message << 32) + int(CarState.INTERNAL_TEMP)
write(currentID, tx_message)
    CarState.infoIdx = (CarState.infoIdx + 1) % len(CarState.motor_info)  # advance to the next info frame
CarState.last_message = [hex(currentID), hex(tx_message)]
    return CarState.last_message
Python | def read():
"""Reads last message from CAN2 bus
Returns:
list: [ID, Message]
"""
    return CarState.last_message
Python | def confirm_drive():
"""Acts as the motor controller confirming
that the proper message is received periodically.
If the message is received, the motor speed is changed
accordingly. If it is not received, the motor speed stays constant.
Returns:
tuple: desired and current velocities to display
"""
try:
id_, message, _ = read()
id_ = int(id_, 16)
message = int(message, 16)
if id_ == CarState.MOTOR_DRIVE_ID:
# Read the message and separate
desired_current = (message >> 32) & 0xFFFFFFFF
desired_velocity = message & 0xFFFFFFFF
toggle_torque(
desired_velocity
) # enable torque control mode if desired_velocity is an extremely large number
# update max available current value
if CarState.mode != 1:
CarState.CURRENT_SETPOINT = CarState.MAX_CURRENT * (
desired_current / 100.0
)
update_velocity(desired_velocity)
            # Write the current velocity value into both 32-bit halves.
            # The shift must be parenthesized: '+' binds tighter than '<<',
            # so x << 32 + y would compute x << (32 + y).
            tx_message = (int(CarState.CURRENT_VELOCITY) << 32) + int(
                CarState.CURRENT_VELOCITY
            )
write(CarState.VELOCITY_ID, tx_message)
return desired_velocity, CarState.CURRENT_VELOCITY
else:
return CarState.CURRENT_VELOCITY, CarState.CURRENT_VELOCITY
except ValueError:
        return CarState.CURRENT_VELOCITY, CarState.CURRENT_VELOCITY
Python | def update_velocity(v):
"""Acts as the motor controller increasing
the speed of the motor
Args:
v (float): desired velocity received by the Controls system
"""
if CarState.CURRENT_VELOCITY < v:
CarState.CURRENT_VELOCITY += CarState.velocity_increase
elif CarState.CURRENT_VELOCITY > v:
        CarState.CURRENT_VELOCITY -= 0.5
Python | def sendMotorDisable(message):
""" Simulates BPS by sending a message over CAN1
saying whether or not it is safe to turn on the motor.
Sends the MOTOR_DISABLE command with disable/enable argument.
message==1 means that it is safe to turn on the motor,
message==0 means that the motor should be turned off,
and all other messages are ignored.
"""
MDCommand = hex(266)
    write(MDCommand, hex(message))
Python | def write(msg):
""" Writes the message to UART_2
Doesn't return anything
"""
# Creates file if it doesn't exist
os.makedirs(os.path.dirname(file), exist_ok=True)
if not os.path.exists(file):
with open(file, 'w'):
pass
lines = []
    # Grab the current UART data, holding an exclusive lock while reading
    with open(file, "r") as csv:
        fcntl.flock(csv.fileno(), fcntl.LOCK_EX)
        lines = csv.readlines()
        fcntl.flock(csv.fileno(), fcntl.LOCK_UN)
    # If the file hasn't been initialized yet, pad it out to two entries
    length = len(lines)
    if length < 2:
        for i in range(length, 2):
            lines.append('\n')
    # Write back the UART data, replacing the second entry with the message
    with open(file, "w") as csv:
        fcntl.flock(csv.fileno(), fcntl.LOCK_EX)
        for (i, line) in enumerate(lines):
            if i == 1:
                csv.write(msg)
                csv.write('\n')
            else:
                csv.write(line)
        fcntl.flock(csv.fileno(), fcntl.LOCK_UN)
Python | def read():
"""Reads contactor file and returns 1 (on) or 0 (off)
Returns:
list: [motor_state, array_state]
"""
os.makedirs(os.path.dirname(file), exist_ok=True)
if not os.path.exists(file):
with open(file, "w"):
pass
states = []
with open(file, "r") as csvfile:
fcntl.flock(csvfile.fileno(), fcntl.LOCK_EX) # Lock file before reading
csvreader = csv.reader(csvfile)
for row in csvreader:
states.append(row)
fcntl.flock(csvfile.fileno(), fcntl.LOCK_UN)
try:
states = int(states[2][0])
except IndexError:
states = 0
    return [(states >> MOTOR_PIN) & 0x01, (states >> ARRAY_PIN) & 0x01]
Python | def update_contactors():
"""Periodically update the display state of the Motor and Array Contactors"""
global motor_status, array_status
contactors_status = Contactor.read()
motor_status.set(f"Motor Contactor: {contactors_status[0]}")
array_status.set(f"Array Contactor: {contactors_status[1]}")
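    # Tk has no periodic timer, so each callback re-arms itself with after()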
    window.after(CONTACTOR_FREQ, update_contactors)
Python | def update_display():
"""Periodically update the display state of display"""
global display_text
display = Display.read()
for i, text in enumerate(display.keys()):
display_text[i].set(f"{text}: {display[text]}")
    window.after(DISPLAY_FREQ, update_display)
Python | def update_CAN():
"""Periodically update the display state of the CAN bus"""
global id_text, message_text
can = CAN.read()
id_text.set(f"ID: {can[0]}")
message_text.set(f"Message: {can[1]}")
    window.after(CAN1_FREQ, update_CAN)
Python | def update_CAN2():
"""Periodally update the display state of the CAN2 bus"""
global id_text2, message_text2
can2 = MotorController.sendTouC()
id_text2.set(f"ID: {can2[0]}")
message_text2.set(f"Message: {can2[1]}")
    window.after(MOTOR_FREQ, update_CAN2)
Python | def update_motor():
"""Periodically update the velocity and display of the motor"""
global desired_velocity_text, current_velocity_text
desired_velocity, current_velocity = MotorController.confirm_drive()
desired_velocity_text.set(f"Desired Velocity: {round(desired_velocity, 3)} m/s")
current_velocity_text.set(f"Current Velocity: {round(current_velocity, 3)} m/s")
MotorController.torque_control(accelerator.get())
    window.after(MOTOR_FREQ, update_motor)
Python | def update_precharge():
"""Periodically update the display state of the Motor and Array precharge boards"""
global precharge_motor_status, precharge_array_status
precharge_status = PreCharge.read()
precharge_motor_status.set(f"Motor Precharge: {precharge_status[1]}")
precharge_array_status.set(f"Array Precharge: {precharge_status[0]}")
    window.after(PRECHARGE_FREQ, update_precharge)
Python | def update_lights():
"""Periodically update the display state of the lights"""
global lights_text
lights_state = Lights.read()
lights = Lights.get_lights()
for i, light in enumerate(lights_text):
light.set(f"{lights[i]}: {(lights_state >> i) & 0x01}")
    window.after(LIGHTS_FREQ, update_lights)
Python | def check_holdings(asset_id, address):
"""
Checks the asset balance for the specific address and asset id.
"""
account_info = client.account_info(address)
assets = account_info.get("assets")
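    # account_info["assets"] lists only assets the address has opted in to
    # (Algorand accounts must opt in before they can hold an ASA)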
for asset in assets:
if asset['asset-id'] == asset_id:
amount = asset.get("amount")
print("Account {} has {}.".format(address, balance_formatter(amount, asset_id, client)))
return
print("Account {} must opt-in to Asset ID {}.".format(address, asset_id)) | def check_holdings(asset_id, address):
"""
Checks the asset balance for the specific address and asset id.
"""
account_info = client.account_info(address)
assets = account_info.get("assets")
for asset in assets:
if asset['asset-id'] == asset_id:
amount = asset.get("amount")
print("Account {} has {}.".format(address, balance_formatter(amount, asset_id, client)))
return
print("Account {} must opt-in to Asset ID {}.".format(address, asset_id)) |
Python | def match_json():
""" Find JSON pairs from original json """
new_json = {}
for filename in os.listdir("data/sentiment/"):
username = filename[:-11]
back_json_file = filename[:-5] + "s.json"
print(filename)
print(username)
if filename.endswith(".json"):
with open("data/sentiment/" + filename) as f, \
open("data/backup_json/" + back_json_file) as f2:
sentiment_dict = json.load(f)
tweets = json.load(f2)
# search backup_json for tweet
for tweet_id, i in sentiment_dict.items():
profile_pic = ""
for tweet in tweets:
if tweet["id"] == int(tweet_id):
profile_pic = tweet["user"][
"profile_image_url_https"]
break
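                    # NOTE: assumes every sentiment id has a matching tweet;
                    # if none matches, `tweet` still refers to the last item checked.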
created_at = datetime.strptime(
tweet["created_at"], "%a %b %d %H:%M:%S +0000 %Y")
formatted_time = created_at.strftime("%Y-%m-%dT%H:%M:%S")
timestamp = time.mktime(created_at.timetuple())
new_json.setdefault(int(timestamp), [])
new_json[int(timestamp)].append(
{"username": username,
"profile_pic": profile_pic,
"created_at": formatted_time,
"text": sentiment_dict[tweet_id]["text"],
"sentiment": sentiment_dict[tweet_id]["sentiment"]})
# print(json.dumps(new_json, sort_keys=True, indent=4))
save_json(new_json, "merged_data.json") | def match_json():
""" Find JSON pairs from original json """
new_json = {}
for filename in os.listdir("data/sentiment/"):
username = filename[:-11]
back_json_file = filename[:-5] + "s.json"
print(filename)
print(username)
if filename.endswith(".json"):
with open("data/sentiment/" + filename) as f, \
open("data/backup_json/" + back_json_file) as f2:
sentiment_dict = json.load(f)
tweets = json.load(f2)
# search backup_json for tweet
for tweet_id, i in sentiment_dict.items():
profile_pic = ""
for tweet in tweets:
if tweet["id"] == int(tweet_id):
profile_pic = tweet["user"][
"profile_image_url_https"]
break
created_at = datetime.strptime(
tweet["created_at"], "%a %b %d %H:%M:%S +0000 %Y")
formatted_time = created_at.strftime("%Y-%m-%dT%H:%M:%S")
timestamp = time.mktime(created_at.timetuple())
new_json.setdefault(int(timestamp), [])
new_json[int(timestamp)].append(
{"username": username,
"profile_pic": profile_pic,
"created_at": formatted_time,
"text": sentiment_dict[tweet_id]["text"],
"sentiment": sentiment_dict[tweet_id]["sentiment"]})
# print(json.dumps(new_json, sort_keys=True, indent=4))
save_json(new_json, "merged_data.json") |
Python | def print_dates():
"""
Merged together tweets tweeted at the exact same time and add them
to a dictionary based on month
"""
with open("data/chunks/merged_data.json") as f:
merged_data = json.load(f)
total = 0
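    # total accumulates the overall tweet count (not otherwise used here)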
merge_on_month = {}
for timestamp in merged_data:
total += len(merged_data[timestamp])
new_date = datetime.fromtimestamp(int(timestamp)).strftime(
'%Y-%m')
merge_on_month.setdefault(new_date, [])
for tweet in merged_data[timestamp]:
merge_on_month[new_date].append(tweet)
# print(
# datetime.fromtimestamp(int(timestamp)).strftime(
# '%Y-%m-%dT%H:%M:%S'), len(merged_data[timestamp])
# )
    month_chunks(merge_on_month)
Python | def month_chunks(merge_on_month):
"""
    Group tweets by the month in which they were created.
Save chunks to json files in /chunks/
"""
for month in sorted(merge_on_month):
# months = {}
if not before_oct_3(month):
continue
# print(merge_on_month[month])
file_str = "data/chunks/{}.json".format(
month)
with open(file_str, 'w') as out:
            json.dump(merge_on_month[month], out)
Python | def build_heatmap_data():
""" Build appropriate data format for a heatmap into a csv """
for filename in os.listdir("data/chunks/"):
if not filename.startswith("m") and filename.endswith(".json"):
month = filename[:-5]
with open("data/chunks/" + filename) as f, \
open("data/heatmap/{}.csv".format(month), 'w') as out:
writer = csv.writer(out)
writer.writerow(["day", "hour", "value"])
data = json.load(f)
map_vals = {}
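                # map_vals[iso_weekday][hour] counts tweets per heatmap cell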
for tweet in data:
temp_date = datetime.strptime(
tweet["created_at"], "%Y-%m-%dT%H:%M:%S")
map_vals.setdefault(temp_date.isoweekday(), {})
map_vals[temp_date.isoweekday()].setdefault(
temp_date.hour, 0)
map_vals[temp_date.isoweekday()][temp_date.hour] += 1
# print(temp_date.isoweekday(), temp_date.hour)
print(json.dumps(map_vals, indent=4))
print(filename)
for weekday in map_vals:
for hour in map_vals[weekday]:
writer.writerow(
                            [weekday, hour, map_vals[weekday][hour]]) |
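One way to consume the generated CSVs: a sketch, assuming pandas is available and a hypothetical month file, that pivots the counts into the weekday-by-hour grid a heatmap expects:

import pandas as pd

df = pd.read_csv("data/heatmap/2016-10.csv")  # hypothetical month file
grid = df.pivot(index="day", columns="hour", values="value").fillna(0)
print(grid)  # rows: ISO weekdays 1-7, columns: hours 0-23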
Python | def print_duplicates(timestamps):
"""
    Unfortunately there are duplicates in almost every user's tweet set.
    This prints the duplicate entries.
"""
seen = set()
not_uniq = []
for x in timestamps:
if x in seen:
not_uniq.append(x)
else:
seen.add(x)
    print(not_uniq) |
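The same check can be expressed with collections.Counter; a sketch of an equivalent alternative (note the original appends a value once per extra occurrence, while this lists each duplicated value once):

from collections import Counter

def print_duplicates_counter(timestamps):
    counts = Counter(timestamps)
    print([ts for ts, n in counts.items() if n > 1])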
Python | def term_counts():
"""
Build bubble chart data from tweets.
    Builds a corpus from each month of tweets, then
    looks at each user's tweets to check whether the user used the top n words.
"""
stops = set(stopwords.words("english"))
for filename in os.listdir("data/chunks/"):
if not filename.startswith("m") and filename.endswith(".json"):
month = filename[:-5]
print(month)
hashtags = {}
k_words = {}
with open("data/chunks/{}".format(filename)) as f:
tweets = json.load(f)
for t in tweets:
d = t["text"]
user_pic = t["profile_pic"]
# print(d)
# get hashtags and add counts
temp_tags = extract_hash_tags(d)
for t in temp_tags:
if t:
hashtags.setdefault(
t, {"count": 0, "profile_imgs": []})
hashtags[t]["count"] += 1
if user_pic not in hashtags[t]["profile_imgs"]:
hashtags[t]["profile_imgs"].append(user_pic)
# get non-stopwords with no punctuations
# links or #/@ symbols
d = remove_links(d)
for word in d.split():
temp = trans_remove(word)
if temp and temp not in stops:
k_words.setdefault(
temp, {"count": 0, "profile_imgs": []})
k_words[temp]["count"] += 1
if user_pic not in k_words[temp]["profile_imgs"]:
k_words[temp]["profile_imgs"].append(user_pic)
# print(json.dumps(hashtags, sort_keys=True, indent=4))
# print(len(hashtags))
srt_tags = get_top_terms(hashtags, "count", 250)
srt_words = get_top_terms(k_words, "count", 250)
word_dump = {}
tag_dump = {}
for t in srt_words:
word_dump.setdefault(t, k_words[t])
for t in srt_tags:
tag_dump.setdefault(t, hashtags[t])
with open("data/clusters/{}.json".format(month), 'w') as out:
json.dump(word_dump, out)
with open("data/clusters/{}.tags.json".format(month), 'w') as out:
                json.dump(tag_dump, out) |
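get_top_terms is referenced above but not shown; one plausible reading, sketched here purely as an assumption, is that it sorts terms by the given count field and returns the top n:

# Hypothetical sketch of the get_top_terms helper used above.
def get_top_terms(counts, key, n):
    return sorted(counts, key=lambda term: counts[term][key], reverse=True)[:n]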
Python | def saveCSV(alltweets, screen_name):
"""
transform the tweepy tweets into a 2D array that will populate the csv
"""
outtweets = [[tweet.id_str, tweet.created_at,
tweet.text.encode("utf-8")]
for tweet in alltweets]
with open('data/%s_tweets.csv' % screen_name, 'w') as f:
writer = csv.writer(f)
writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets) |
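Because the text column is written as the repr of an encoded bytes object, reading it back requires evaluating the bytes literal, mirroring the ast.literal_eval step in translate_csvs below; a minimal round-trip sketch with a hypothetical file:

import ast
import csv

with open('data/example_tweets.csv') as f:  # hypothetical file
    reader = csv.reader(f)
    next(reader)  # skip the header row
    for tweet_id, created_at, raw_text in reader:
        text = ast.literal_eval(raw_text).decode('utf-8')  # b'...' repr -> str
        print(tweet_id, text)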
Python | def save_backupJson(alltweets, screen_name):
'''
    In case the data saved to CSV for presentation is not enough,
    back up the entire JSON response from Twitter. Not to be stored in
    a public repository.
'''
# transform the tweepy tweets into a 2D array that will populate the csv
outtweets = [tweet._json
for tweet in alltweets]
with open('data/backup_json/%s_tweets.json' % screen_name, 'w') as f:
        json.dump(outtweets, f) |
Python | def translate_csvs():
"""
    Uses the googletrans library, which hits a rate limit or captcha
    after roughly 20 requests in a row.
"""
for filename in os.listdir("data/"):
username = filename[:-4]
if os.path.isfile("data/translations/" + username + ".json"):
continue
if filename.endswith(".csv"):
with open("data/" + filename, 'r') as f:
print("File:", filename)
reader = csv.reader(f)
next(reader)
data = []
for row in reader:
temp = {}
temp["id"] = row[0]
temp["datetime"] = row[1]
bytes_string = ast.literal_eval(row[2])
sentence = bytes_string.decode('utf-8').rstrip()
temp["orig"] = sentence
                    # sleep for 1 second between requests
                    time.sleep(1)
                    # keep retrying until the sentence finally translates
                    translated_flag = True
                    translation = ""
                    while translated_flag:
try:
translation = translate(sentence)
translated_flag = False
except json.decoder.JSONDecodeError:
print("Hit rate limit. Sleeping for 2 minutes")
time.sleep(60 * 2)
print(translation)
temp["translation"] = translation.text
data.append(temp)
dump_data_if_not_exists(
"data/translations/", username + ".json", data)
# stop after each file
exit()
else:
            continue |
Python | def translate(sentence):
"""
    Use googletrans's automatic language detection.
    Process one sentence at a time to avoid rate limits/captchas.
"""
translator = Translator()
translation = translator.translate(sentence, dest='en')
# translations = translator.translate(sentence_list, dest='en')
    return translation |
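The retry loop in translate_csvs waits a fixed two minutes on every failure; a common refinement is exponential backoff, sketched here as a generic wrapper around translate (the exception type mirrors the one caught above):

import json
import time

def translate_with_backoff(sentence, max_tries=5, base_delay=2.0):
    # Hedged sketch: retry translate() with exponentially growing sleeps.
    for attempt in range(max_tries):
        try:
            return translate(sentence)
        except json.decoder.JSONDecodeError:
            time.sleep(base_delay * (2 ** attempt))
    raise RuntimeError("translation kept failing after %d tries" % max_tries)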
Python | def __set_home_dir_ownership():
    """
    Updates the Hue user home directory to be owned by hue:hue.
    """
    import params
if not os.path.exists("/home/{0}".format(params.hue_user)):
Directory(params.hue_local_home_dir,
mode=0700,
cd_access='a',
owner=params.hue_user,
group=params.hue_group,
create_parents=True
                  ) |
Python | def download_hue():
    """
    Download Hue to the installation directory.
    """
    import params
Execute('{0} | xargs wget -O hue.tgz'.format(params.download_url))
Execute('tar -zxvf hue.tgz -C {0} && rm -f hue.tgz'.format(params.hue_install_dir))
# Ensure all Hue files owned by hue
Execute('chown -R {0}:{1} {2}'.format(params.hue_user,params.hue_group,params.hue_dir))
Execute('ln -s {0} /usr/hdp/current/hue-server'.format(params.hue_dir))
Logger.info("Hue Service is installed") | def download_hue():
import params
"""
Download Hue to the installation directory
"""
Execute('{0} | xargs wget -O hue.tgz'.format(params.download_url))
Execute('tar -zxvf hue.tgz -C {0} && rm -f hue.tgz'.format(params.hue_install_dir))
# Ensure all Hue files owned by hue
Execute('chown -R {0}:{1} {2}'.format(params.hue_user,params.hue_group,params.hue_dir))
Execute('ln -s {0} /usr/hdp/current/hue-server'.format(params.hue_dir))
Logger.info("Hue Service is installed") |
Python | def pre_upgrade_restart(self, env, upgrade_type=None):
"""
Performs the tasks that should be done before an upgrade of oozie. This includes:
- backing up configurations
- running hdp-select and conf-select
- restoring configurations
- preparing the libext directory
:param env:
:return:
"""
import params
    env.set_params(params) |
Python | def oozie_service(action = 'start', upgrade_type=None):
"""
Starts or stops the Oozie service
:param action: 'start' or 'stop'
:param upgrade_type: type of upgrade, either "rolling" or "non_rolling"
skipped since a variation of them was performed during the rolling upgrade
:return:
"""
import params
environment={'OOZIE_CONFIG': params.conf_dir}
if params.security_enabled:
if params.oozie_principal is None:
oozie_principal_with_host = 'missing_principal'
else:
oozie_principal_with_host = params.oozie_principal.replace("_HOST", params.hostname)
kinit_if_needed = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal_with_host};")
else:
kinit_if_needed = ""
no_op_test = as_user(format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"), user=params.oozie_user)
if action == 'start':
start_cmd = format("cd {oozie_tmp_dir} && {oozie_home}/bin/oozied.sh start")
if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or \
params.jdbc_driver_name == "com.microsoft.sqlserver.jdbc.SQLServerDriver" or \
params.jdbc_driver_name == "org.postgresql.Driver" or \
params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
db_connection_check_command = format("{java_home}/bin/java -cp {check_db_connection_jar}:{target} org.apache.ambari.server.DBConnectionVerification '{oozie_jdbc_connection_url}' {oozie_metastore_user_name} {oozie_metastore_user_passwd!p} {jdbc_driver_name}")
else:
db_connection_check_command = None
if upgrade_type is None:
if not os.path.isfile(params.target) and params.jdbc_driver_name == "org.postgresql.Driver":
print format("ERROR: jdbc file {target} is unavailable. Please, follow next steps:\n" \
"1) Download postgresql-9.0-801.jdbc4.jar.\n2) Create needed directory: mkdir -p {oozie_home}/libserver/\n" \
"3) Copy postgresql-9.0-801.jdbc4.jar to newly created dir: cp /path/to/jdbc/postgresql-9.0-801.jdbc4.jar " \
"{oozie_home}/libserver/\n4) Copy postgresql-9.0-801.jdbc4.jar to libext: cp " \
"/path/to/jdbc/postgresql-9.0-801.jdbc4.jar {oozie_home}/libext/\n")
exit(1)
if db_connection_check_command:
Execute( db_connection_check_command,
tries=5,
try_sleep=10,
user=params.oozie_user,
)
Execute( format("cd {oozie_tmp_dir} && {oozie_home}/bin/ooziedb.sh create -sqlfile oozie.sql -run"),
user = params.oozie_user, not_if = no_op_test,
ignore_failures = True
)
if params.security_enabled:
Execute(kinit_if_needed,
user = params.oozie_user,
)
if params.host_sys_prepped:
print "Skipping creation of oozie sharelib as host is sys prepped"
hdfs_share_dir_exists = True # skip time-expensive hadoop fs -ls check
elif WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
# check with webhdfs is much faster than executing hadoop fs -ls.
util = WebHDFSUtil(params.hdfs_site, params.oozie_user, params.security_enabled)
list_status = util.run_command(params.hdfs_share_dir, 'GETFILESTATUS', method='GET', ignore_status_codes=['404'], assertable_result=False)
hdfs_share_dir_exists = ('FileStatus' in list_status)
else:
# have to do time expensive hadoop fs -ls check.
hdfs_share_dir_exists = shell.call(format("{kinit_if_needed} hadoop --config {hadoop_conf_dir} dfs -ls {hdfs_share_dir} | awk 'BEGIN {{count=0;}} /share/ {{count++}} END {{if (count > 0) {{exit 0}} else {{exit 1}}}}'"),
user=params.oozie_user)[0]
if not hdfs_share_dir_exists:
Execute( params.put_shared_lib_to_hdfs_cmd,
user = 'root',
path = params.execute_path
)
params.HdfsResource(format("{oozie_hdfs_user_dir}/share"),
type="directory",
action="create_on_execute",
mode=0755,
recursive_chmod=True,
)
params.HdfsResource(None, action="execute")
# start oozie
Execute( start_cmd, environment=environment, user = params.oozie_user,
not_if = no_op_test )
Execute( params.update_sharelib_cmd,
user = params.oozie_user,
path = params.execute_path,
ignore_failures=True
)
elif action == 'stop':
stop_cmd = format("cd {oozie_tmp_dir} && {oozie_home}/bin/oozied.sh stop")
# stop oozie
Execute(stop_cmd, environment=environment, only_if = no_op_test,
user = params.oozie_user)
    File(params.pid_file, action = "delete") |
Python | def pre_upgrade_restart(self, env, upgrade_type=None):
"""
Execute hdp-select before reconfiguring this client to the new HDP version.
:param env:
:param upgrade_type:
:return:
"""
Logger.info("Executing Hive HCat Client Stack Upgrade pre-restart")
import params
env.set_params(params)
# this function should not execute if the version can't be determined or
# is not at least HDP 2.2.0.0
if not params.version or compare_versions(params.version, "2.2", format=True) < 0:
        return |
Python | def prepare_warfile():
"""
Invokes the 'prepare-war' command in Oozie in order to create the WAR.
The prepare-war command uses the input WAR from ${OOZIE_HOME}/oozie.war and
outputs the prepared WAR to ${CATALINA_BASE}/webapps/oozie.war - because of this,
both of these environment variables must point to the upgraded oozie-server path and
not oozie-client since it was not yet updated.
This method will also perform a kinit if necessary.
:return:
"""
import params
# get the kerberos token if necessary to execute commands as oozie
if params.security_enabled:
oozie_principal_with_host = params.oozie_principal.replace("_HOST", params.hostname)
command = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal_with_host}")
Execute(command, user=params.oozie_user, logoutput=True)
# setup environment
environment = { "CATALINA_BASE" : "/usr/hdp/current/oozie-server/oozie-server",
"OOZIE_HOME" : "/usr/hdp/current/oozie-server" }
# prepare the oozie WAR
command = format("{oozie_setup_sh} prepare-war {oozie_secure} -d {oozie_libext_dir}")
return_code, oozie_output = shell.call(command, user=params.oozie_user,
logoutput=False, quiet=False, env=environment)
# set it to "" in to prevent a possible iteration issue
if oozie_output is None:
oozie_output = ""
if return_code != 0 or "New Oozie WAR file with added".lower() not in oozie_output.lower():
message = "Unexpected Oozie WAR preparation output {0}".format(oozie_output)
Logger.error(message)
        raise Fail(message) |
Python | def link_configs(struct_out_file):
"""
Links configs, only on a fresh install of HDP-2.3 and higher
"""
if True:
Logger.info("=====================================================")
return
json_version = load_version(struct_out_file)
if not json_version:
Logger.info("Could not load 'version' from {0}".format(struct_out_file))
return
for k, v in conf_select.PACKAGE_DIRS.iteritems():
        _link_configs(k, json_version, v) |
Python | def _link_configs(package, version, dirs):
"""
Link a specific package's configuration directory
"""
bad_dirs = []
for dir_def in dirs:
if not os.path.exists(dir_def['conf_dir']):
bad_dirs.append(dir_def['conf_dir'])
if len(bad_dirs) > 0:
Logger.debug("Skipping {0} as it does not exist.".format(",".join(bad_dirs)))
return
bad_dirs = []
for dir_def in dirs:
# check if conf is a link already
old_conf = dir_def['conf_dir']
if os.path.islink(old_conf):
Logger.debug("{0} is a link to {1}".format(old_conf, os.path.realpath(old_conf)))
bad_dirs.append(old_conf)
if len(bad_dirs) > 0:
return
# make backup dir and copy everything in case configure() was called after install()
for dir_def in dirs:
old_conf = dir_def['conf_dir']
old_parent = os.path.abspath(os.path.join(old_conf, os.pardir))
old_conf_copy = os.path.join(old_parent, "conf.install")
Execute(("cp", "-R", "-p", old_conf, old_conf_copy),
not_if = format("test -e {old_conf_copy}"), sudo = True)
# we're already in the HDP stack
versioned_confs = conf_select.create("HDP", package, version, dry_run = True)
Logger.info("New conf directories: {0}".format(", ".join(versioned_confs)))
need_dirs = []
for d in versioned_confs:
if not os.path.exists(d):
need_dirs.append(d)
if len(need_dirs) > 0:
conf_select.create("HDP", package, version)
# find the matching definition and back it up (not the most efficient way) ONLY if there is more than one directory
if len(dirs) > 1:
for need_dir in need_dirs:
for dir_def in dirs:
if 'prefix' in dir_def and need_dir.startswith(dir_def['prefix']):
old_conf = dir_def['conf_dir']
versioned_conf = need_dir
Execute(as_sudo(["cp", "-R", "-p", os.path.join(old_conf, "*"), versioned_conf], auto_escape=False),
only_if = format("ls {old_conf}/*"))
elif 1 == len(dirs) and 1 == len(need_dirs):
old_conf = dirs[0]['conf_dir']
versioned_conf = need_dirs[0]
Execute(as_sudo(["cp", "-R", "-p", os.path.join(old_conf, "*"), versioned_conf], auto_escape=False),
only_if = format("ls {old_conf}/*"))
# make /usr/hdp/[version]/[component]/conf point to the versioned config.
# /usr/hdp/current is already set
try:
conf_select.select("HDP", package, version)
# no more references to /etc/[component]/conf
for dir_def in dirs:
Directory(dir_def['conf_dir'], action="delete")
# link /etc/[component]/conf -> /usr/hdp/current/[component]-client/conf
Link(dir_def['conf_dir'], to = dir_def['current_dir'])
except Exception, e:
Logger.warning("Could not select the directory: {0}".format(e.message))
    # should conf.install be removed? |
Python | def download_database_library_if_needed(target_directory = None):
"""
Downloads the library to use when connecting to the Oozie database, if
necessary. The library will be downloaded to 'params.target' unless
otherwise specified.
:param target_directory: the location where the database library will be
downloaded to.
:return:
"""
import params
jdbc_drivers = ["com.mysql.jdbc.Driver",
"com.microsoft.sqlserver.jdbc.SQLServerDriver",
"oracle.jdbc.driver.OracleDriver","sap.jdbc4.sqlanywhere.IDriver"]
# check to see if the JDBC driver name is in the list of ones that need to
# be downloaded
if params.jdbc_driver_name not in jdbc_drivers:
return
# if the target directory is not specified
if target_directory is None:
target_jar_with_directory = params.target
else:
# create the full path using the supplied target directory and the JDBC JAR
target_jar_with_directory = target_directory + os.path.sep + params.jdbc_driver_jar
if not os.path.exists(target_jar_with_directory):
File(params.downloaded_custom_connector,
content = DownloadSource(params.driver_curl_source))
if params.sqla_db_used:
untar_sqla_type2_driver = ('tar', '-xvf', params.downloaded_custom_connector, '-C', params.tmp_dir)
Execute(untar_sqla_type2_driver, sudo = True)
Execute(format("yes | {sudo} cp {jars_path_in_archive} {oozie_libext_dir}"))
Directory(params.jdbc_libs_dir,
create_parents = True)
Execute(format("yes | {sudo} cp {libs_path_in_archive} {jdbc_libs_dir}"))
Execute(format("{sudo} chown -R {oozie_user}:{user_group} {oozie_libext_dir}/*"))
else:
Execute(('cp', '--remove-destination', params.downloaded_custom_connector, target_jar_with_directory),
path=["/bin", "/usr/bin/"],
sudo = True)
File(target_jar_with_directory, owner = params.oozie_user,
         group = params.user_group) |
Python | def dashboard(host, port, csv, excel, delimiter, sheet_name):
"""Starts the CSV Plot dashboard.
Loads either a --csv or --excel file for plotting. If neither of these options are given, the built-in Titanic dataset is loaded."""
df, name = load_data(csv, delimiter, excel, sheet_name)
kwargs = {
'csv': csv,
'excel': excel,
'delimiter': delimiter,
'sheet_name': sheet_name,
}
    dashboard(host, port, df, name, kwargs) |
Python | def table_background_gradient(s, m, M, cmap='PuBu', low=0, high=0):
"""The background gradient is per table, instead of either per row or per column"""
# https://stackoverflow.com/questions/38931566/pandas-style-background-gradient-both-rows-and-columns
rng = M - m
norm = colors.Normalize(m - (rng * low),
M + (rng * high))
normed = norm(s.values)
c = [colors.rgb2hex(x) for x in plt.cm.get_cmap(cmap)(normed)]
    return ['background-color: %s' % color for color in c] |
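A usage sketch following the linked Stack Overflow pattern: apply the function through pandas' Styler while passing table-wide extrema so every column shares one normalization (the DataFrame here is hypothetical):

import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.rand(5, 3), columns=list('abc'))
styled = df.style.apply(table_background_gradient,
                        cmap='PuBu',
                        m=df.min().min(),
                        M=df.max().max(),
                        low=0, high=0.2)
# styled renders as an HTML table with one gradient across the whole frame.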
Python | def resolver(cls, var_name: str) -> FunctionType:
"""
    Variable resolver decorator. The function or method decorated with it is
    used to resolve the config variable.
    .. note::
        The variable is resolved only once.
        Subsequent gets are returned from the cache.
:param var_name: Variable name
:return: Function decorator
"""
def dec(f):
if var_name in cls().resolvers:
raise ConfigurationError(
f'Resolver for {var_name} already registered')
cls().resolvers[var_name] = f
return f
    return dec |
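A usage sketch for the decorator, assuming Configuration's get_var accessor; the variable name and resolved value are hypothetical:

@Configuration.resolver('db_url')
def resolve_db_url():
    # Runs once, on first access; the result is cached afterwards.
    return 'sqlite:///app.db'

print(Configuration().get_var('db_url'))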
Python | def env_resolver(cls, var_name: str, env_name: str = None,
default: Any = _NONE) -> 'Configuration':
"""
    Method for configuring an environment resolver.
:param var_name: Variable name
:param env_name: An optional environment variable name. If not set\
haps looks for `HAPS_var_name`
:param default: Default value for variable. If it's a callable,\
it is called before return. If not provided\
:class:`~haps.exceptions.UnknownConfigVariable` is raised
:return: :class:`~haps.config.Configuration` instance for easy\
chaining
"""
cls.resolver(var_name)(
partial(
_env_resolver, var_name=var_name, env_name=env_name,
default=default))
    return cls() |
Python | def configure(cls, config: Configuration) -> None:
"""
    Method for configuring the haps application.
This method is invoked before autodiscover.
:param config: Configuration instance
"""
    pass |
Python | def autodiscover(cls,
module_paths: List[str],
subclass: 'Container' = None) -> None:
"""
Load all modules automatically and find bases and eggs.
:param module_paths: List of paths that should be discovered
:param subclass: Optional Container subclass that should be used
"""
def find_base(bases: set, implementation: Type):
found = {b for b in bases if issubclass(implementation, b)}
if not found:
raise ConfigurationError(
"No base defined for %r" % implementation)
elif len(found) > 1:
raise ConfigurationError(
"More than one base found for %r" % implementation)
else:
return found.pop()
def walk(pkg: Union[str, ModuleType]) -> Dict[str, ModuleType]:
if isinstance(pkg, str):
pkg: ModuleType = importlib.import_module(pkg)
results = {}
try:
path = pkg.__path__
except AttributeError:
results[pkg.__name__] = importlib.import_module(pkg.__name__)
else:
for loader, name, is_pkg in pkgutil.walk_packages(path):
full_name = pkg.__name__ + '.' + name
results[full_name] = importlib.import_module(full_name)
if is_pkg:
results.update(walk(full_name))
return results
with cls._lock:
for module_path in module_paths:
walk(module_path)
config: List[Egg] = []
for egg_ in egg.factories:
base_ = find_base(base.classes, egg_.type_)
egg_.base_ = base_
config.append(egg_)
        cls.configure(config, subclass=subclass) |
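For orientation, a hedged end-to-end sketch of the wiring this discovery enables, assuming the usual haps decorators and Inject descriptor; the package path is hypothetical:

from haps import Container, Inject, base, egg

@base
class Mailer:
    def send(self, msg: str) -> None: ...

@egg()
class SmtpMailer(Mailer):
    def send(self, msg: str) -> None:
        print('sending', msg)

class Service:
    mailer: Mailer = Inject()

# In a real application the classes would live in discoverable modules:
# Container.autodiscover(['myapp.components'])
# Service().mailer.send('hello')  # would resolve to SmtpMailer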
Python | def register_scope(self, name: str, scope_class: Type[Scope]) -> None:
"""
    Register a new scope, which should be a subclass of `Scope`.
    :param name: Name of the new scope
    :param scope_class: Class of the new scope
"""
with self._lock:
if name in self.scopes:
raise AlreadyConfigured(f'Scope {name} already registered')
        self.scopes[name] = scope_class() |
Python | def base(cls: T) -> T:
"""
A class decorator that marks class as a base type.
:param cls: Some base type
:return: Not modified `cls`
"""
base.classes.add(cls)
    return cls |
Python | def scope(scope_type: str) -> Callable:
"""
    A function that returns a decorator that sets the scope of a class/function
    .. code-block:: python
        @egg()
        @scope(SINGLETON_SCOPE)
        class DepImpl:
            pass
:param scope_type: Which scope should be used
:return:
"""
def dec(egg_: T) -> T:
egg_.__haps_custom_scope = scope_type
return egg_
    return dec |
Python | def _check_inputs(self, x1, x2, params):
r"""
Common function for checking dimensions of inputs
This function checks the inputs to any kernel evaluation for consistency and ensures
that all input arrays have the correct dimensionality. It returns the reformatted
arrays, the number of inputs, and the number of hyperparameters. If the method
determines that the array dimensions are not all consistent with one another,
it will raise an ``AssertionError``. This method is called internally whenever
the kernel is evaluated.
:param x1: First parameter array. Should be a 1-D or 2-D array (1-D is acceptable
if either there is only a single point, or each point has only a single
parameter). If there is more than one parameter, the last dimension
must match the last dimension of ``x2`` and be one less than the length
of ``params``.
:type x1: array-like
:param x2: Second parameter array. The same restrictions apply that hold for ``x1``
described above.
:type x2: array-like
:param params: Hyperparameter array. Must have a length that is one more than the
last dimension of ``x1`` and ``x2``, hence minimum length is 2.
:type params: array-like
:returns: A tuple containing the following: reformatted ``x1``, ``n1``, reformatted
``x2``, ``n2``, ``params``, and ``D``. ``x1`` will be an array with
dimensions ``(n1, D - 1)``, ``x2`` will be an array with dimensions
``(n2, D - 1)``, and ``params`` will be an array with dimensions ``(D,)``.
``n1``, ``n2``, and ``D`` will be integers.
"""
params = np.array(params)
assert params.ndim == 1, "parameters must be a vector"
D = len(params)
assert D >= 2, "minimum number of parameters in a covariance kernel is 2"
x1 = np.array(x1)
assert x1.ndim == 1 or x1.ndim == 2, "bad number of dimensions in input x1"
if x1.ndim == 2:
assert x1.shape[1] == D - 1, "bad shape for x1"
else:
if D == 2:
x1 = np.reshape(x1, (len(x1), 1))
else:
x1 = np.reshape(x1, (1, D - 1))
n1 = x1.shape[0]
x2 = np.array(x2)
assert x2.ndim == 1 or x2.ndim == 2, "bad number of dimensions in input x2"
if x2.ndim == 2:
assert x2.shape[1] == D - 1, "bad shape for x2"
else:
if D == 2:
x2 = np.reshape(x2, (len(x2), 1))
else:
x2 = np.reshape(x2, (1, D - 1))
n2 = x2.shape[0]
    return x1, n1, x2, n2, params, D |
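A standalone sketch of the reshaping rules described above; with two hyperparameters a 1-D input is read as n one-dimensional points, while with more it is read as a single point:

import numpy as np

x = np.array([1., 2., 3.])
print(np.reshape(x, (len(x), 1)).shape)   # D == 2: (3, 1), three 1-D points

x = np.array([1., 2.])
D = 3
print(np.reshape(x, (1, D - 1)).shape)    # D == 3: (1, 2), one 2-D point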
Python | def calc_drdtheta(self, x1, x2, params):
r"""
Calculate the first derivative of the distance between all pairs of points with
respect to the hyperparameters
This method computes the derivative of the scaled Euclidean distance between
all pairs of points in ``x1`` and ``x2`` with respect to the hyperparameters.
The gradient is held in an array with shape ``(D, n1, n2)``, where ``D`` is
the length of ``params``, ``n1`` is the length of the first axis of ``x1``,
and ``n2`` is the length of the first axis of ``x2``. This is used in the
computation of the gradient and Hessian of the kernel. The first index
represents the different derivatives with respect to each hyperparameter.
:param x1: First input array. Must be a 1-D or 2-D array, with the length of
the last dimension matching the last dimension of ``x2`` and
one less than the length of ``params``. ``x1`` may be 1-D if either
each point consists of a single parameter (and ``params`` has length
2) or the array only contains a single point (in which case, the array
will be reshaped to ``(1, D - 1)``).
:type x1: array-like
:param x2: Second input array. The same restrictions that apply to ``x1`` also
apply here.
:type x2: array-like
:param params: Hyperparameter array. Must be 1-D with length one greater than
the last dimension of ``x1`` and ``x2``.
:type params: array-like
:returns: Array holding the derivative of the pair-wise distances between
points in arrays ``x1`` and ``x2`` with respect to the hyperparameters.
Will be an array with shape ``(D, n1, n2)``, where ``D`` is the length
of ``params``, ``n1`` is the length of the first axis of ``x1`` and
``n2`` is the length of the first axis of ``x2``. The first axis
indicates the different derivative components (i.e. the derivative
with respect to the first parameter is [0,:,:], etc.)
:rtype: ndarray
"""
x1, n1, x2, n2, params, D = self._check_inputs(x1, x2, params)
exp_theta = np.exp(-params[:(D - 1)])
drdtheta = np.zeros((D - 1, n1, n2))
r_matrix = self.calc_r(x1, x2, params)
r_matrix[(r_matrix == 0.)] = 1.
for d in range(D - 1):
drdtheta[d] = (0.5 * np.exp(params[d]) / r_matrix *
cdist(np.reshape(x1[:,d], (n1, 1)),
np.reshape(x2[:,d], (n2, 1)), "sqeuclidean"))
    return drdtheta |
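As a sanity check on these expressions, a self-contained finite-difference sketch; calc_r is not shown in this set, so a stand-in consistent with the derivatives above (r as the exponentially scaled Euclidean distance) is assumed. The same check pattern applies to calc_d2rdtheta2 and calc_drdx:

import numpy as np

def calc_r(x1, x2, params):  # hypothetical stand-in for the class method
    exp_theta = np.exp(params[:-1])
    diff = x1[:, None, :] - x2[None, :, :]             # (n1, n2, D-1)
    return np.sqrt((exp_theta * diff ** 2).sum(axis=-1))

def drdtheta(x1, x2, params):                           # analytic, as above
    r = calc_r(x1, x2, params)
    r[r == 0.] = 1.
    diff2 = (x1[:, None, :] - x2[None, :, :]) ** 2
    return 0.5 * np.exp(params[:-1]) * diff2 / r[..., None]

rng = np.random.default_rng(0)
x1, x2 = rng.normal(size=(4, 2)), rng.normal(size=(5, 2))
params = np.array([0.3, -0.7, 0.0])
h = 1e-6
for d in range(2):
    dp = np.zeros(3); dp[d] = h
    fd = (calc_r(x1, x2, params + dp) - calc_r(x1, x2, params - dp)) / (2 * h)
    assert np.allclose(fd, drdtheta(x1, x2, params)[..., d], atol=1e-6)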
Python | def calc_d2rdtheta2(self, x1, x2, params):
r"""
Calculate all second derivatives of the distance between all pairs of points with
respect to the hyperparameters
This method computes all second derivatives of the scaled Euclidean distance
between all pairs of points in ``x1`` and ``x2`` with respect to the
hyperparameters. The gradient is held in an array with shape ``(D, D, n1, n2)``,
where ``D`` is the length of ``params``, ``n1`` is the length of the first axis
of ``x1``, and ``n2`` is the length of the first axis of ``x2``. This is used in
the computation of the gradient and Hessian of the kernel. The first two indices
represents the different derivatives with respect to each hyperparameter.
:param x1: First input array. Must be a 1-D or 2-D array, with the length of
the last dimension matching the last dimension of ``x2`` and
one less than the length of ``params``. ``x1`` may be 1-D if either
each point consists of a single parameter (and ``params`` has length
2) or the array only contains a single point (in which case, the array
will be reshaped to ``(1, D - 1)``).
:type x1: array-like
:param x2: Second input array. The same restrictions that apply to ``x1`` also
apply here.
:type x2: array-like
:param params: Hyperparameter array. Must be 1-D with length one greater than
the last dimension of ``x1`` and ``x2``.
:type params: array-like
:returns: Array holding the second derivatives of the pair-wise distances between
points in arrays ``x1`` and ``x2`` with respect to the hyperparameters.
Will be an array with shape ``(D, D, n1, n2)``, where ``D`` is the length
of ``params``, ``n1`` is the length of the first axis of ``x1`` and
``n2`` is the length of the first axis of ``x2``. The first two axes
indicates the different derivative components (i.e. the second derivative
with respect to the first parameter is [0,0,:,:], the mixed partial with
respect to the first and second parameters is [0,1,:,:] or [1,0,:,:], etc.)
:rtype: ndarray
"""
x1, n1, x2, n2, params, D = self._check_inputs(x1, x2, params)
exp_theta = np.exp(-params[:(D - 1)])
d2rdtheta2 = np.zeros((D - 1, D - 1, n1, n2))
r_matrix = self.calc_r(x1, x2, params)
r_matrix[(r_matrix == 0.)] = 1.
for d1 in range(D - 1):
for d2 in range(D - 1):
if d1 == d2:
d2rdtheta2[d1, d2] = (0.5*np.exp(params[d1]) / r_matrix *
cdist(np.reshape(x1[:,d1], (n1, 1)),
np.reshape(x2[:,d1], (n2, 1)), "sqeuclidean"))
d2rdtheta2[d1, d2] -= (0.25 * np.exp(params[d1]) *
np.exp(params[d2]) / r_matrix**3 *
cdist(np.reshape(x1[:,d1], (n1, 1)),
np.reshape(x2[:,d1], (n2, 1)), "sqeuclidean")*
cdist(np.reshape(x1[:,d2], (n1, 1)),
np.reshape(x2[:,d2], (n2, 1)), "sqeuclidean"))
    return d2rdtheta2 |
Python | def calc_drdx(self, x1, x2, params):
r"""
Calculate the first derivative of the distance between all pairs of points with
respect to the first set of inputs
This method computes the derivative of the scaled Euclidean distance between
all pairs of points in ``x1`` and ``x2`` with respect to the first input ``x1``.
The gradient is held in an array with shape ``(D - 1, n1, n2)``, where ``D`` is the
length of ``params``, ``n1`` is the length of the first axis of
``x1``, and ``n2`` is the length of the first axis of ``x2``. This is used in the
computation of the derivative of the kernel with respect to the inputs. The first
index represents the different derivatives with respect to each input dimension.
:param x1: First input array. Must be a 1-D or 2-D array, with the length of
the last dimension matching the last dimension of ``x2`` and
one less than the length of ``params``. ``x1`` may be 1-D if either
each point consists of a single parameter (and ``params`` has length
2) or the array only contains a single point (in which case, the array
will be reshaped to ``(1, D - 1)``).
:type x1: array-like
:param x2: Second input array. The same restrictions that apply to ``x1`` also
apply here.
:type x2: array-like
:param params: Hyperparameter array. Must be 1-D with length one greater than
the last dimension of ``x1`` and ``x2``.
:type params: array-like
:returns: Array holding the derivative of the pair-wise distances between
points in arrays ``x1`` and ``x2`` with respect to ``x1``.
                  Will be an array with shape ``(D - 1, n1, n2)``, where ``D`` is the length
                  of ``params``, ``n1`` is the length of the first axis
                  of ``x1`` and ``n2`` is the length of the first axis of ``x2``. The first
axis indicates the different derivative components (i.e. the derivative
with respect to the first input parameter is [0,:,:], etc.)
:rtype: ndarray
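
        Example (a minimal sketch, assuming a concrete subclass such as
        ``SquaredExponential``; the derivative in the second input dimension is
        checked against a forward difference of ``calc_r``)::

            >>> import numpy as np
            >>> kern = SquaredExponential()
            >>> x1 = np.array([[1., 2.], [3., 4.]])
            >>> x2 = np.array([[1.5, 2.5]])
            >>> params = np.zeros(3)
            >>> h = 1.e-6
            >>> x1_shift = np.copy(x1)
            >>> x1_shift[:, 1] += h
            >>> fd = (kern.calc_r(x1_shift, x2, params) - kern.calc_r(x1, x2, params)) / h
            >>> np.allclose(kern.calc_drdx(x1, x2, params)[1], fd, atol=1.e-5)
            True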
"""
x1, n1, x2, n2, params, D = self._check_inputs(x1, x2, params)
drdx = np.zeros((D - 1, n1, n2))
exp_theta = np.exp(params[:(D - 1)])
        r_matrix = self.calc_r(x1, x2, params)
        # Guard against division by zero for coincident points; the coordinate
        # differences in the numerator vanish there, so those entries stay zero.
        r_matrix[(r_matrix == 0.)] = 1.
        for d in range(D - 1):
            drdx[d] = exp_theta[d]*(x1[:, d].flatten()[:, None] -
                                    x2[:, d].flatten()[None, :])/r_matrix
        return drdx
    def kernel_f(self, x1, x2, params):
r"""
Compute kernel values for a set of inputs
Returns the value of the kernel for two sets of input points and a choice of
hyperparameters. This function should not need to be modified for different choices
of the kernel function or distance metric, as after checking the inputs it simply
calls the routine to compute the distance metric and then evaluates the kernel function
for those distances.
:param x1: First input array. Must be a 1-D or 2-D array, with the length of
the last dimension matching the last dimension of ``x2`` and
one less than the length of ``params``. ``x1`` may be 1-D if either
each point consists of a single parameter (and ``params`` has length
2) or the array only contains a single point (in which case, the array
will be reshaped to ``(1, D - 1)``).
:type x1: array-like
:param x2: Second input array. The same restrictions that apply to ``x1`` also
apply here.
:type x2: array-like
:param params: Hyperparameter array. Must be 1-D with length one greater than
the last dimension of ``x1`` and ``x2``.
:type params: array-like
:returns: Array holding all kernel values between points in arrays ``x1``
and ``x2``. Will be an array with shape ``(n1, n2)``, where ``n1``
is the length of the first axis of ``x1`` and ``n2`` is the length
of the first axis of ``x2``.
:rtype: ndarray
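
        Example (a minimal sketch, assuming a concrete subclass such as
        ``SquaredExponential``; with all hyperparameters zero, identical points
        give a kernel value of one)::

            >>> import numpy as np
            >>> kern = SquaredExponential()
            >>> x1 = np.array([[1., 2.], [3., 4.]])
            >>> x2 = np.array([[1., 2.]])
            >>> params = np.zeros(3)  # unit correlation lengths, unit covariance
            >>> K = kern.kernel_f(x1, x2, params)
            >>> K.shape
            (2, 1)
            >>> float(K[0, 0])
            1.0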
"""
x1, n1, x2, n2, params, D = self._check_inputs(x1, x2, params)
        return np.exp(params[D - 1]) * self.calc_K(self.calc_r(x1, x2, params))
    def kernel_deriv(self, x1, x2, params):
r"""
Compute kernel gradient for a set of inputs
Returns the value of the kernel gradient for two sets of input points and a choice of
hyperparameters. This function should not need to be modified for different choices
of the kernel function or distance metric, as after checking the inputs it simply
calls the routine to compute the distance metric, kernel function, and the appropriate
derivative functions of the distance and kernel functions.
:param x1: First input array. Must be a 1-D or 2-D array, with the length of
the last dimension matching the last dimension of ``x2`` and
one less than the length of ``params``. ``x1`` may be 1-D if either
each point consists of a single parameter (and ``params`` has length
2) or the array only contains a single point (in which case, the array
will be reshaped to ``(1, D - 1)``).
:type x1: array-like
:param x2: Second input array. The same restrictions that apply to ``x1`` also
apply here.
:type x2: array-like
:param params: Hyperparameter array. Must be 1-D with length one greater than
the last dimension of ``x1`` and ``x2``.
:type params: array-like
:returns: Array holding the gradient of the kernel function between points in arrays
``x1`` and ``x2`` with respect to the hyperparameters. Will be an array with
shape ``(D, n1, n2)``, where ``D`` is the length of ``params``, ``n1`` is the
length of the first axis of ``x1`` and ``n2`` is the length of the first axis
of ``x2``. The first axis indicates the different derivative components
(i.e. the derivative with respect to the first parameter is [0,:,:], etc.)
:rtype: ndarray
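
        Example (a minimal sketch, assuming a concrete subclass such as
        ``SquaredExponential``; the gradient component for the first correlation
        parameter is checked against a central difference of ``kernel_f``)::

            >>> import numpy as np
            >>> kern = SquaredExponential()
            >>> x1 = np.array([[1., 2.], [3., 4.]])
            >>> x2 = np.array([[1.5, 2.5]])
            >>> params = np.array([0.1, 0.2, 0.3])
            >>> h = 1.e-6
            >>> dp = np.array([h, 0., 0.])
            >>> fd = (kern.kernel_f(x1, x2, params + dp) -
            ...       kern.kernel_f(x1, x2, params - dp)) / (2. * h)
            >>> np.allclose(kern.kernel_deriv(x1, x2, params)[0], fd)
            True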
"""
x1, n1, x2, n2, params, D = self._check_inputs(x1, x2, params)
        dKdtheta = np.zeros((D, n1, n2))
        # Since K = exp(params[-1]) * K(r), the derivative with respect to the
        # log covariance parameter is the kernel itself.
        dKdtheta[-1] = self.kernel_f(x1, x2, params)
dKdr = self.calc_dKdr(self.calc_r(x1, x2, params))
drdtheta = self.calc_drdtheta(x1, x2, params)
for d in range(D - 1):
dKdtheta[d] = np.exp(params[-1]) * dKdr * drdtheta[d]
        return dKdtheta
    def kernel_hessian(self, x1, x2, params):
r"""
Calculate the Hessian of the kernel evaluated for all pairs of points with
respect to the hyperparameters
Returns the value of the kernel Hessian for two sets of input points and a choice of
hyperparameters. This function should not need to be modified for different choices
of the kernel function or distance metric, as after checking the inputs it simply
calls the routine to compute the distance metric, kernel function, and the appropriate
derivative functions of the distance and kernel functions.
:param x1: First input array. Must be a 1-D or 2-D array, with the length of
the last dimension matching the last dimension of ``x2`` and
one less than the length of ``params``. ``x1`` may be 1-D if either
each point consists of a single parameter (and ``params`` has length
2) or the array only contains a single point (in which case, the array
will be reshaped to ``(1, D - 1)``).
:type x1: array-like
:param x2: Second input array. The same restrictions that apply to ``x1`` also
apply here.
:type x2: array-like
:param params: Hyperparameter array. Must be 1-D with length one greater than
the last dimension of ``x1`` and ``x2``.
:type params: array-like
        :returns: Array holding the Hessian of the kernel function between points in arrays
                  ``x1`` and ``x2`` with respect to the hyperparameters. Will be an array with
                  shape ``(D, D, n1, n2)``, where ``D`` is the length of ``params``, ``n1`` is
                  the length of the first axis of ``x1`` and ``n2`` is the length of the first
                  axis of ``x2``. The first two axes indicate the different derivative components
(i.e. the second derivative with respect to the first parameter is [0,0,:,:],
the mixed partial with respect to the first and second parameters is [0,1,:,:]
or [1,0,:,:], etc.)
:rtype: ndarray
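
        Example (a minimal sketch, assuming a concrete subclass such as
        ``SquaredExponential``; mixed partial derivatives commute, so the Hessian
        should be symmetric in its first two axes)::

            >>> import numpy as np
            >>> kern = SquaredExponential()
            >>> x1 = np.array([[1., 2.], [3., 4.]])
            >>> x2 = np.array([[1.5, 2.5]])
            >>> params = np.array([0.1, 0.2, 0.3])
            >>> hess = kern.kernel_hessian(x1, x2, params)
            >>> hess.shape
            (3, 3, 2, 1)
            >>> np.allclose(hess, np.transpose(hess, (1, 0, 2, 3)))
            True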
"""
x1, n1, x2, n2, params, D = self._check_inputs(x1, x2, params)
        d2Kdtheta2 = np.zeros((D, D, n1, n2))
        # Mixed partials involving the log covariance parameter reduce to the
        # kernel gradient; fill the last row and column by symmetry.
        d2Kdtheta2[-1, :] = self.kernel_deriv(x1, x2, params)
        d2Kdtheta2[:, -1] = d2Kdtheta2[-1, :]
r_matrix = self.calc_r(x1, x2, params)
dKdr = self.calc_dKdr(r_matrix)
d2Kdr2 = self.calc_d2Kdr2(r_matrix)
drdtheta = self.calc_drdtheta(x1, x2, params)
d2rdtheta2 = self.calc_d2rdtheta2(x1, x2, params)
for d1 in range(D - 1):
for d2 in range(D - 1):
d2Kdtheta2[d1, d2] = np.exp(params[-1]) * (d2Kdr2 *
drdtheta[d1] * drdtheta[d2] +
dKdr * d2rdtheta2[d1, d2])
        return d2Kdtheta2
    def kernel_inputderiv(self, x1, x2, params):
r"""
        Compute the derivative of the kernel with respect to the inputs ``x1``
Returns the value of the kernel derivative with respect to the first set of input
points given inputs and a choice of hyperparameters. This function should not need
to be modified for different choices of the kernel function or distance metric, as
after checking the inputs it simply calls the routine to compute the distance metric,
kernel function, and the appropriate derivative functions of the distance and kernel
functions.
:param x1: First input array. Must be a 1-D or 2-D array, with the length of
the last dimension matching the last dimension of ``x2`` and
one less than the length of ``params``. ``x1`` may be 1-D if either
each point consists of a single parameter (and ``params`` has length
2) or the array only contains a single point (in which case, the array
will be reshaped to ``(1, D - 1)``).
:type x1: array-like
:param x2: Second input array. The same restrictions that apply to ``x1`` also
apply here.
:type x2: array-like
:param params: Hyperparameter array. Must be 1-D with length one greater than
the last dimension of ``x1`` and ``x2``.
:type params: array-like
:returns: Array holding the derivative of the kernel function between points in arrays
                  ``x1`` and ``x2`` with respect to the first inputs ``x1``. Will be an array with
                  shape ``(D - 1, n1, n2)``, where ``D`` is the length of ``params``,
                  ``n1`` is the length of the first axis of ``x1`` and ``n2`` is the length of the
first axis of ``x2``. The first axis indicates the different derivative components
(i.e. the derivative with respect to the first input dimension is [0,:,:], etc.)
:rtype: ndarray
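
        Example (a minimal sketch, assuming a concrete subclass such as
        ``SquaredExponential``; the derivative in the first input dimension is
        checked against a forward difference of ``kernel_f``)::

            >>> import numpy as np
            >>> kern = SquaredExponential()
            >>> x1 = np.array([[1., 2.], [3., 4.]])
            >>> x2 = np.array([[1.5, 2.5]])
            >>> params = np.zeros(3)
            >>> h = 1.e-6
            >>> x1_shift = np.copy(x1)
            >>> x1_shift[:, 0] += h
            >>> fd = (kern.kernel_f(x1_shift, x2, params) - kern.kernel_f(x1, x2, params)) / h
            >>> np.allclose(kern.kernel_inputderiv(x1, x2, params)[0], fd, atol=1.e-5)
            True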
"""
x1, n1, x2, n2, params, D = self._check_inputs(x1, x2, params)
dKdx = np.zeros((D - 1, n1, n2))
r_matrix = self.calc_r(x1, x2, params)
dKdr = self.calc_dKdr(r_matrix)
drdx = self.calc_drdx(x1, x2, params)
for d in range(D - 1):
dKdx[d] = np.exp(params[-1]) * dKdr * drdx[d]
        return dKdx