language | original_string |
---|---|
Python | def simulation(turns=2 * 10**6, max_strat=70):
"""
This is a simulation of the game Pig, assuming strategies of the form "stop rolling once the round score
reaches a particular goal". The function estimates the expected value of a single turn for each goal score.
:param turns: int. How many turns the function will simulate to get the answer.
:param max_strat: int. The simulation will give expected returns for each goal score up to this number.
:return: a list of floats with max_strat entries, where each entry is the expected return of a
single turn.
"""
hist = [0] * max_strat
for turn in range(1, turns+1):
if not turn % 10**4:
print("{} turns finished {:.2f}% done".format(turn, (turn/turns)*100), end="\r")
turn_score = 0
while turn_score < max_strat:
roll = random.randint(1, DICE_SIDES)
if roll <= FAILS:
break
for ndx in range(turn_score, min(turn_score + roll, max_strat)):
hist[ndx] += (turn_score + roll)
turn_score += roll
print("")
hist = [x / turns for x in hist]
return hist |
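A minimal driver for the simulation above, shown as a hedged sketch: `DICE_SIDES` and `FAILS` are module-level constants the function relies on but which are not defined in this excerpt, so the standard single-die Pig values are assumed here.

```python
import random

DICE_SIDES = 6  # assumption: a standard six-sided die
FAILS = 1       # assumption: only a roll of 1 ends the turn with no points

expected = simulation(turns=10**5, max_strat=40)
# expected[k] estimates the points per turn for the strategy
# "keep rolling until the round score reaches k + 1".
best_goal = max(range(len(expected)), key=expected.__getitem__) + 1
print("best round goal:", best_goal, "expected points per turn:", expected[best_goal - 1])
```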
Python | def solver(max_strat=70):
"""
This is an exact solver for the game Pig, assuming strategies of the form "stop rolling once the round score
reaches a particular goal". The function returns the expected value of a single turn for each goal score.
:param max_strat: this is an int. The solver will give expected returns for each goal score up to this number.
:return: this will return a list of floats with max_strat entries where each entry is an expected return on a
single turn.
"""
if max_strat < DICE_SIDES:
raise ValueError("need to increase max strategy for this one, fam.")
hist = [0] * (max_strat + 1)
hist[0] = 1
solved = [0] * (max_strat + 1)
for ndx in range(1, len(hist)):
for roll in range(FAILS+1, DICE_SIDES+1):
hist[ndx] += hist[ndx - roll] / DICE_SIDES
for ndx in range(1, len(hist)):
for offset in range(1, DICE_SIDES+1):
if ndx-offset >= 0:
for roll in range(FAILS + 1, DICE_SIDES + 1):
if offset <= roll:
solved[ndx] += hist[ndx - offset] * (ndx - offset + roll) / DICE_SIDES
return solved[1:] |
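A quick sanity check of the closed-form solver against the Monte-Carlo estimate above, assuming the same hypothetical `DICE_SIDES = 6`, `FAILS = 1` constants as in the previous sketch; the two columns should agree to within sampling noise.

```python
analytic = solver(max_strat=40)
sampled = simulation(turns=2 * 10**5, max_strat=40)
for goal in (10, 20, 30):
    # both lists are indexed by goal score minus one
    print(goal, round(analytic[goal - 1], 3), round(sampled[goal - 1], 3))
```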
Python | def solver2(max_strat=70):
"""
This is an exact solver for the game Pig, assuming strategies of the form "stop rolling once the round score
reaches a particular goal". For each goal score, the function returns the chance of finishing the turn on each
possible point total.
:param max_strat: this is an int. The solver will give the distribution for each goal score up to this number.
:return: a list of max_strat dicts, one per goal score, mapping each reachable turn total to its probability;
key 0 holds the probability of busting and scoring nothing.
"""
if max_strat < DICE_SIDES:
raise ValueError("need to increase max strategy for this one, fam.")
hist = [0] * (max_strat + 1)
hist[0] = 1
solved = [{}] * (max_strat + 1)
for ndx in range(1, len(hist)):
for roll in range(FAILS+1, DICE_SIDES+1):
hist[ndx] += hist[ndx - roll] / DICE_SIDES
for ndx in range(1, len(hist)):
solved[ndx] = {0: 0}
for offset in range(1, DICE_SIDES+1):
if ndx-offset >= 0:
for roll in range(FAILS + 1, DICE_SIDES + 1):
if offset <= roll:
if (ndx - offset + roll) not in solved[ndx]:
solved[ndx][(ndx - offset + roll)] = 0
solved[ndx][(ndx - offset + roll)] += hist[ndx - offset] / DICE_SIDES
left = 1.
for i in solved[ndx]:
left -= solved[ndx][i]
solved[ndx][0] = left
return solved[1:] |
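The dicts returned by solver2 can be inspected directly; a small illustration of their shape, again under the six-sided-die assumption of the earlier sketches.

```python
chances = solver2(max_strat=40)
dist = chances[19]  # distribution for the strategy "stop once the round score reaches 20"
print(dist[0])                        # probability of busting and banking nothing
print(round(sum(dist.values()), 6))   # the probabilities sum to 1
print({total: round(p, 4) for total, p in sorted(dist.items()) if total})
```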
Python | def game_strat_approximater(chances, goal=200, scans=30):
"""
this will give an output of the chances and appropriate strategy to follow at the start of a round in the pig game
:param chances: this is a result of solver2 for the game that you want to play
:param goal: this is an int that describes the number of points that you need to win
:param scans: this is how many times the approximater will update each of the game states before resolving
:return: two 2-d lists: the first gives your chance of winning with the optimal strategy, the second gives the
optimal round goal (strategy) to follow.
"""
expected = [[.25 for _ in range(goal)] for __ in range(goal)]
strats = [[0. for _ in range(goal)] for __ in range(goal)]
for yyy in range(scans):
print("{} scans complete {:.2f}% done".format(yyy, yyy / scans * 100), end="\r")
for x_score in range(goal-1, -1, -1):
for n_score in range(goal - 1, -1, -1):
best_strat = None
best_score = 0. # this should never be below 0 so I chose this instead of float("-inf")
for ndx in range(len(chances)):
strat = chances[ndx]
score = 0.
for points in strat:
if (points + x_score) >= goal:
score += strat[points]
else:
score += strat[points] * (1. - expected[n_score][x_score + points])
if score > best_score:
best_score = score
best_strat = ndx + 1
expected[x_score][n_score] = best_score
strats[x_score][n_score] = best_strat
print("{} scans complete {:.2f}% done".format(yyy+1, (yyy+1) / scans * 100))
return expected, strats |
Python | def game_strat_approximater2(chances, goal=200, threshold=10**-3):
"""
this will give an output of the chances and appropriate strategy to follow at the start of a round in the pig game
:param chances: this is a result of solver2 for the game that you want to play
:param goal: this is an int that describes the number of points that you need to win
:param threshold: the maximum allowed change in any point value's chance of winning from one iteration to the
next; once no state changes by more than this, the approximater resolves.
:return: two 2-d lists: the first gives your chance of winning with the optimal strategy, the second gives the
optimal round goal (strategy) to follow.
"""
expected = [[.25 for _ in range(goal)] for __ in range(goal)]
strats = [[0 for _ in range(goal)] for __ in range(goal)]
delta = 1
yyy = 0
while delta >= threshold:
print("{} scans complete delta = {:.6f}".format(yyy, delta), end="\r")
delta = 0
yyy += 1
for x_score in range(goal-1, -1, -1):
for n_score in range(goal - 1, -1, -1):
best_strat = None
best_score = 0. # this should never be below 0 so I chose this instead of float("-inf")
for ndx in range(len(chances)):
strat = chances[ndx]
score = 0.
for points in strat:
if (points + x_score) >= goal:
score += strat[points]
else:
score += strat[points] * (1. - expected[n_score][x_score + points])
if score >= best_score:
best_score = score
best_strat = ndx + 1
delta = max(delta, abs(expected[x_score][n_score] - best_score))
expected[x_score][n_score] = best_score
strats[x_score][n_score] = best_strat
print("{} scans complete delta = {:.6f}".format(yyy, delta))
return expected, strats |
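Putting the pieces together for a full game; a hypothetical sketch in which the goal score and the example lookup are chosen purely for illustration.

```python
chances = solver2(max_strat=70)
win_prob, strategy = game_strat_approximater2(chances, goal=100)
# With my banked score at 40 and the opponent at 55, strategy[40][55] is the round
# goal to aim for and win_prob[40][55] the chance of winning if play continues optimally.
print(strategy[40][55], round(win_prob[40][55], 3))
```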
Python | def unbake(imageFile):
"""
Return the openbadges content contained in a baked PNG file.
If this doesn't work, return None.
If there is both an iTXt and tEXt chunk with keyword openbadges,
the iTXt chunk content will be returned.
"""
reader = png.Reader(file=imageFile)
for chunktype, content in reader.chunks():
if chunktype == b'iTXt' and content.startswith(b'openbadges\x00'):
return re.sub(b'openbadges[\x00]+', b'', content).decode('utf8')
elif chunktype == b'tEXt' and content.startswith(b'openbadges\x00'):
return content.split(b'\x00')[1].decode('utf8') |
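A hypothetical call of the extractor above; `badge.png` is a placeholder filename, the file must be opened in binary mode, and the function relies on the `png` (pypng) and `re` imports of the surrounding module.

```python
with open("badge.png", "rb") as image:
    assertion = unbake(image)
if assertion is None:
    print("no openbadges chunk found")
else:
    print(assertion)
```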
Python | def bake(imageFile, assertion_string, newfile=None):
"""
Embeds a serialized representation of a badge instance in a PNG image file.
"""
encoded_assertion_string = codecs.getwriter('utf-8')(assertion_string)
reader = png.Reader(file=imageFile)
if newfile is None:
newfile = NamedTemporaryFile(suffix='.png')
chunkheader = b'openbadges\x00\x00\x00\x00\x00'
chunk_content = chunkheader + encoded_assertion_string.stream.encode('utf-8')
badge_chunk = (b'iTXt', chunk_content)
png.write_chunks(newfile, baked_chunks(reader.chunks(), badge_chunk))
newfile.seek(0)
return newfile |
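And a matching hypothetical baking call; the assertion payload is only illustrative, and the returned object is a `NamedTemporaryFile` unless an explicit `newfile` is passed.

```python
import json

assertion = json.dumps({"id": "https://example.org/assertions/123"})
with open("badge.png", "rb") as source:
    baked = bake(source, assertion)
with open("baked-badge.png", "wb") as target:
    target.write(baked.read())
```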
Python | def baked_chunks(original_chunks, badge_chunk):
"""
Returns an iterable of chunks that places the Open Badges baked chunk
and filters out any previous Open Badges chunk that may have existed.
"""
def is_not_previous_assertion(chunk):
if chunk[1].startswith(b'openbadges\x00'):
return False
return True
first_slice = next(original_chunks)
last_slice = list(filter(
is_not_previous_assertion,
original_chunks
))
return itertools.chain([first_slice], [badge_chunk], last_slice) |
Python | def bake(input_file, output_file, data):
"""
This command bakes Open Badges data into a file and saves
the result to an output file.
Positional Arguments:
\b
Input filename: File must exist.
\b
Output filename: If file exists, it will be overwritten.
"""
output = utils.bake(input_file, data, output_file)
click.echo(
"{} is done baking. Remember to let it cool".format(output_file.name)
) |
Python | def unbake(input_file, output_file):
"""
This command extracts Open Badges data from an image and
prints it to a file or the standard output.
Positional Arguments:
\b
Input filename: File must exist.
\b
Output filename: If file exists, it will be overwritten.
"""
click.echo('')
output_file.write(utils.unbake(input_file).encode('utf8'))
click.echo('\n') |
Python | def argparsing():
"""
Handling command-line options, default values and options provided by
configuration files.
"""
conf_parser = argparse.ArgumentParser(
description="This tool send MQTT messages to random topics",
prog="mqtt-randompub",
epilog="Please report all bugs and comment.",
formatter_class=argparse.RawDescriptionHelpFormatter,
add_help=False,
)
conf_parser.add_argument("-f", "--config", help="configuration file to use")
args, remaining_argv = conf_parser.parse_known_args()
if args.config:
config = configparser.ConfigParser()  # SafeConfigParser is deprecated and removed in Python 3.12
config.read([args.config])
default_mqtt = dict(config.items("MQTT"))
default_topic = dict(config.items("Topic"))
default_payload = dict(config.items("Payload"))
else:
default_mqtt = {"broker": "127.0.0.1", "port": "1883", "qos": "0"}
default_topic = {
"topic": "test",
"subtopic1": ["a", "b", "c"],
"subtopic2": [0, 1],
}
default_payload = {"load": "## Test message from mqtt-randompub."}
parser = argparse.ArgumentParser(parents=[conf_parser])
parser.set_defaults(**default_mqtt)
parser.set_defaults(**default_topic)
parser.set_defaults(**default_payload)
parser.add_argument("-b", "--broker", help="set the broker")
parser.add_argument("-p", "--port", help="set the broker port")
parser.add_argument("-q", "--qos", help="set the QoS of the messages")
parser.add_argument("-t", "--topic", help="set the main topic")
parser.add_argument("-s", "--subtopic1", help="set the first subtopic")
parser.add_argument("-d", "--subtopic2", help="set the second subtopic")
parser.add_argument("-l", "--load", help="what to use as message payload")
parser.add_argument(
"-i", "--interval", default=1.0, help="time in seconds between the messages"
)
parser.add_argument(
"-n",
"--number",
default=1,
help="number of messages to send. set to 0 for running",
)
parser.add_argument(
"-r",
"--random",
default=False,
action="store_true",
help="generate a random numerical payload",
)
parser.add_argument(
"-w",
"--timestamp",
default=False,
action="store_true",
help="add a timestamp to the payload message",
)
parser.add_argument(
"-c",
"--counter",
default=False,
action="store_true",
help="add a counter to the payload message",
)
args = parser.parse_args(remaining_argv)
return args |
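A sketch of how the parsed options might be consumed to build one random topic; only fields defined by the parser above are used, and the `topic/subtopic1/subtopic2` layout is an assumption about how the tool combines them.

```python
import random

args = argparsing()
topic = "{}/{}/{}".format(args.topic,
                          random.choice(args.subtopic1),
                          random.choice(args.subtopic2))
print("publishing to", topic,
      "on {}:{} with QoS {}".format(args.broker, args.port, args.qos))
```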
Python | def financials(self):
"""
A getter that returns a pandas DataFrame with the yearly financials for the past 4 years.
Ticker.py uses relative imports so use this command to run tests: python -m yfinance.ticker -v
>>> import os
>>> import dill
>>> # Adding mock data to isolate our getter from the Ticker constructor :
>>> f = open("msft.dill", "rb")
>>> msft = dill.load(f)
>>> # Adding mocked flag to isolate our getter from get_fundamentals() :
>>> msft.ismocked = True
>>> f.close()
>>> msft.financials
2020-06-30 2019-06-30 2018-06-30 2017-06-30
Research Development 19269000000.0 16876000000.0 14726000000.0 13037000000.0
Effect Of Accounting Charges None None None None
Income Before Tax 53036000000.0 43688000000.0 36474000000.0 29901000000.0
Minority Interest None None None None
Net Income 44281000000.0 39240000000.0 16571000000.0 25489000000.0
Selling General Administrative 24709000000.0 23098000000.0 22223000000.0 19942000000.0
Gross Profit 96937000000.0 82933000000.0 72007000000.0 62310000000.0
Ebit 52959000000.0 42959000000.0 35058000000.0 29331000000.0
Operating Income 52959000000.0 42959000000.0 35058000000.0 29331000000.0
Other Operating Expenses None None None None
Interest Expense -2591000000.0 -2686000000.0 -2733000000.0 -2222000000.0
Extraordinary Items None None None None
Non Recurring None None None None
Other Items None None None None
Income Tax Expense 8755000000.0 4448000000.0 19903000000.0 4412000000.0
Total Revenue 143015000000.0 125843000000.0 110360000000.0 96571000000.0
Total Operating Expenses 90056000000.0 82884000000.0 75302000000.0 67240000000.0
Cost Of Revenue 46078000000.0 42910000000.0 38353000000.0 34261000000.0
Total Other Income Expense Net 77000000.0 729000000.0 1416000000.0 570000000.0
Discontinued Operations None None None None
Net Income From Continuing Ops 44281000000.0 39240000000.0 16571000000.0 25489000000.0
Net Income Applicable To Common Shares 44281000000.0 39240000000.0 16571000000.0 25489000000.0
"""
# Run method using mocked data without making a call to the Yahoo Finance api (for testing)
if self.ismocked: return self.get_financials(ismocked=True)
# Run method normally (for production)
else: return self.get_financials() |
Python | def save(model: Model,
name: str,
path: str = None,
trained_time_utc: datetime.datetime = None,
instance_id: str = None,
tags: Dict = None,
now_func: Callable[[], datetime.datetime] = None,
uuid_str_func: Callable[[], str] = None):
"""
Saves the `Model` wrapper that wraps over your underlying trained model (e.g. `tensorflow`, `pytorch`).
:param model: The Model object which wraps your underlying trained model.
:param name: The directory which will contain all the model artifacts.
:param path: The path where you would like to save the model directory.
:param trained_time_utc: You can specify a training time.
:param instance_id: A unique id to identify this particular training. The same script run on two separate
occasions should ideally have different instance ids.
:param tags: Key value pairs that you can associate with your model instance.
:param now_func: For testing purposes only.
:param uuid_str_func: For testing purposes only.
"""
now_func = now_func or util.utc_now_str
uuid_str_func = uuid_str_func or util.uuid
path = path or os.getcwd()
model_dir = os.path.join(path, name)
os.makedirs(model_dir, exist_ok=True)
with open(helper.model_path(model_dir), "wb") as f:
cloudpickle.dump(model, f)
opts = util.optional(tags=tags)
fixed = dict(trained_time_utc=trained_time_utc.isoformat() if trained_time_utc else now_func(),
instance_id=instance_id or uuid_str_func())
meta = dict(thampi_data_version='0.1')
properties = util.dicts(meta, fixed, opts)
with open(helper.properties_path(model_dir), "w") as f:
json.dump(properties, f, indent=4, ensure_ascii=False) |
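A hypothetical save call: `wrapped_model` stands for an instance of thampi's `Model` wrapper (its construction is not shown in this excerpt), and the name and tags are placeholders.

```python
# wrapped_model: an already-built thampi Model instance wrapping your trained model
save(wrapped_model,
     name="iris-classifier",
     tags={"framework": "scikit-learn", "experiment": "baseline"})
# writes ./iris-classifier/ containing the pickled model and a properties JSON
```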
Python | def serve(environment: str,
model_dir: str,
dependency_file: str,
zappa_settings_file: str = None,
project_dir: str = None,
served_time_utc: datetime.datetime = None,
docker_run_func=None,
setup_working_dir_func=None,
clean_up_func=None,
uuid_str_func=None,
aws_module=None,
project_exists_func=None,
now_func=None,
read_properties_func: Callable[[str], Dict] = None
):
"""
Serves the model directory and updates the AWS Lambda instance (or creates it if it does not exist).
:param environment: corresponds to the highest key in `zappa_settings.json` e.g `staging` or `production`.
:param model_dir: The directory which contains the model artifacts (i.e `model.pkl`, `thampi.json`).
:param dependency_file: Usually path to a `requirements.txt` for pip or the conda equivalent. Note, the conda file
has to be manually created. See the `Limitations` section in the `Overview` document.
:param zappa_settings_file: Path to `zappa_settings.json` file.
:param project_dir: The base directory for all the helper data/files/modules that may be called within your
model.pkl file.
:param served_time_utc: Explicitly set a time that it was served.
:param docker_run_func: For Testing purposes only
:param setup_working_dir_func: For Testing purposes only
:param clean_up_func: For Testing purposes only
:param uuid_str_func: For Testing purposes only
:param aws_module: For Testing purposes only
:param project_exists_func: For Testing purposes only
:param now_func: For Testing purposes only
:param read_properties_func: For Testing purposes only
:return: None
"""
project_working_dir = None
check_environment_provided(environment=environment)
docker_run_func = docker_run_func or run_zappa_command_in_docker
setup_working_dir_func = setup_working_dir_func or setup_working_directory
clean_up_func = clean_up_func or clean_up
uuid_str_func = uuid_str_func or util.uuid
aws_module = aws_module or aws
project_exists_func = project_exists_func or helper.project_exists
now_func = now_func or util.utc_now_str
read_properties_func = read_properties_func or read_properties
served_time_utc = served_time_utc.isoformat() if served_time_utc else now_func()
zappa_settings = read_zappa_file(zappa_settings_file)
clean_up_func(DEV_ENVIRONMENT, zappa_settings)
a_uuid = uuid_str_func()
project_name = zappa_settings[environment][constants.PROJECT_NAME]
bucket = zappa_settings[environment][constants.ZAPPA_BUCKET]
region_name = zappa_settings[environment][AWS_REGION]
aws_module.create_bucket(bucket)
training_properties = read_properties_func(model_dir)
properties = dict(training_properties, served_time_utc=served_time_utc)
stream = json.dumps(properties)
aws_module.upload_stream_to_s3(stream, bucket, helper.properties_key(environment, project_name))
model_key = helper.model_key(environment, project_name)
aws_module.upload_to_s3(helper.model_path(model_dir), bucket, model_key)
try:
project_working_dir, thampi_req_file = setup_working_dir_func(a_uuid, project_name, dependency_file,
project_dir)
docker_run_command = partial(docker_run_func,
a_uuid=a_uuid,
project_name=project_name,
project_working_dir=project_working_dir,
thampi_req_file=thampi_req_file,
zappa_settings=zappa_settings[environment])
if not project_exists_func(environment, project_name, region_name):
# if not project_exists(environment, project_name, region_name):
deploy_action = f'zappa deploy {environment}'
docker_run_command(zappa_action=deploy_action)
else:
update_action = f'zappa update {environment}'
docker_run_command(zappa_action=update_action)
finally:
if project_working_dir:
shutil.rmtree(project_working_dir) |
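A hypothetical deployment call using only the public parameters; the environment name, model directory and requirements file are placeholders that must match your own `zappa_settings.json` and project layout.

```python
serve(environment="staging",
      model_dir="./iris-classifier",
      dependency_file="requirements.txt")
```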
Python | def clean(scope: str):
"""
Clean up `thampi` relevant files.
NOTE: it won't delete the S3 bucket associated with this project. You'll have to do that manually.
:param scope: `project` or `all`. `project` will only clean up the resources for this project. `all` is for the
entire `thampi` installation locally.
"""
if scope == 'project':
clean_project()
elif scope == 'all':
clean_all() |
Python | def predict(environment: str, data: Dict) -> Dict:
"""
A convenience method to invoke and test the AWS Lambda instance endpoint.
:param environment: corresponds to the highest key in `zappa_settings.json` e.g `staging` or `production`.
:param data: A dictionary that is sent to the AWS Lambda endpoint and directly to the `predict` method in your model
file
:return: Your prediction
"""
zappa_settings = helper.read_zappa(helper.default_zappa_settings_path())
project_name = zappa_settings[environment][constants.PROJECT_NAME]
region_name = zappa_settings[environment][constants.REGION]
a_lambda_name = helper.lambda_name(environment, project_name)
# TODO: get api url for project/environment
url = helper.get_api_url(a_lambda_name, environment, region_name)
predict_url = url + '/' + project_name + '/' + 'predict'
headers = {"Content-type": "application/json"}
try:
result = requests.post(predict_url, headers=headers, data=json.dumps(dict(data=data)))
result.raise_for_status()
return result.json()
except requests.exceptions.RequestException as e: # This is the correct syntax
print(e)
print(f"Run 'zappa tail {environment} --since=3m' to see recent error logs.")
sys.exit(1) |
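Once deployed, the endpoint can be exercised with the convenience wrapper; the payload shape is whatever your model's `predict` method expects, so the feature list below is purely illustrative.

```python
result = predict("staging", data={"features": [5.1, 3.5, 1.4, 0.2]})
print(result)
```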
Python | def info(environment: str) -> Dict:
"""
Convenience method to display useful information about your settings based on your environment.
:param environment: corresponds to the highest key in `zappa_settings.json` e.g `staging` or `production`.
:return: Data about your instance based on `environment`.
"""
zappa_settings = helper.read_zappa(helper.default_zappa_settings_path())
project_name = zappa_settings[environment][constants.PROJECT_NAME]
region_name = zappa_settings[environment][constants.REGION]
lambda_name = slugify.slugify(project_name + '-' + environment)
# TODO: get api url for project/environment
url = helper.get_api_url(lambda_name, environment, region_name)
predict_url = url + '/' + project_name + '/' + 'predict'
return dict(url=predict_url) |
Python | def check_venv():
""" Ensure we're inside a virtualenv. """
# Just for `init`, when we don't have settings yet.
venv = get_current_venv()
if not venv:
raise ValueError('Need a virtual environment to be activated first') |
Python | def initialize(self, context: ThampiContext) -> None:
"""
This method is called once when AWS Lambda first loads the model. You can override this method to set up
global state (e.g. self.database_connection) which you can access within the `predict` method
:param context: See documentation for Thampi Context API
"""
pass |
Python | def predict(self, args: Dict, context: ThampiContext) -> Dict:
"""
This method is called when the client hits the predict endpoint.
:param args: The `data` value sent by the client request is populated here.
:param context: See documentation for Thampi Context API
:return: Returns a dictionary which is automatically converted to JSON and sent back to the client.
"""
pass |
Python | def init(clip_size):
"""Generate an initial random parameter.
Args:
clip_size (float): the parameter will be clipped
to the domain [-clip_size, clipsize].
Returns:
float: the random clipped parameter
"""
return np.random.rand() * 2 * clip_size - clip_size |
Python | def on_state(a, cutoff):
"""The ON resource state we would like to learn.
|psi> = |0> + i*sqrt(3/2)*a|1> + a*i|3>
Args:
a (float): the ON state parameter
cutoff (int): the Fock basis truncation
Returns:
array: the density matrix rho=|psi><psi|
"""
ket = np.zeros([cutoff], dtype=np.complex128)
ket[0] = 1.
ket[1] = 1j*np.sqrt(3/2)*a
ket[3] = 1j*a
ket = ket/np.linalg.norm(ket)
dm = np.einsum('i,j->ij', ket, np.conj(ket))
return dm |
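A quick sanity check of the target state constructed above (the parameter value is arbitrary): a valid pure-state density matrix should be Hermitian, have unit trace, and have purity one.

```python
import numpy as np

rho = on_state(a=0.3, cutoff=15)
print(np.isclose(np.trace(rho).real, 1.0))        # unit trace
print(np.allclose(rho, rho.conj().T))             # Hermitian
print(np.isclose(np.trace(rho @ rho).real, 1.0))  # purity 1, since rho = |psi><psi|
```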
Python | def circuit(params, a, m, cutoff):
"""Runs the constrained variational circuit with specified parameters,
returning the output fidelity to the requested ON state, as well as
the post-selection probability.
Args:
params (list): list of gate parameters for the constrained
variational quantum circuit. This should contain the following values
in the following order:
* ``'sq0' = (r, phi)``: the squeezing magnitude and phase on mode 0
* ``'disp0' = (r, phi)``: the displacement magnitude and phase on mode 0
* ``'sq1' = (r, phi)``: the squeezing magnitude and phase on mode 1
* ``'disp1' = (r, phi)``: the displacement magnitude and phase on mode 1
* ``'BS' = (theta, phi)``: the beamsplitter angles
a (float): the ON state parameter
m (int): the Fock state measurement to be post-selected
cutoff (int): the Fock basis truncation
Returns:
tuple: a tuple containing the output fidelity to the target ON state,
the probability of post-selection, the state norm before entering the beamsplitter,
the state norm after exiting the beamsplitter, and the density matrix of the output state.
"""
# define target state
ONdm = on_state(a, cutoff)
# unpack circuit parameters
sq0_r, sq0_phi, disp0_r, disp0_phi, sq1_r, sq1_phi, disp1_r, disp1_phi, theta, phi = params
# quantum circuit prior to entering the beamsplitter
prog1 = sf.Program(2)
with prog1.context as q1:
Sgate(sq0_r, sq0_phi) | q1[0]
Dgate(disp0_r, disp0_phi) | q1[0]
Sgate(sq1_r, sq1_phi) | q1[1]
Dgate(disp1_r, disp1_phi) | q1[1]
eng = sf.Engine("fock", backend_options={"cutoff_dim": cutoff})
stateIn = eng.run(prog1).state
normIn = np.abs(stateIn.trace())
# norm of output state and probability
prog_BS = sf.Program(2)
with prog_BS.context as q1:
BSgate(theta, phi) | (q1[0], q1[1])
stateOut = eng.run(prog_BS).state
normOut = np.abs(stateOut.trace())
rho = stateOut.dm()
# probability of measuring m1 and m2
prob = np.abs(np.trace(rho[m, m]))
# output state
rhoB = rho[m, m]/prob
fidelity = np.abs(np.trace(np.einsum('ij,jk->ik', rhoB, ONdm)))
return (fidelity, prob, normIn, normOut, rhoB) |
Python | def loss(params, a, m, cutoff):
"""Returns the loss function of the constrained variational circuit.
The loss function is given by:
loss = -fidelity + 10*(1-np.abs(normIn)) + 10*(1-np.abs(normOut))
Therefore, minimising the loss function will result in the output state
approaching the target ON state.
Args:
params (list): list of gate parameters for the constrained
variational quantum circuit. This should contain the following values
in the following order:
* ``'sq0' = (r, phi)``: the squeezing magnitude and phase on mode 0
* ``'disp0' = (r, phi)``: the displacement magnitude and phase on mode 0
* ``'sq1' = (r, phi)``: the squeezing magnitude and phase on mode 1
* ``'disp1' = (r, phi)``: the displacement magnitude and phase on mode 1
* ``'BS' = (theta, phi)``: the beamsplitter angles
a (float): the ON state parameter
m (int): the Fock state measurement to be post-selected
cutoff (int): the Fock basis truncation
Returns:
float: loss value.
"""
fidelity, _, normIn, normOut, _ = circuit(params, a, m, cutoff)
loss = -fidelity + 10*(1-np.abs(normIn)) + 10*(1-np.abs(normOut))
return loss |
Python | def loss_with_prob(params, a, m, cutoff):
"""Returns the loss function of the constrained variational circuit
with post-selection probability to be also maximised.
The loss function is given by:
loss = -fidelity - prob + 10*(1-np.abs(normIn)) + 10*(1-np.abs(normOut))
Therefore, minimising the loss function will result in the output state
approaching the target ON state, while also maximising the probability
of generating the output state.
Args:
params (list): list of gate parameters for the constrained
variational quantum circuit. This should contain the following values
in the following order:
* ``'sq0' = (r, phi)``: the squeezing magnitude and phase on mode 0
* ``'disp0' = (r, phi)``: the displacement magnitude and phase on mode 0
* ``'sq1' = (r, phi)``: the squeezing magnitude and phase on mode 1
* ``'disp1' = (r, phi)``: the displacement magnitude and phase on mode 1
* ``'BS' = (theta, phi)``: the beamsplitter angles
a (float): the ON state parameter
m (int): the Fock state measurement to be post-selected
cutoff (int): the Fock basis truncation
Returns:
float: loss value.
"""
fidelity, prob, normIn, normOut, _ = circuit(params, a, m, cutoff)
loss = -fidelity -prob + 10*(1-np.abs(normIn)) + 10*(1-np.abs(normOut))
return loss |
Python | def run_global_optimization(a, m, nhp):
"""Run the constrained variational quantum circuit global optimization
using the basin hopping algorithm.
Args:
a (float): the ON state parameter
m (int): the Fock state measurement to be post-selected
nhp (int): number of basin hopping iterations
Returns:
tuple: optimization results. A tuple of circuit parameters,
fidelity to the target state, and probability of generating the state.
"""
# circuit hyperparameters
clip_size = 1
cutoff = 15
# generate the initial parameters
bound = [clip_size, np.pi]*4+[np.pi]*2
x0 = map(init, bound)
# perform the optimization
minimizer_kwargs = {"method": "SLSQP", "args": (a, m, cutoff)} # SLSQP L-BFGS-B
print("hopping....")
res = basinhopping(loss, list(x0), minimizer_kwargs=minimizer_kwargs, niter=nhp)
# print the final results
x_f = res.x
fidelity, prob, _, _, _ = circuit(x_f, a, m, cutoff)
print("final fid {}, prob {}".format(fidelity, prob))
return res.x, fidelity, prob |
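A hypothetical driver for the optimization; it assumes the module's imports (`numpy`, `scipy.optimize.basinhopping`, Strawberry Fields) are available, and the ON-state parameter, post-selected outcome, and hop count are free choices.

```python
best_params, best_fid, best_prob = run_global_optimization(a=0.3, m=1, nhp=10)
print("fidelity:", best_fid, "post-selection probability:", best_prob)
print("circuit parameters:", best_params)
```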
Python | def weights(self, method="random"):
""" Initialization method for the attribute 'weights'.
The argument 'order=tdl.BUILD' in the descriptor tells tdl
that 'weights' should be initialized when calling tdl.build.
The default value of 'order' is tdl.INIT, which initializes
attributes when __init__ is called
"""
return self._init_vec(self.units, method) |
Python | def bias(self, method="zeros"):
""" Initialization method for attribute bias.
The 'reqs' argument in the descriptor indicates that the
initialization requires 'units' to be initialized first.
"""
return self._init_vec(self.units, method) |
Python | def select_hyper_best_parameters(results, measure='psnr'):
"""
Selects the best hyper-parameter choice from a Dival ResultTable
:param results: Dival ResultTable with one task containing several subtasks
:param measure: Measure to use to select the best hyper-parameter choice
:return: Dival hyper-parameters dictionary and the corresponding measure
"""
best_choice = None
best_measure = -np.inf
assert len(results.results.loc[0]) == len(results.results)
for _, row in results.results.iterrows():
mean_measure = np.mean(row['measure_values'][measure])
if mean_measure > best_measure:
best_measure = mean_measure
best_choice = row['misc'].get('hp_choice', dict())
    return best_choice, best_measure |
Python | def load_standard_dataset(dataset, impl=None, ordered=False):
"""
Loads a Dival standard dataset.
:param dataset: Name of the standard dataset
:param impl: Backend for the Ray Transform
:param ordered: Whether to order by patient id for 'lodopab' dataset
:return: Dival dataset.
"""
if impl is None:
impl = 'astra_cpu'
if torch.cuda.is_available():
impl = 'astra_cuda'
kwargs = {'impl': impl}
if dataset == 'ellipses':
kwargs['fixed_seeds'] = True
# we do not use 'sorted_by_patient' here in order to be transparent to
# `CachedDataset`, where a `ReorderedDataset` is handled specially
# if dataset == 'lodopab':
# kwargs['sorted_by_patient'] = ordered
dataset_name = dataset.split('_')[0]
dataset_out = get_standard_dataset(dataset_name, **kwargs)
if dataset == 'lodopab_200':
angles = list(range(0, 1000, 5))
dataset_out = AngleSubsetDataset(dataset_out, angles)
if dataset_name == 'lodopab' and ordered:
idx = get_lodopab_idx_sorted_by_patient()
dataset_ordered = ReorderedDataset(dataset_out, idx)
dataset_ordered.ray_trafo = dataset_out.ray_trafo
dataset_ordered.get_ray_trafo = dataset_out.get_ray_trafo
dataset_out = dataset_ordered
    return dataset_out |
Python | def save_results_table(table, table_key):
"""
Saves results (hyper param selection) to a file
:param table: ResultTable
:param table_key: Key that identifies the results
"""
path = os.path.join(BASE_DIR, 'reports', table_key + '.txt')
with open(path, "w") as f:
f.write(table.to_string(show_columns=['misc'], max_colwidth=1000))
        f.close() |
Python | def plot_reconstructions(reconstructions, titles, ray_trafo, obs, gt, save_name=None,
fig_size=(18, 4.5), cmap='pink'):
"""
    Plots a ground truth image alongside several reconstructions.
    :param reconstructions: List of reconstructions to display
    :param titles: Titles for the reconstruction subplots
    :param ray_trafo: Ray transform used to compute the data errors
    :param obs: Observed data (sinogram)
    :param gt: Ground truth image
    :param save_name: Optional file name (without extension) for saving the figure as a PDF
"""
psnrs = [PSNR(reco, gt) for reco in reconstructions]
ssims = [SSIM(reco, gt) for reco in reconstructions]
l2_error0 = np.sqrt(
np.sum(np.power(ray_trafo(gt).asarray() - obs.asarray(), 2)))
l2_error = [np.sqrt(np.sum(np.power(
ray_trafo(reco).asarray() - obs.asarray(), 2))) for reco in reconstructions]
# plot results
im, ax = plot_images([gt, ] + reconstructions, fig_size=fig_size, rect=(0.0, 0.0, 1.0, 1.0),
xticks=[], yticks=[], vrange=(0.0, 0.9 * np.max(gt.asarray())), cbar=False,
interpolation='none', cmap=cmap)
# set labels
ax[0].set_title('Ground Truth')
for j in range(len(reconstructions)):
ax[j + 1].set_title(titles[j])
        ax[j + 1].set_xlabel('$\\ell_2$ data error: {:.4f}\nPSNR: {:.1f}, SSIM: {:.2f}'
                             .format(l2_error[j], psnrs[j], ssims[j]))
    ax[0].set_xlabel('$\\ell_2$ data error: {:.2f}'.format(l2_error0))
    plt.tight_layout()
if save_name:
plt.savefig('%s.pdf' % save_name)
    plt.show() |
Python | def plot_iterations(recos, iters, save_name=None, fig_size=(18, 4.5), cmap='pink'):
"""
Plot several iterates of an iterative method
:param recos: List of reconstructions
:param iters: Iteration numbers
"""
im, ax = plot_images(recos, fig_size=fig_size, rect=(0.0, 0.0, 1.0, 1.0),
xticks=[], yticks=[], vrange=(0., 0.9), cbar=False,
interpolation='none', cmap=cmap)
for i in range(len(iters)):
ax[i].set_title('Iteration: %d' % iters[i])
    plt.tight_layout()
if save_name:
plt.savefig('%s.pdf' % save_name)
    plt.show() |
Python | def load_colormap(json_file):
"""Generates and returns a matplotlib colormap from the specified JSON file,
or None if the file was invalid."""
colormap = None
with open(json_file, "r") as fidin:
cmap_dict = json.load(fidin)
if cmap_dict.get('colors', None) is None:
return colormap
colormap_type = cmap_dict.get('type', 'linear')
colormap_name = cmap_dict.get('name', os.path.basename(json_file))
if colormap_type == 'linear':
colormap = colors.LinearSegmentedColormap.from_list(name=colormap_name,
colors=cmap_dict['colors'])
elif colormap_type == 'list':
colormap = colors.ListedColormap(name=colormap_name, colors=cmap_dict['colors'])
    colormap_vmin = cmap_dict.get('vmin', None)
    colormap_vmax = cmap_dict.get('vmax', None)
    colormap_ticklabel = cmap_dict.get('tick_label', None)
    colormap_tickloc = cmap_dict.get('tick_loc', None)
setattr(colormap, 'vmin', colormap_vmin)
setattr(colormap, 'vmax', colormap_vmax)
setattr(colormap, 'ticklabel', colormap_ticklabel)
setattr(colormap, 'tickloc', colormap_tickloc)
    return colormap |
Python | def load(cmap_name, cmap_folder=colormaps_path()):
"""Returns the matplotlib colormap of the specified name -
if not found in the predefined
colormaps, searches for the colormap in the specified
folder (defaults to standard colormaps
folder if not specified)."""
if cmap_name.endswith('.cmap'):
cmap_name_user = cmap_name
else:
cmap_name_user = cmap_name + '.cmap'
user_colormaps = get_user_colormaps(cmap_folder)
system_colormaps = get_system_colormaps()
if cmap_name_user in user_colormaps:
cmap_file = os.path.join(cmap_folder, cmap_name_user)
cmap = load_colormap(cmap_file)
elif cmap_name in system_colormaps:
cmap = cm.get_cmap(cmap_name)
cmap_color = []
for iN in range(0, cmap.N):
cmap_color.append(list(cmap(iN)))
cmap_color[0][3] = 0.0
write_rgb_colormaps(os.path.join(cmap_folder, 'current_map.txt'), cmap_color)
cmap = colors.LinearSegmentedColormap.from_list(name=cmap_name, colors=cmap_color)
return cmap
else:
raise ValueError('Colormap not found')
    return cmap |
Python | def cut_extension(fn):
"""Cut everything to first period L->R."""
period = len(fn)
for index in range(len(fn)-1, -1, -1):
if fn[index] == '.':
period = index
retval = fn[:period]
    return retval |
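# A minimal illustrative sketch of cut_extension (file names below are made up):
print(cut_extension('report.csv'))      # -> 'report'
print(cut_extension('archive.tar.gz'))  # -> 'archive' (cut at the first period)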
Python | def is_float(val):
"""Boolean function to say if this value is not a float."""
try:
float(val)
return True
except ValueError:
        return False |
Python | def contains(string, substring):
"""Boolean function detecting if substring is part of string."""
try:
if string.index(substring) >= 0:
return True
except ValueError:
        return False |
Python | def find_index_of(string, character, occurrence=1):
"""Find the n-th index of a character in a string."""
count = 0
for char_index in range(len(string)):
if string[char_index] == character:
count += 1
if count == occurrence:
return char_index
    return -1 |
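# Illustrative sketch of find_index_of with a made-up string:
s = 'run_01_output'
print(find_index_of(s, '_', occurrence=2))  # -> 6
print(find_index_of(s, '_', occurrence=3))  # -> -1 (no third underscore)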
Python | def match_list_entries(a, b):
"""
Match up these two input lists by removing entries from one that correspond to nans or None in the other.
Lists must be of same length.
"""
indices_to_keep = []
if len(a) != len(b):
raise ValueError('Lists should be of same length!')
exclude = [None, float('nan'), float('inf'), 'nan']
for entry in range(len(a)):
if a[entry] not in exclude and \
b[entry] not in exclude and \
not np.isnan(a[entry]) and \
not np.isnan(b[entry]):
indices_to_keep.append(entry)
a = [a[i] for i in indices_to_keep]
b = [b[i] for i in indices_to_keep]
    return a, b |
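# Illustrative sketch of match_list_entries (values are made up); positions where either
# list holds NaN or None are dropped from both lists:
a = [1.0, 2.0, float('nan'), 4.0]
b = [5.0, None, 7.0, 8.0]
print(match_list_entries(a, b))  # -> ([1.0, 4.0], [5.0, 8.0])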
Python | def load_data(file_name, path, sep=',', add_ext=False, pandas_format=True, columns=None):
"""
Primary function to load data.
Will load any pandas csv. Optional separation indicator.
Will load all types of objects, strings, floats, etc. not just floats.
"""
if not add_ext:
csv_path = os.path.join(path, file_name)
else:
csv_path = os.path.join(path, file_name+'.csv')
if pandas_format:
frame = pd.read_csv(csv_path, sep=sep, index_col=0, low_memory=False)
else:
if columns: # no header row exists
frame = pd.read_csv(csv_path, sep=sep,
low_memory=False, header=None, names=columns)
else: # header row exists
frame = pd.read_csv(csv_path, sep=sep, low_memory=False)
    return frame |
Python | def save_data(frame, file_name, path, suffix='', cut_ext=False):
"""
Primary function to save data to csv.
If file name contains an extension, can be cut by setting cut_ext to True.
"""
if cut_ext:
name = cut_extension(file_name) + suffix + '.csv'
else:
name = file_name + suffix + '.csv'
    frame.to_csv(os.path.join(path, name), na_rep='None') |
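# Hedged round-trip sketch for save_data/load_data using a temporary directory
# (the frame contents are made up):
import tempfile
import pandas as pd
with tempfile.TemporaryDirectory() as tmp:
    demo = pd.DataFrame({'x': [1, 2], 'y': [3.0, 4.0]})
    save_data(demo, 'demo', tmp)                      # writes demo.csv
    restored = load_data('demo', tmp, add_ext=True)   # reads demo.csv back
    print(restored)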
Python | def xlsx_to_csv(xlsx_name, csv_name, path):
"""Convert file at path from xlsx to csv."""
# convert to csv
# setup:
# RUN
# sudo apt install gnumeric
# sudo rm -f /etc/machine-id # might be able to get away without doing this
# sudo dbus-uuidgen --ensure=/etc/machine-id
flags = ['ssconvert', '-S', os.path.join(
path, xlsx_name), os.path.join(path, csv_name)]
    run(flags) |
Python | def rename(old_name, new_name, old_path, new_path=None):
"""Rename one file with a new file name."""
old_file = os.path.join(old_path, old_name)
    new_file = os.path.join(
        new_path, new_name) if new_path is not None else os.path.join(old_path, new_name)
    call(['mv', old_file, new_file]) |
Python | def where(frame, col, value):
"""Take slice of dataframe where col has value."""
rows = []
for row in range(frame.shape[0]):
curr = frame.iloc[row][col]
if curr == value:
rows.append(row)
frame = frame.iloc[rows]
frame = renumber_index(frame)
    return frame |
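# Minimal sketch of where() on a toy frame (column names are illustrative); note that it
# relies on renumber_index(), defined further below, being in scope at call time:
import pandas as pd
toy = pd.DataFrame({'step': [1, 2, 1], 'value': [10, 20, 30]})
print(where(toy, 'step', 1))  # keeps the two rows with step == 1, re-indexed as 0 and 1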
Python | def where_partial(frame, col, value):
"""Take slice of dataframe where col partially contains value."""
rows = []
for row in range(frame.shape[0]):
curr = frame.iloc[row][col]
if contains(curr, value):
rows.append(row)
frame = frame.iloc[rows]
frame = renumber_index(frame)
    return frame |
Python | def where_multi(frame, cols, values):
"""Take slice of dataframe where list of cols have corresponding values."""
rows = []
for row in range(frame.shape[0]):
curr = list(frame.iloc[row][cols])
if curr == values:
rows.append(row)
frame = frame.iloc[rows]
frame = renumber_index(frame)
    return frame |
Python | def where_any(frame, col, values):
"""
    Take a slice of dataframe where col matches any of the values (list).
    Rows whose value in col is not contained in values are dropped.
"""
rows = []
for row in range(frame.shape[0]):
curr = frame.iloc[row][col]
if curr in values:
rows.append(row)
frame = frame.iloc[rows]
frame = renumber_index(frame)
    return frame |
Python | def remove_where(frame, col, values):
"""
    Take a slice of dataframe excluding rows where col matches any of the values (list).
    This is the complement of where_any: rows whose value in col is in values are dropped.
"""
rows = []
for row in range(frame.shape[0]):
curr = frame.iloc[row][col]
if curr not in values:
rows.append(row)
frame = frame.iloc[rows]
frame = renumber_index(frame)
    return frame |
Python | def remove_partial(frame, col, value):
"""Take slice of dataframe where col contains value."""
rows = []
for row in range(frame.shape[0]):
curr = frame.iloc[row][col]
if not contains(curr, value):
rows.append(row)
frame = frame.iloc[rows]
frame = renumber_index(frame)
    return frame |
Python | def renumber_index(frame):
"""Renumbers the row indices of frame for proper .iloc[] indexing."""
indices = pd.Series(list(range(frame.shape[0])))
frame.index = indices
    return frame |
Python | def functional_replace(frame, old, fun, cols=None):
"""
Apply passed function to all values matching old in col.
Input:
* frame
* old = value to replace
        * fun = function applied to each matching value to produce the replacement
        * cols = LIST of columns to replace within.
          Defaults to all.
    An example use is mapping the step-number strings currently in place to global numeric step values.
"""
columns = cols if cols is not None else frame.columns
for row in range(frame.shape[0]):
for col in columns:
if frame.loc[row, col] == old:
frame.loc[row, col] = fun(old)
    return frame |
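# Illustrative sketch of functional_replace: convert the step label '1' to an int
# (toy data; only the 'step' column is touched):
import pandas as pd
toy = pd.DataFrame({'step': ['1', '2'], 'note': ['a', 'b']})
print(functional_replace(toy, '1', int, cols=['step']))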
Python | def convert_to_numeric(frame):
"""
Convert this dataset to a numeric-typed dataset.
Input: A dataset that contains only numbers but has fields labeled as objects or other.
Output: The same dataset, retyped to appropriate numerics (floats, ints, etc).
"""
for column in frame.columns:
frame.loc[:, column] = pd.to_numeric(
frame.loc[:, column], errors='coerce')
    return frame |
Python | def reorder_columns(frame, front_columns):
"""
Re-order the columns of frame placing front_columns first.
Good for looking specifically at certain columns for specific file in pipeline.
"""
# reorder columns into appropriate order
new_cols = list(frame.columns)
for d in front_columns:
new_cols.remove(d)
new_cols = front_columns + new_cols
frame = frame[new_cols]
    return frame |
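# Quick sketch of reorder_columns with made-up column names:
import pandas as pd
toy = pd.DataFrame({'a': [1], 'id': [7], 'score': [0.5]})
print(reorder_columns(toy, ['id', 'score']).columns.tolist())  # -> ['id', 'score', 'a']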
Python | def drop_column_suffix(frame, suffix):
"""Drop this suffix from each column that contains it."""
frame.columns = [col if not col.endswith(
suffix) else col[:-len(suffix)] for col in frame.columns]
    return frame |
Python | def add_column(frame, column, fill_value=None):
"""Take folder and add fill_value category of that value to each row."""
if column not in list(frame.columns):
frame[column] = fill_value
    return frame |
Python | def slice_column_values(frame, column, start=0, end=None):
"""
Take frame and trim every value in the column specified by the number of indices specified.
Take specified slice of each column.
The default values of start and end do not change the length of any of the values.
"""
old_series = frame[column]
new_series = []
for row in range(old_series.shape[0]):
curr = old_series[row]
piece = curr[start:end] if end is not None else curr[start:len(curr)]
new_series.append(piece)
new_series = pd.Series(new_series, name=column)
frame[column] = new_series
    return frame |
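# Sketch of slice_column_values: keep only the first three characters of each 'code'
# entry (toy data):
import pandas as pd
toy = pd.DataFrame({'code': ['ABC-001', 'XYZ-002']})
print(slice_column_values(toy, 'code', start=0, end=3))  # -> 'ABC', 'XYZ'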
Python | def isolate_before_nans(frame, attr):
"""Return a dataset that does not contain nans at the end."""
for row in range(frame.shape[0]-1, 0, -1):
if frame.iloc[row][attr] != float('nan'):
return frame.loc[:row, :]
    return frame |
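# Sketch of isolate_before_nans on toy data, assuming the pd.isna() check above;
# the two trailing NaN rows are dropped:
import numpy as np
import pandas as pd
toy = pd.DataFrame({'value': [1.0, 2.0, np.nan, np.nan]})
print(isolate_before_nans(toy, 'value'))  # keeps rows 0 and 1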
Python | def combine_files(suffix, folders, path):
"""
From each folder, take each file with this suffix, load it and append it to the resultant frame.
Input:
suffix = End of file name, before extension
folders = list of folder to collect files from
path = root path to all folders referenced
Output:
Saved file labeled 'dataset_'+suffix, containing the dataframe of all frames appended.
Data from each folder is labeled with a column 'folder' with the folder name embedded.
Returns:
Frame that is saved.
Suffix should not include .csv
Only .csv files should be specified.
"""
frame = None
for folder in folders:
folder_path = os.path.join(path, folder)
try:
df = load_data(folder + suffix, folder_path, add_ext=True)
df = add_column(df, 'folder', folder)
            # DataFrame.append was removed in pandas 2.0; use pd.concat instead
            frame = pd.concat([frame, df], sort=True) if frame is not None else df
except FileNotFoundError:
print('\tskipping', folder)
continue
    save_data(frame, 'dataset', path, suffix)  # save_data appends the suffix itself
    return frame |
Python | def combine_rows(frame, on, suffix_attr):
"""
Join rows into one row based on category.
Input:
* frame = pd.DataFrame this is about
* on = attribute that each row that will be combined shares in value
* suffix_attr = attribute that differentiates rows to be combined with each other
Returns:
* joined data frame
"""
joined_df = None
for row in range(frame.shape[0]):
curr = frame.loc[row, :].to_frame('curr').transpose()
suffix = str(curr.iloc[0][suffix_attr])
if joined_df is not None:
joined_df = pd.merge(
joined_df, curr, on=on, suffixes=('', '_'+suffix))
else:
curr.columns = [str(col) + '_' + suffix if str(col)
!= on else str(col) for col in curr.columns]
joined_df = curr
step_cols = [
col for col in joined_df.columns if contains(col, suffix_attr)]
joined_df = joined_df.drop(step_cols, axis=1)
    return joined_df |
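# Illustrative sketch of combine_rows on toy data: two rows sharing 'cell_id',
# distinguished by 'step'. Note that pandas only suffixes overlapping column names
# during the merge, so the second row's columns keep their bare names here:
import pandas as pd
toy = pd.DataFrame({'cell_id': ['A', 'A'], 'step': [1, 2], 'cap': [3.0, 2.9]})
print(combine_rows(toy, on='cell_id', suffix_attr='step'))  # columns: cell_id, cap_1, cap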
Python | def normalize_dataset(frame):
"""
    Standardize frame to zero mean and unit variance using StandardScaler from sklearn
    (the result is not strictly bounded to [-1, 1]).
    Note: In a machine learning context, the scaler should be fit on the training set only
    and then applied to the validation and test sets.
"""
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
pipeline = Pipeline([
('std_scaler', StandardScaler()),
])
frame = pipeline.fit_transform(frame)
    return frame |
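# Sketch of normalize_dataset on a one-column toy frame (assumes scikit-learn is
# installed); StandardScaler returns an ndarray of z-scores, roughly [-1.22, 0.0, 1.22]:
import pandas as pd
toy = pd.DataFrame({'x': [1.0, 2.0, 3.0]})
print(normalize_dataset(toy))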
Python | def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
# First try an O(n) time, O(1) space solution for the common case that
    # everything is finite; fall back to O(n) space np.isfinite to
# prevent false positives from overflow in sum method.
    _assert_all_finite(X.data if sparse.issparse(X) else X) |
Python | def safe_asarray(X, dtype=None, order=None):
"""Convert X to an array or sparse matrix.
Prevents copying X when possible; sparse matrices are passed through."""
if sparse.issparse(X):
assert_all_finite(X.data)
else:
X = np.asarray(X, dtype, order)
assert_all_finite(X)
    return X |
Python | def array2d(X, dtype=None, order=None, copy=False):
"""Returns at least 2-d array with data from X"""
if sparse.issparse(X):
raise TypeError('A sparse matrix was passed, but dense data '
'is required. Use X.toarray() to convert to dense.')
X_2d = np.asarray(np.atleast_2d(X), dtype=dtype, order=order)
_assert_all_finite(X_2d)
if X is X_2d and copy:
X_2d = safe_copy(X_2d)
    return X_2d |
Python | def atleast2d_or_csc(X, dtype=None, order=None, copy=False):
"""Like numpy.atleast_2d, but converts sparse matrices to CSC format.
Also, converts np.matrix to np.ndarray.
"""
return _atleast2d_or_sparse(X, dtype, order, copy, sparse.csc_matrix,
"tocsc") | def atleast2d_or_csc(X, dtype=None, order=None, copy=False):
"""Like numpy.atleast_2d, but converts sparse matrices to CSC format.
Also, converts np.matrix to np.ndarray.
"""
return _atleast2d_or_sparse(X, dtype, order, copy, sparse.csc_matrix,
"tocsc") |
Python | def _get_param_names(cls):
"""Get parameter names for the estimator"""
try:
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
# introspect the constructor arguments to find the model parameters
# to represent
args, varargs, kw, default = inspect.getargspec(init)
            if varargs is not None:
raise RuntimeError('crab recommenders should always '
'specify their parameters in the signature'
' of their init (no varargs).')
# Remove 'self'
# XXX: This is going to fail if the init is a staticmethod, but
# who would do this?
args.pop(0)
except TypeError:
# No explicit __init__
args = []
args.sort()
        return args |
Python | def _pprint(params, offset=0, printer=repr):
"""Pretty print the dictionary 'params'
Parameters
----------
params: dict
The dictionary to pretty print
offset: int
        The offset in characters to add at the beginning of each line.
printer:
The function to convert entries to strings, typically
the builtin str or repr
"""
# Do a multi-line justified repr:
options = np.get_printoptions()
np.set_printoptions(precision=5, threshold=64, edgeitems=2)
params_list = list()
this_line_length = offset
line_sep = ',\n' + (1 + offset // 2) * ' '
    for i, (k, v) in enumerate(sorted(params.items())):  # dict.iteritems() is Python 2 only
if type(v) is float:
# use str for representing floating point numbers
# this way we get consistent representation across
# architectures and versions.
this_repr = '%s=%s' % (k, str(v))
else:
# use repr of the rest
this_repr = '%s=%s' % (k, printer(v))
if len(this_repr) > 500:
this_repr = this_repr[:300] + '...' + this_repr[-100:]
if i > 0:
if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
params_list.append(line_sep)
this_line_length = len(line_sep)
else:
params_list.append(', ')
this_line_length += 2
params_list.append(this_repr)
this_line_length += len(this_repr)
np.set_printoptions(**options)
lines = ''.join(params_list)
# Strip trailing space to avoid nightmare in doctests
lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
    return lines |
Python | def manhattan_distances(X, Y):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
This distance implementation is the distance between two points in a grid
based on a strictly horizontal and/or vertical path (that is, along the
    grid lines as opposed to the diagonal or "as the crow flies" distance).
The Manhattan distance is the simple sum of the horizontal and vertical
components, whereas the diagonal distance might be computed by applying the
Pythagorean theorem.
The resulting unbounded distance is then mapped between 0 and 1.
Parameters
----------
X: array of shape (n_samples_1, n_features)
Y: array of shape (n_samples_2, n_features)
Returns
-------
distances: array of shape (n_samples_1, n_samples_2)
Examples
--------
>>> from crab.metrics.pairwise import manhattan_distances
>>> X = [[2.5, 3.5, 3.0, 3.5, 2.5, 3.0], [2.5, 3.5, 3.0, 3.5, 2.5, 3.0]]
>>> # distance between rows of X
>>> manhattan_distances(X, X)
array([[ 1., 1.],
[ 1., 1.]])
>>> manhattan_distances(X, [[3.0, 3.5, 1.5, 5.0, 3.5, 3.0]])
array([[ 0.25],
[ 0.25]])
"""
if issparse(X) or issparse(Y):
raise ValueError("manhattan_distance does"
"not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
n_samples_X, n_features_X = X.shape
n_samples_Y, n_features_Y = Y.shape
if n_features_X != n_features_Y:
raise Exception("X and Y should have the same number of features!")
D = np.abs(X[:, np.newaxis, :] - Y[np.newaxis, :, :])
D = np.sum(D, axis=2)
    return 1.0 - (D / float(n_features_X)) |
Python | def jaccard_coefficient(X, Y):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
This correlation implementation is a statistic used for comparing the
similarity and diversity of sample sets.
The Jaccard coefficient measures similarity between sample sets,
and is defined as the size of the intersection divided by the size of the
union of the sample sets.
Parameters
----------
X: array of shape (n_samples_1, n_features)
Y: array of shape (n_samples_2, n_features)
Returns
-------
distances: array of shape (n_samples_1, n_samples_2)
Examples
--------
>>> from crab.metrics.pairwise import jaccard_coefficient
>>> X = [['a', 'b', 'c', 'd'],['e', 'f','g']]
>>> # distance between rows of X
>>> jaccard_coefficient(X, X)
array([[ 1., 0.],
[ 0., 1.]])
>>> jaccard_coefficient(X, [['a', 'b', 'c', 'k']])
array([[ 0.6],
[ 0. ]])
"""
X = safe_asarray(X)
Y = safe_asarray(Y)
#TODO: Fix to work with sparse matrices.
if issparse(X) or issparse(Y):
raise ValueError('Jaccard does not yet support sparse matrices.')
#TODO: Check if it is possible to optimize this function
sX = X.shape[0]
sY = Y.shape[0]
dm = np.zeros((sX, sY))
    for i in range(0, sX):
        for j in range(0, sY):
sx = set(X[i])
sy = set(Y[j])
n_XY = len(sx & sy)
d_XY = len(sx | sy)
dm[i, j] = n_XY / float(d_XY)
    return dm |
Python | def tanimoto_coefficient(X, Y):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
An implementation of a "similarity" based on the Tanimoto coefficient,
or extended Jaccard coefficient.
This is intended for "binary" data sets where a user either expresses a
generic "yes" preference for an item or has no preference. The actual
preference values do not matter here, only their presence or absence.
Parameters
----------
X: array of shape n_samples_1
Y: array of shape n_samples_2
Returns
-------
distances: array of shape (n_samples_1, n_samples_2)
Examples
--------
>>> from crab.metrics.pairwise import tanimoto_coefficient
>>> X = [['a', 'b', 'c', 'd'],['e', 'f','g']]
>>> # distance between rows of X
>>> tanimoto_coefficient(X, X)
array([[ 1., 0.],
[ 0., 1.]])
>>> tanimoto_coefficient(X, [['a', 'b', 'c', 'k']])
array([[ 0.6],
[ 0. ]])
"""
    return jaccard_coefficient(X, Y) |
Python | def cosine_distances(X, Y):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
An implementation of the cosine similarity. The result is the cosine of
the angle formed between the two preference vectors.
    Note that this similarity does not "center" its data, i.e. it does not shift the
    user's preference values so that each of their means is 0. For that behavior,
    use the Pearson coefficient, which is mathematically equivalent for centered data.
Parameters
----------
X: array of shape (n_samples_1, n_features)
Y: array of shape (n_samples_2, n_features)
Returns
-------
distances: array of shape (n_samples_1, n_samples_2)
Examples
--------
>>> from crab.metrics.pairwise import cosine_distances
>>> X = [[2.5, 3.5, 3.0, 3.5, 2.5, 3.0],[2.5, 3.5, 3.0, 3.5, 2.5, 3.0]]
>>> # distance between rows of X
>>> cosine_distances(X, X)
array([[ 1., 1.],
[ 1., 1.]])
>>> cosine_distances(X, [[3.0, 3.5, 1.5, 5.0, 3.5,3.0]])
array([[ 0.9606463],
[ 0.9606463]])
"""
X, Y = check_pairwise_arrays(X, Y)
#TODO: Fix to work with sparse matrices.
if issparse(X) or issparse(Y):
raise ValueError('Cosine does not yet support sparse matrices.')
    return 1. - ssd.cdist(X, Y, 'cosine') |
Python | def _spearman_r(X, Y):
"""
Calculates a Spearman rank-order correlation coefficient
and the p-value to test for non-correlation.
"""
rho, p_value = spearman(X, Y)
    return rho |
Python | def spearman_coefficient(X, Y):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
    Like the Pearson coefficient, but compares the relative ranking of preference
    values instead of the preference values themselves. That is, each user's
    preferences are sorted and each item is assigned a rank as its preference value,
    with 1 being assigned to the least preferred item.
Parameters
----------
X: array of shape (n_samples_1, n_features)
Y: array of shape (n_samples_2, n_features)
Returns
-------
distances: array of shape (n_samples_1, n_samples_2)
Examples
--------
>>> from crab.metrics.pairwise import spearman_coefficient
>>> X = [[('a',2.5),('b', 3.5), ('c',3.0), ('d',3.5)], \
[('e', 2.5),('f', 3.0), ('g', 2.5), ('h', 4.0)] ]
>>> # distance between rows of X
>>> spearman_coefficient(X, X)
array([[ 1., 0.],
[ 0., 1.]])
>>> spearman_coefficient(X, [[('a',2.5),('b', 3.5), ('c',3.0), ('k',3.5)]])
array([[ 1.],
[ 0.]])
"""
# should not need X_norm_squared because if you could precompute that as
# well as Y, then you should just pre-compute the output and not even
# call this function.
if X is Y:
X = Y = np.asanyarray(X, dtype=[('x', 'S30'), ('y', float)])
else:
X = np.asanyarray(X, dtype=[('x', 'S30'), ('y', float)])
Y = np.asanyarray(Y, dtype=[('x', 'S30'), ('y', float)])
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices")
X.sort(order='y')
Y.sort(order='y')
result = []
#TODO: Check if it is possible to optimize this function
i = 0
for arrayX in X:
result.append([])
for arrayY in Y:
Y_keys = [key for key, value in arrayY]
XY = [(key, value) for key, value in arrayX if key in Y_keys]
sumDiffSq = 0.0
for index, tup in enumerate(XY):
sumDiffSq += pow((index + 1) - (Y_keys.index(tup[0]) + 1), 2.0)
n = len(XY)
if n == 0:
result[i].append(0.0)
else:
result[i].append(1.0 - ((6.0 * sumDiffSq) / (n * (n * n - 1))))
result[i] = np.asanyarray(result[i])
i += 1
    return np.asanyarray(result) |
Python | def test(self, label='fast', verbose=1, extra_argv=['--exe'],
doctests=True, coverage=False):
"""Run the full test suite
Examples
--------
This will run the test suite and stop at the first failing
example
"""
return super(_NoseTester, self).test(label=label, verbose=verbose,
extra_argv=extra_argv,
                                             doctests=doctests, coverage=coverage) |
Python | def FindBest(source, text, keys=['name'],print_=False):
"""
Given a dictionary and a text, find the best matched item from the dictionary using the name
:param source: The dictionary to search from (i.e. units, gears, jobs, etc)
:type source: dict
:param text: String to find the match
:type text: str
    :return: The key of the best matched item, or a (match, score) tuple when keys is empty
    :rtype: str or tuple
"""
text=text.title()
# XXX: Purposely shadowing the text parameter
# Calculate the match score for each key in the source dictionary using the input text.
# Then, create a list of (key, the best score) tuples.
if not keys:
similarities = [
(key, jellyfish.jaro_winkler(text, key.title()))
            for key, item in source.items()  # items in the searched dict
]
key, score = max(similarities, key=lambda s: s[1])
best_match = source[key]
if print_:
print("{name} is the best match for input '{input}' with score of {score}".format(
name=best_match, input=text, score=score
))
return best_match,score
else:
similarities = [
(key, jellyfish.jaro_winkler(text, ival.title()))
            for key, item in source.items()  # items in the searched dict
for val in keys # value to look for in item
if val in item # if value in item
for ival in traverse(item[val]) # fix for arrays
]
# Find the key with the highest score (This is the best matched key)
key, score = max(similarities, key=lambda s: s[1])
# XXX: If needed, implement a minimum threshold here
# Return the actual best-matched value
best_match = source[key]
if print_:
print("{name} is the best match for input '{input}' with score of {score}".format(
name=best_match.get('name'), input=text, score=score
))
return key
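A hedged usage sketch of FindBest; the units dictionary, entry names and expected results below are made up for illustration, jellyfish must be installed, and the traverse helper used above is assumed to yield a plain string unchanged:

import jellyfish  # assumed installed, as required by FindBest above

units = {
    "ramza": {"name": "Ramza Beoulve"},
    "agrias": {"name": "Agrias Oaks"},
}

# With the default keys=['name'], FindBest returns the key of the best match
best_key = FindBest(units, "agries oaks")     # misspelled input, illustrative
print(best_key)                               # expected: "agrias"

# With keys=[] it returns a (best_match, score) tuple instead
best_match, score = FindBest(units, "ramza", keys=[])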
Python | async def search(self, ctx, *, query: str=None):
""" Search the public web for a query """
if query is None:
return await ctx.send('You are supposed to enter a query after this command, smh', delete_after=5)
splt = query.split()[1:]
ms = ' '.join(splt)
regex = re.compile('<div class="g">')
addr = urllib.request.quote(ms)
addr = 'https://www.startpage.com/do/search?q=' + addr
req = urllib.request.Request(addr, None, self.headers)
response = urllib.request.urlopen(req)
htm = response.read().decode('utf8')
try:
result_page = htm.split('<ol class="list-flat">')[1].split('</ol>')[0]
except IndexError:
return await ctx.send('oops! try later', delete_after=23)
result_page = re.sub(r'<a[\s\S]*?>', '<a>', result_page).replace('<li><a>Anonymous View</a></li>', '').split('</li>')
for m in range(len(result_page)): result_page[m] = re.sub(r'<[\s\S]*?>', ' ', result_page[m].replace('<br>', '\n'))
search_result = []
for m in result_page[:3]:
m = html.unescape(m)
_header, li = m.strip().split(' \n \n ')
t = li.split(' \n \n\n \n \n ')
if len(t) == 1:
desc = '[**StartPage:**]'
link = t[0]
else:
link, desc = t
if link[:4] != 'http': link = 'http://' + link
search_result.append([_header, desc, '<' + link + '>'])
await ctx.send(search_result)
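The request construction in search relies only on the standard library; a minimal sketch of just the URL-encoding and header handling (the User-Agent value and query string are assumptions, and StartPage's HTML layout can change at any time, which is what the IndexError fallback above guards against):

import urllib.request

headers = {'User-Agent': 'Mozilla/5.0'}   # hypothetical header dict, standing in for self.headers
query = 'discord bot tutorial'
addr = 'https://www.startpage.com/do/search?q=' + urllib.request.quote(query)
req = urllib.request.Request(addr, None, headers)
# urllib.request.urlopen(req) would then fetch the page, as in search() above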
Python | async def activeplayers(self, ctx:commands.Context, rt_or_ct:str, number_of_days:int):
"""This command shows the number of active players in each class. You can specify the number of activity time limit. If you're unsure, 5 days is a good activity time limit."""
if number_of_days < 1 or number_of_days > 365:
await ctx.send(f'"{number_of_days}" is not a valid option. Valid options are between 1 days and 365 days.')
return
is_rt = None
if rt_or_ct.lower() == "rt":
is_rt = True
elif rt_or_ct.lower() == "ct":
is_rt = False
else:
await ctx.send(f'"{rt_or_ct}" is not a valid option. Valid options are: RT or CT')
return
if not self.__role_updating_task__.is_running():
to_delete = await ctx.send("Pulling player data...")
try:
await pull_data(self.message_sender, False, ctx) #pulls player data, and if no test cutoff data, pulls that too
finally:
await to_delete.delete()
await send_active_players(ctx, is_rt, number_of_days)
Python | async def inclass(self, ctx:commands.Context, rt_or_ct:str, number_of_days:int=None):
"""This command sends a list of players in each class. You can also specify an activity criteria.
IMPORTANT NOTE: If you do not specify an activity requirement, all players are included, regardless of whether they have played an event this season. If you did specify an activity requirement, the total number of players statistic is the total number of players in that class, regardless of whether they played an event this season or not. If you're only interested in the activity among people who have played at least 1 event this season, you should check out !activeplayers
Examples:
- To send a list of players in each RT class: !inclass rt
- To send a list of players in each CT class who have been active in the past 5 days: !inclass ct 5"""
if number_of_days is not None:
if number_of_days < 1 or number_of_days > 365:
await ctx.send(f'"{number_of_days}" is not a valid option. Valid options are between 1 days and 365 days. If you don\'t want to filter by activity, don\'t specify a number of days.')
return
is_rt = None
if rt_or_ct.lower() == "rt":
is_rt = True
elif rt_or_ct.lower() == "ct":
is_rt = False
else:
await ctx.send(f'"{rt_or_ct}" is not a valid option. Valid options are: RT or CT')
return
if not self.__role_updating_task__.is_running():
to_delete = await ctx.send("Pulling player data...")
try:
await pull_data(self.message_sender, False, ctx) #pulls player data, and if no test cutoff data, pulls that too
finally:
await to_delete.delete()
await send_file_with_players_in_each_class(ctx, is_rt, number_of_days)
Python | async def testcutoffs(self, ctx:commands.Context):
"""This command sets test cutoffs, which can then be used by the !activeplayers, !hypotheticalroles, and !inclass commands to see the effect a proposed cutoff may have.
The syntax of the command is !testcutoffs ClassName, LowerCutoff, ClassName, LowerCutoff, ...
If you want your lower cutoff to be Negative Infinity, do -Infinity
For example, !testcutoffs Class F, -Infinity, Class E, 1500, Class D, 4000, Class C, 8000"""
testcutoff_error_message = """The syntax of the command is `!testcutoffs ClassName, LowerCutoff, ClassName, LowerCutoff, ...`
If you want your lower cutoff to be Negative Infinity, do -Infinity
For example, `!testcutoffs Class F, -Infinity, Class E, 1500, Class D, 4000, Class C, 8000`"""
if self.__role_updating_task__.is_running():
await ctx.send(f"The bot is running role updating every {LOOP_TIME} seconds, which will override and test cutoffs you set. You should first do `!stop` and then use the `!testcutoffs` command.")
return
args = ctx.message.content.split(",")
args[0] = args[0][len("!testcutoffs"):]
LOWEST_CUTOFF = -99999999
if len(args) < 2:
await ctx.send(testcutoff_error_message)
return
allowed_negative_infinity_terms = ["-infinity", "negativeinfinity"]
new_cutoffs = []
cur_class_data = []
for ind, item in enumerate(args):
if ind % 2 == 0: #It's a Class name, new class
cur_class_data = []
cur_class_data.append(item.strip())
else:
lower_cutoff = None
temp = item.lower().replace(" ", "")
if common.isint(temp):
lower_cutoff = int(temp)
if lower_cutoff < LOWEST_CUTOFF:
await ctx.send(f'"{lower_cutoff}" is below the minimum number allowed: {LOWEST_CUTOFF}\n\nIf you want to do negative infinity, do "-infinity" for your cutoff number.')
return
elif temp in allowed_negative_infinity_terms:
lower_cutoff = None
else:
await ctx.send(f'"{item}" is not a valid number for the lower cutoff for class named {cur_class_data[0]}\n\n{testcutoff_error_message}')
return
cur_class_data.insert(0, lower_cutoff)
cur_class_data.append(0)
new_cutoffs.append((cur_class_data[0], cur_class_data[1], cur_class_data[2]))
new_cutoffs.sort(key=lambda cutoff_data: LOWEST_CUTOFF if cutoff_data[0] is None else cutoff_data[0], reverse=True)
common.test_cutoffs.clear()
common.test_cutoffs.extend(new_cutoffs)
await ctx.send("Test cutoffs:\n" + common.cutoff_display_text(common.test_cutoffs) + "\n\nYou can now use `!activeplayers` or `!hypotheticalroles` or `!inclass` to see how the proposed cutoffs will change things.") | async def testcutoffs(self, ctx:commands.Context):
"""This command sets test cutoffs, which can then be used by the !activeplayers, !hypotheticalroles, and !inclass commands to see the effect a proposed cutoff may have.
The syntax of the command is !testcutoffs ClassName, LowerCutoff, ClassName, LowerCutoff, ...
If you want your lower cutoff to be Negative Infinity, do -Infinity
For example, !testcutoffs Class F, -Infinity, Class E, 1500, Class D, 4000, Class C, 8000"""
testcutoff_error_message = """The syntax of the command is `!testcutoffs ClassName, LowerCutoff, ClassName, LowerCutoff, ...`
If you want your lower cutoff to be Negative Infinity, do -Infinity
For example, `!testcutoffs Class F, -Infinity, Class E, 1500, Class D, 4000, Class C, 8000`"""
if self.__role_updating_task__.is_running():
await ctx.send(f"The bot is running role updating every {LOOP_TIME} seconds, which will override and test cutoffs you set. You should first do `!stop` and then use the `!testcutoffs` command.")
return
args = ctx.message.content.split(",")
args[0] = args[0][len("!testcutoffs"):]
LOWEST_CUTOFF = -99999999
if len(args) < 2:
await ctx.send(testcutoff_error_message)
return
allowed_negative_infinity_terms = ["-infinity", "negativeinfinity"]
new_cutoffs = []
cur_class_data = []
for ind, item in enumerate(args):
if ind % 2 == 0: #It's a Class name, new class
cur_class_data = []
cur_class_data.append(item.strip())
else:
lower_cutoff = None
temp = item.lower().replace(" ", "")
if common.isint(temp):
lower_cutoff = int(temp)
if lower_cutoff < LOWEST_CUTOFF:
await ctx.send(f'"{lower_cutoff}" is below the minimum number allowed: {LOWEST_CUTOFF}\n\nIf you want to do negative infinity, do "-infinity" for your cutoff number.')
return
elif temp in allowed_negative_infinity_terms:
lower_cutoff = None
else:
await ctx.send(f'"{item}" is not a valid number for the lower cutoff for class named {cur_class_data[0]}\n\n{testcutoff_error_message}')
return
cur_class_data.insert(0, lower_cutoff)
cur_class_data.append(0)
new_cutoffs.append((cur_class_data[0], cur_class_data[1], cur_class_data[2]))
new_cutoffs.sort(key=lambda cutoff_data: LOWEST_CUTOFF if cutoff_data[0] is None else cutoff_data[0], reverse=True)
common.test_cutoffs.clear()
common.test_cutoffs.extend(new_cutoffs)
await ctx.send("Test cutoffs:\n" + common.cutoff_display_text(common.test_cutoffs) + "\n\nYou can now use `!activeplayers` or `!hypotheticalroles` or `!inclass` to see how the proposed cutoffs will change things.") |
Python | async def resume(self, ctx): #suppress
"""This command resumes the bot keeping everyone's role up to date, which runs every 120 seconds."""
if self.__role_updating_task__.is_running():
await ctx.send(f"The bot is already running role updating every {LOOP_TIME} seconds. If you want to stop it, do `!stop`")
else:
self.first_run = False
self.__role_updating_task__.start()
await ctx.send("Resumed.") | async def resume(self, ctx): #suppress
"""This command resumes the bot keeping everyone's role up to date, which runs every 120 seconds."""
if self.__role_updating_task__.is_running():
await ctx.send(f"The bot is already running role updating every {LOOP_TIME} seconds. If you want to stop it, do `!stop`")
else:
self.first_run = False
self.__role_updating_task__.start()
await ctx.send("Resumed.") |
Python | async def stop(self, ctx):
"""This command stops the bot from keeping everyone's role up to date every 120 seconds."""
if not self.__role_updating_task__.is_running():
await ctx.send(f"The bot is not updating roles in the background. If you want to start updating roles in the background, do `!resume`")
else:
self.first_run = True
self.__role_updating_task__.cancel()
await ctx.send(f"The bot has stopped updating roles in the background. If you want to start updating roles in the background again, do `!resume`") | async def stop(self, ctx):
"""This command stops the bot from keeping everyone's role up to date every 120 seconds."""
if not self.__role_updating_task__.is_running():
await ctx.send(f"The bot is not updating roles in the background. If you want to start updating roles in the background, do `!resume`")
else:
self.first_run = True
self.__role_updating_task__.cancel()
await ctx.send(f"The bot has stopped updating roles in the background. If you want to start updating roles in the background again, do `!resume`") |
Python | async def updateroles(self, ctx):
"""This command resumes the bot keeping everyone's role up to date, which runs every 120 seconds."""
if self.__role_updating_task__.is_running():
await ctx.send(f"The bot is already running role updating every {LOOP_TIME} seconds. If you want to stop it and manually run the updating process **one time**, do `!stop` and then `!updateroles`. You should then start the automated role updating process again by doing `!resume`")
else:
await main(self.message_sender, verbose=True)
Python | async def hypotheticalroles(self, ctx, rt_or_ct:str):
"""This command simply displays the roles each person were to receive and lose. It does not change anyone's roles. Before doing this command, you should !stop and then !testcutoffs to setup your test scenario."""
is_rt = None
if rt_or_ct.lower() == "rt":
is_rt = True
elif rt_or_ct.lower() == "ct":
is_rt = False
else:
await ctx.send(f'"{rt_or_ct}" is not a valid option. Valid options are: RT or CT')
return
if self.__role_updating_task__.is_running():
await ctx.send(f"The bot is already running role updating every {LOOP_TIME} seconds. If you want to stop it to set test cutoffs and view the role's people would hypothetically lose and receive, do `!stop` and then `!testcutoffs` and then `!hypotheticalroles`.")
else:
temp_message_sender = MessageSender(ctx.channel)
temp_message_sender.send_queued_messages.start()
await main(self.message_sender, verbose=True, modify_roles=False, only_rt=is_rt)
await asyncio.sleep(MessageSender.TIME_BETWEEN_MESSAGES+1)
temp_message_sender.send_queued_messages.cancel()
Python | def dispatch_labels(train_data, dialogs):
""" Dispatch snorkel labels for each dialog"""
start_idx = 0
for dialog in dialogs:
num_function_dialogs = len(dialog['function_dialogs'])
end_idx = start_idx + num_function_dialogs
labels = train_data.label[start_idx:end_idx].tolist()
dialog['module_index'] = labels
start_idx += num_function_dialogs
return dialogs
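dispatch_labels assumes train_data.label is aligned with the dialogs' function_dialogs lists laid end to end. A tiny sketch with made-up data showing how the slice boundaries line up (requires pandas and the function above):

import pandas as pd

# hypothetical flat label column: 2 labels for dialog A, 3 for dialog B
train_data = pd.DataFrame({"label": [0, 1, 2, 2, 0]})
dialogs = [
    {"function_dialogs": ["a1", "a2"]},
    {"function_dialogs": ["b1", "b2", "b3"]},
]

dialogs = dispatch_labels(train_data, dialogs)
print(dialogs[0]["module_index"])   # [0, 1]
print(dialogs[1]["module_index"])   # [2, 2, 0]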
Python | def generate_mask(self, mask):
''' returns a representation of the image thresholded for the hsv color range indicated by the mask argument... mask is a dictionary containing entries for the hue lower and upper bounds ("hl", "hh"), saturation lower and upper bounds ("sl", "sh"), and value lower and upper bounds ("vl", "vh") consistent with the desired threshold '''
hsv = cv2.cvtColor(self.bgr, cv2.COLOR_BGR2HSV)
assert(isinstance(mask, dict))
assert("hl" in mask and "sl" in mask and "vl" in mask)
assert("hh" in mask and "sh" in mask and "vh" in mask)
hsv_lower_values = [mask["hl"], mask["sl"], mask["vl"]]
lower_bound = np.array(hsv_lower_values, dtype=np.uint8)
hsv_upper_values = [mask["hh"], mask["sh"], mask["vh"]]
upper_bound = np.array(hsv_upper_values, dtype=np.uint8)
return cv2.inRange(hsv, lower_bound, upper_bound)
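A hedged usage sketch of the same inRange thresholding as generate_mask, written as a free function so it runs outside the class; the "roughly red" HSV bounds and the file name are arbitrary illustrations, not values from the original project:

import cv2
import numpy as np

def hsv_in_range(bgr, mask):
    # mirrors generate_mask above, but as a free function for illustration
    hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
    lower = np.array([mask["hl"], mask["sl"], mask["vl"]], dtype=np.uint8)
    upper = np.array([mask["hh"], mask["sh"], mask["vh"]], dtype=np.uint8)
    return cv2.inRange(hsv, lower, upper)

# hypothetical "roughly red" range; tune the bounds for your own imagery
red_mask = {"hl": 0, "sl": 120, "vl": 70, "hh": 10, "sh": 255, "vh": 255}
bgr = cv2.imread("sample.png")            # any BGR image on disk
binary = hsv_in_range(bgr, red_mask)      # 255 inside the range, 0 outside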
Python | def contour_data(self, layer, dilate = 2):
''' returns contour data for contiguous objects in a layer '''
default_gray = cv2.cvtColor(layer, cv2.COLOR_BGR2GRAY)
kernel = np.ones((dilate, dilate),np.uint8)
dilated_gray = cv2.dilate(default_gray, kernel, iterations=2)
ret, thresh = cv2.threshold(dilated_gray, 1, 255, cv2.THRESH_BINARY)
# note: the three-value return below is the OpenCV 3.x signature; OpenCV 4.x returns only (contours, hierarchy)
image, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
return contours, image
Python | def geometric_centers(self, contours):
''' returns an array with the geometric centers of argument contours '''
coordinates = []
for contour in contours:
moments = cv2.moments(contour)
contourX = int(moments['m10'] / float(moments['m00']))
contourY = int(moments['m01'] / float(moments['m00']))
coordinates += [[contourX, contourY]]
return coordinates
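The centroid computation above uses image moments, cx = m10/m00 and cy = m01/m00; note that m00 (the contour area) can be zero for degenerate contours, in which case the division raises. A standalone check of the formula on a synthetic square (synthetic data, OpenCV 4 return signature assumed):

import cv2
import numpy as np

# draw a filled 40x40 white square on a black canvas
canvas = np.zeros((100, 100), dtype=np.uint8)
cv2.rectangle(canvas, (30, 30), (69, 69), 255, thickness=-1)

contours, _ = cv2.findContours(canvas, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)  # OpenCV 4 signature
m = cv2.moments(contours[0])
if m["m00"] != 0:                         # guard against degenerate (zero-area) contours
    cx, cy = int(m["m10"] / m["m00"]), int(m["m01"] / m["m00"])
    print(cx, cy)                         # roughly the square's centre, about (49, 49)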
Python | def signal_count(self, contours, min_area):
''' number of contours with area greater or equal to some parameter '''
signal = []
for contour in contours:
if cv2.contourArea(contour) >= min_area:
signal.append(contour)
return len(signal)
Python | def distance(self, geometric_centers, radii):
''' computes distance between contours based on geometric center, but compensates for area with radius approximation, which assumes contours are globular '''
# TODO: the radius compensation only subtracts the popped contour's radius; subtract both radii for a true edge-to-edge distance
distances = []
while geometric_centers:
    i = len(geometric_centers) - 1  # index of the point about to be popped
x1, y1 = geometric_centers.pop()
for x2, y2 in geometric_centers:
hypot = math.sqrt((x2-x1)**2+(y2-y1)**2)
distances.append(hypot-radii[i])
return distances
Python | def background(self):
''' extract only uncolored background from image '''
kernel = np.ones((4,4), np.uint8)
eroded_img = cv2.erode(self.gray, kernel, iterations=2)
ret, thresh = cv2.threshold(eroded_img, 200, 255, cv2.THRESH_BINARY_INV)
return cv2.bitwise_and(self.bgr, self.bgr, mask = thresh)
Python | def copy_csv_file(src, new_file_name):
''' copies csv file with specified name to disk using standard io utilities '''
shutil.copy(src,new_file_name)
# os.path.isfile never raises IOError; check its boolean result instead
if not os.path.isfile(new_file_name):
print("Due to an error in the IO component of this program, the expected output file has not been written") | def copy_csv_file(src, new_file_name):
''' copies csv file with specified name to disk using standard io utilities '''
shutil.copy(src,new_file_name)
try:
os.path.isfile(new_file_name)
except IOError:
print("Due to an error in the IO component of this program, the expected output file has not been written") |
Python | def read_col(file_name,col):
''' returns csv column in list form using pandas csv reader given column header and file name'''
# os.path.isfile never raises IOError; check its boolean result instead
if not os.path.isfile(file_name):
    print("Due to an error in the IO component of this program, the referenced output file does not exist")
data = pd.read_csv(file_name)[col]
return [col] + data.values.tolist()
Python | def add_col(file_name, data):
'''appends a column to the end of the specified csv file, using the first element of data as the header and the remaining elements as values, via pandas csv utilities'''
# os.path.isfile never raises IOError; check its boolean result instead
if not os.path.isfile(file_name):
    print("Due to an error in the IO component of this program, the referenced output file does not exist")
table_data = pd.read_csv(file_name)
table_data.insert(len(table_data.columns),str(data[0]),data[1:],allow_duplicates=True)
table_data.to_csv(file_name,index=False)
Python | def replace_col(file_name,data):
''' replaces the column whose header matches the first element of data: the old column is dropped and the new values are re-inserted at the end of the table using pandas csv utilities '''
# os.path.isfile never raises IOError; check its boolean result instead
if not os.path.isfile(file_name):
    print("Due to an error in the IO component of this program, the referenced output file does not exist")
df = pd.read_csv(file_name)
df.drop([str(data[0])], axis = 1 , inplace= True)
df.insert(len(df.columns),str(data[0]),data[1:],allow_duplicates=True)
df.to_csv(file_name,index=False)
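The three pandas helpers above share one pattern: read the whole csv into a DataFrame, drop or insert a column whose header is data[0], and write the file back. A throwaway end-to-end sketch (the file name and values are illustrative; requires pandas and the helpers above):

import pandas as pd

pd.DataFrame({"id": [1, 2], "score": [0.1, 0.2]}).to_csv("demo.csv", index=False)

add_col("demo.csv", ["label", "a", "b"])          # appends a new "label" column
print(read_col("demo.csv", "label"))              # ['label', 'a', 'b']

replace_col("demo.csv", ["label", "x", "y"])      # drops "label" and re-inserts it with new values
print(read_col("demo.csv", "label"))              # ['label', 'x', 'y']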
Python | def transform_file(file_path, *funcs):
"""
Transforms the file given in file_path by passing it
serially through all the functions in *func and then
writing it back out to file_path
"""
with open(file_path, 'r+') as f:
data = f.read()
f.seek(0)
for func in funcs:
data = func(data)
f.write(data)
f.truncate()  # drop leftover bytes if the transformed text is shorter than the original
print(file_path)
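A short usage sketch of transform_file composing two trivial text transforms; the file name and transform functions are illustrative only:

def strip_trailing_whitespace(text):
    return "\n".join(line.rstrip() for line in text.splitlines()) + "\n"

def tabs_to_spaces(text):
    return text.replace("\t", "    ")

# each function receives the output of the previous one, then the result is written back
transform_file("example.py", tabs_to_spaces, strip_trailing_whitespace)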
Python | def rot_state(self):
""" Update the current state and count state transitions """
pin_1_state = GPIO.input(self.PIN_1)
pin_2_state = GPIO.input(self.PIN_2)
next_state = (pin_1_state, pin_2_state)
prev_state = self.curr_state
if self.curr_state != next_state:
self.count += 1
self.curr_state = next_state
if clockwise_state_transitions[prev_state] == self.curr_state:
self.dir = CLOCKWISE
elif counterclockwise_state_transitions[prev_state] == self.curr_state:
self.dir = COUNTERCLOCKWISE
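rot_state relies on two lookup tables, clockwise_state_transitions and counterclockwise_state_transitions, that map each (PIN_1, PIN_2) state to its successor in the quadrature (Gray code) sequence. They are not shown in this snippet; a plausible definition, assuming the conventional 00 -> 01 -> 11 -> 10 -> 00 clockwise order (the actual direction depends on wiring), would be:

# Assumed Gray-code order for clockwise rotation: (0,0) -> (0,1) -> (1,1) -> (1,0) -> (0,0)
clockwise_state_transitions = {
    (0, 0): (0, 1),
    (0, 1): (1, 1),
    (1, 1): (1, 0),
    (1, 0): (0, 0),
}
# Counterclockwise is the same cycle traversed in reverse
counterclockwise_state_transitions = {v: k for k, v in clockwise_state_transitions.items()}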
Python | def start(self):
""" Start listening to GPIO pins to trigger callback functions """
def on_rotation(channel):
""" Update rotation state and call user defined callback functions after complete rotation """
self.rot_state()
if self.count // self.transitions_per_rotation != 0:
if self.dir == CLOCKWISE:
self.on_clockwise_rotate()
elif self.dir == COUNTERCLOCKWISE:
self.on_counterclockwise_rotate()
self.count = 0
def button_pressed(channel):
""" Call the user defined callback when button is pressed """
if not GPIO.input(self.BUTTON_PIN):
self.on_click()
self.setup()
# add callback to be called when rotating encoder
GPIO.add_event_detect(self.PIN_1, GPIO.BOTH, callback=on_rotation, bouncetime=0)
GPIO.add_event_detect(self.PIN_2, GPIO.BOTH, callback=on_rotation, bouncetime=0)
# add callback to be called when button is pressed
GPIO.add_event_detect(
self.BUTTON_PIN,
GPIO.FALLING,
callback=button_pressed,
bouncetime=BOUNCETIME,
)
Python | def on_rotation(channel):
""" Update rotation state and call user defined callback functions after complete rotation """
self.rot_state()
if self.count // self.transitions_per_rotation != 0:
if self.dir == CLOCKWISE:
self.on_clockwise_rotate()
elif self.dir == COUNTERCLOCKWISE:
self.on_counterclockwise_rotate()
self.count = 0
Python | def __setup_robots(self, robot_locations, team_colour):
"""Initializes the world from a list of robot locations
:param robot_locations: A list of robot locations (index is robot id)
:param team_colour: The color (either "blue" or "yellow")
"""
if "blue" in team_colour:
robot_map = self.world_state.blue_robots
else:
robot_map = self.world_state.yellow_robots
for robot_id, robot_location in enumerate(robot_locations):
robot_map[robot_id].CopyFrom(
RobotState(
global_position=Point(
x_meters=robot_location.x(), y_meters=robot_location.y()
),
global_orientation=Angle(radians=0),
global_velocity=Vector(x_component_meters=0, y_component_meters=0),
global_angular_velocity=AngularVelocity(radians_per_second=0),
)
)
self.setup_world(self.world_state)
Python | def __get_sensor_proto(self, ssl_wrapper, robot_status_listener):
"""Helper function to create a sensor proto
:param ssl_wrapper: The ssl_wrapper packet to put in the sensor proto
:param robot_status_listener: The robot status listener (blue or yellow)
:returns: A sensor proto with the robot status from the listener
"""
sensor_proto = SensorProto()
if ssl_wrapper:
sensor_proto.ssl_vision_msg.CopyFrom(ssl_wrapper)
robot_status = robot_status_listener.get_most_recent_message()
packets = []
while robot_status is not None:
packets.append(robot_status)
robot_status = robot_status_listener.get_most_recent_message()
sensor_proto.robot_status_msgs.extend(packets)
return sensor_proto